1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
39 #include "ixl_pf_iov.h"
44 #include "ixl_iw_int.h"
/* Forward declarations for file-local helper routines. */
47 static u8 ixl_convert_sysctl_aq_link_speed(u8, bool);
48 static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
49 static const char * ixl_link_speed_string(enum i40e_aq_link_speed);
50 static u_int ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
51 static u_int ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
52 static char * ixl_switch_element_string(struct sbuf *, u8, u16);
53 static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *);
/* Sysctl handlers: link/speed configuration and status. */
56 static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
57 static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
58 static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
59 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
60 static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
61 static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
62 static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
64 static int ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
/* Sysctl handlers: debug/diagnostic knobs (filters, RSS, FEC, I2C, LLDP). */
67 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
68 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
69 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
70 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
71 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
72 static int ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS);
73 static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
74 static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
75 static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
76 static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
77 static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
78 static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
79 static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
80 static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
81 static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
82 static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
83 static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
84 static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
85 static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
86 static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
/* Sysctl handlers: reset triggers and queue/interrupt mapping dumps. */
89 static int ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
90 static int ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
91 static int ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
92 static int ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
94 static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
95 static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
/* iWARP tunables; defined elsewhere in the driver (see ixl_iw_int.h include). */
99 extern int ixl_enable_iwarp;
100 extern int ixl_limit_iwarp_msix;
/*
 * Human-readable names for flow-control and FEC modes, indexed elsewhere
 * in this file (e.g. ixl_link_up_msg uses ixl_fec_string[0..2] and
 * ixl_fc_string[0..3]).
 * NOTE(review): the initializer entries appear truncated in this extract —
 * confirm the full tables against the upstream source.
 */
103 static const char * const ixl_fc_string[6] = {
112 static char *ixl_fec_string[3] = {
114 "CL74 FC-FEC/BASE-R",
/* Malloc type used for all allocations made by this driver. */
118 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
121 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
124 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
/* Unpack the OEM version fields from the packed 32-bit nvm.oem_ver word. */
126 u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
127 u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
128 u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
/* Format AQ-reported FW/API versions plus NVM hi/lo version fields. */
131 "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
132 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
133 hw->aq.api_maj_ver, hw->aq.api_min_ver,
134 (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
135 IXL_NVM_VERSION_HI_SHIFT,
136 (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
137 IXL_NVM_VERSION_LO_SHIFT,
139 oem_ver, oem_build, oem_patch);
/* Print the NVM/FW version string (built by ixl_nvm_version_str) to the console. */
143 ixl_print_nvm_version(struct ixl_pf *pf)
145 struct i40e_hw *hw = &pf->hw;
146 device_t dev = pf->dev;
/* Auto-sizing sbuf collects the formatted version string. */
149 sbuf = sbuf_new_auto();
150 ixl_nvm_version_str(hw, sbuf);
152 device_printf(dev, "%s\n", sbuf_data(sbuf));
157 * ixl_get_fw_mode - Check the state of FW
158 * @hw: device hardware structure
160 * Identify state of FW. It might be in a recovery mode
161 * which limits functionality and requires special handling
164 * @returns FW mode (normal, recovery, unexpected EMP reset)
166 static enum ixl_fw_mode
167 ixl_get_fw_mode(struct ixl_pf *pf)
169 struct i40e_hw *hw = &pf->hw;
170 enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
/* Recovery mode may already have been latched in software state. */
174 if (pf->recovery_mode)
175 return IXL_FW_MODE_RECOVERY;
/* Read the FW status word 1B field from the global FW status register. */
177 fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
179 /* Is set and has one of expected values */
180 if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
181 fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
182 fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
183 fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
184 fw_mode = IXL_FW_MODE_RECOVERY;
/* EMPR counter in range (0,10] signals an unexpected EMP reset state. */
186 if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
187 fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
188 fw_mode = IXL_FW_MODE_UEMPR;
194 * ixl_pf_reset - Reset the PF
197 * Ensure that FW is in the right state and do the reset
200 * @returns zero on success, or an error code on failure.
203 ixl_pf_reset(struct ixl_pf *pf)
205 struct i40e_hw *hw = &pf->hw;
206 enum i40e_status_code status;
207 enum ixl_fw_mode fw_mode;
209 fw_mode = ixl_get_fw_mode(pf);
210 ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
211 if (fw_mode == IXL_FW_MODE_RECOVERY) {
/* Latch recovery mode into driver state so other paths can see it. */
212 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
213 /* Don't try to reset device if it's in recovery mode */
217 status = i40e_pf_reset(hw);
218 if (status == I40E_SUCCESS)
221 /* Check FW mode again in case it has changed while
222 * waiting for reset to complete */
223 fw_mode = ixl_get_fw_mode(pf);
224 ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
225 if (fw_mode == IXL_FW_MODE_RECOVERY) {
226 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
230 if (fw_mode == IXL_FW_MODE_UEMPR)
231 device_printf(pf->dev,
232 "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n")
;
234 device_printf(pf->dev, "PF reset failure %s\n",
235 i40e_stat_str(hw, status));
240 * ixl_setup_hmc - Setup LAN Host Memory Cache
243 * Init and configure LAN Host Memory Cache
245 * @returns 0 on success, EIO on error
248 ixl_setup_hmc(struct ixl_pf *pf)
250 struct i40e_hw *hw = &pf->hw;
251 enum i40e_status_code status;
/* Size HMC by the TX/RX queue-pair counts the FW reported in func_caps. */
253 status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
254 hw->func_caps.num_rx_qp, 0, 0);
256 device_printf(pf->dev, "init_lan_hmc failed: %s\n",
257 i40e_stat_str(hw, status));
/* Direct-only model: no paged HMC backing. */
261 status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
263 device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
264 i40e_stat_str(hw, status));
272 * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache
275 * Shutdown Host Memory Cache if configured.
279 ixl_shutdown_hmc(struct ixl_pf *pf)
281 struct i40e_hw *hw = &pf->hw;
282 enum i40e_status_code status;
284 /* HMC not configured, no need to shutdown */
285 if (hw->hmc.hmc_obj == NULL)
288 status = i40e_shutdown_lan_hmc(hw);
/* Failure is logged but not fatal here; teardown continues elsewhere. */
290 device_printf(pf->dev,
291 "Shutdown LAN HMC failed with code %s\n",
292 i40e_stat_str(hw, status));
295 * Write PF ITR values to queue ITR registers.
298 ixl_configure_itr(struct ixl_pf *pf)
/* Apply both TX and RX interrupt-throttle settings immediately. */
300 ixl_configure_tx_itr(pf);
301 ixl_configure_rx_itr(pf);
304 /*********************************************************************
306 * Get the hardware capabilities
308 **********************************************************************/
311 ixl_get_hw_capabilities(struct ixl_pf *pf)
313 struct i40e_aqc_list_capabilities_element_resp *buf;
314 struct i40e_hw *hw = &pf->hw;
315 device_t dev = pf->dev;
316 enum i40e_status_code status;
317 int len, i2c_intfc_num;
/* In recovery mode the AQ capability query is skipped; disable iWARP. */
321 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
322 hw->func_caps.iwarp = 0;
/* Initial guess: room for 40 capability elements. */
326 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
328 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
329 malloc(len, M_IXL, M_NOWAIT | M_ZERO))) {
330 device_printf(dev, "Unable to allocate cap memory\n");
334 /* This populates the hw struct */
335 status = i40e_aq_discover_capabilities(hw, buf, len,
336 &needed, i40e_aqc_opc_list_func_capabilities, NULL);
/* FW says buffer too small ("needed" holds the required size). */
338 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
340 /* retry once with a larger buffer */
344 } else if (status != I40E_SUCCESS) {
345 device_printf(dev, "capability discovery failed; status %s, error %s\n",
346 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
351 * Some devices have both MDIO and I2C; since this isn't reported
352 * by the FW, check registers to see if an I2C interface exists.
354 i2c_intfc_num = ixl_find_i2c_interface(pf);
355 if (i2c_intfc_num != -1)
358 /* Determine functions to use for driver I2C accesses */
359 switch (pf->i2c_access_method) {
360 case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
/* Prefer AQ-mediated PHY access when the FW advertises support. */
361 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
362 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
363 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
365 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
366 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
370 case IXL_I2C_ACCESS_METHOD_AQ:
371 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
372 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
374 case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
375 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
376 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
378 case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
379 pf->read_i2c_byte = ixl_read_i2c_byte_bb;
380 pf->write_i2c_byte = ixl_write_i2c_byte_bb;
383 /* Should not happen */
384 device_printf(dev, "Error setting I2C access functions\n");
388 /* Print a subset of the capability information. */
390 "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
391 hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
392 hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
393 (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
394 (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
395 (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
401 /* For the set_advertise sysctl */
403 ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
405 device_t dev = pf->dev;
408 /* Make sure to initialize the device to the complete list of
409 * supported speeds on driver load, to ensure unloading and
410 * reloading the driver will restore this value.
412 err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
414 /* Non-fatal error */
415 device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
/* Cache the advertised set in sysctl-friendly (converted) form. */
420 pf->advertised_speed =
421 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
/* Tear down HW-facing structures: LAN HMC, admin queue, and queue manager. */
425 ixl_teardown_hw_structs(struct ixl_pf *pf)
427 enum i40e_status_code status = 0;
428 struct i40e_hw *hw = &pf->hw;
429 device_t dev = pf->dev;
431 /* Shutdown LAN HMC */
432 if (hw->hmc.hmc_obj) {
433 status = i40e_shutdown_lan_hmc(hw);
436 "init: LAN HMC shutdown failure; status %s\n",
437 i40e_stat_str(hw, status));
442 /* Shutdown admin queue */
/* Mask vector-0 interrupts before tearing the AQ down. */
443 ixl_disable_intr0(hw);
444 status = i40e_shutdown_adminq(hw);
447 "init: Admin Queue shutdown failure; status %s\n",
448 i40e_stat_str(hw, status));
/* Release queue allocations tracked by the PF queue manager. */
450 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
456 ** Creates new filter with given MAC address and VLAN ID
458 static struct ixl_mac_filter *
459 ixl_new_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
461 struct ixl_mac_filter *f;
463 /* create a new empty filter */
464 f = malloc(sizeof(struct ixl_mac_filter),
465 M_IXL, M_NOWAIT | M_ZERO);
/* On success, link into the caller's list and record the MAC address. */
467 LIST_INSERT_HEAD(headp, f, ftle);
468 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
476 * ixl_free_filters - Free all filters in given list
477 * headp - pointer to list head
479 * Frees memory used by each entry in the list.
480 * Does not remove filters from HW.
483 ixl_free_filters(struct ixl_ftl_head *headp)
485 struct ixl_mac_filter *f, *nf;
/* Walk with a saved next pointer since each entry is freed in turn. */
487 f = LIST_FIRST(headp);
489 nf = LIST_NEXT(f, ftle);
/*
 * if_foreach_llmaddr() callback: queue a new multicast MAC filter on the
 * to_add list unless an equivalent filter already exists on the VSI.
 */
498 ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
500 struct ixl_add_maddr_arg *ama = arg;
501 struct ixl_vsi *vsi = ama->vsi;
502 const u8 *macaddr = (u8*)LLADDR(sdl);
503 struct ixl_mac_filter *f;
505 /* Does one already exist */
506 f = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
510 f = ixl_new_filter(&ama->to_add, macaddr, IXL_VLAN_ANY);
512 device_printf(vsi->dev, "WARNING: no filter available!!\n");
/* Tag as multicast so ixl_del_multi() can distinguish it later. */
515 f->flags |= IXL_FILTER_MC;
520 /*********************************************************************
523 * Routines for multicast and vlan filter management.
525 *********************************************************************/
527 ixl_add_multi(struct ixl_vsi *vsi)
529 struct ifnet *ifp = vsi->ifp;
530 struct i40e_hw *hw = vsi->hw;
532 struct ixl_add_maddr_arg cb_arg;
534 IOCTL_DEBUGOUT("ixl_add_multi: begin");
536 mcnt = if_llmaddr_count(ifp);
537 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
/* Too many groups for discrete filters: fall back to MC promiscuous. */
538 i40e_aq_set_vsi_multicast_promiscuous(hw,
539 vsi->seid, TRUE, NULL);
540 /* delete all existing MC filters */
541 ixl_del_multi(vsi, true);
/* Gather all interface multicast addresses, then push them to HW. */
546 LIST_INIT(&cb_arg.to_add);
548 mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, &cb_arg);
550 ixl_add_hw_filters(vsi, &cb_arg.to_add, mcnt);
552 IOCTL_DEBUGOUT("ixl_add_multi: end");
/* if_foreach_llmaddr() callback: match an interface lladdr to filter f. */
556 ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
558 struct ixl_mac_filter *f = arg;
560 if (ixl_ether_is_equal(f->macaddr, (u8 *)LLADDR(sdl)))
/*
 * Remove multicast filters from the VSI: all of them if "all" is set,
 * otherwise only those no longer present on the interface lladdr list.
 */
567 ixl_del_multi(struct ixl_vsi *vsi, bool all)
569 struct ixl_ftl_head to_del;
570 struct ifnet *ifp = vsi->ifp;
571 struct ixl_mac_filter *f, *fn;
574 IOCTL_DEBUGOUT("ixl_del_multi: begin");
577 /* Search for removed multicast addresses */
578 LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) {
/* Skip non-MC filters and (unless "all") filters still on the ifp. */
579 if ((f->flags & IXL_FILTER_MC) == 0 ||
580 (!all && (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0)))
583 LIST_REMOVE(f, ftle);
584 LIST_INSERT_HEAD(&to_del, f, ftle);
589 ixl_del_hw_filters(vsi, &to_del, mcnt);
/* Log a link-up notice with speed, FEC, autoneg, and flow-control details. */
593 ixl_link_up_msg(struct ixl_pf *pf)
595 struct i40e_hw *hw = &pf->hw;
596 struct ifnet *ifp = pf->vsi.ifp;
597 char *req_fec_string, *neg_fec_string;
600 fec_abilities = hw->phy.link_info.req_fec_info;
601 /* If both RS and KR are requested, only show RS */
602 if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
603 req_fec_string = ixl_fec_string[0];
604 else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
605 req_fec_string = ixl_fec_string[1];
607 req_fec_string = ixl_fec_string[2];
/* Negotiated FEC: RS takes precedence over KR; else "none". */
609 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
610 neg_fec_string = ixl_fec_string[0];
611 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
612 neg_fec_string = ixl_fec_string[1];
614 neg_fec_string = ixl_fec_string[2];
616 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
618 ixl_link_speed_string(hw->phy.link_info.link_speed),
619 req_fec_string, neg_fec_string,
620 (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
/* Flow control: both pause bits -> full; TX-only; RX-only; else none. */
621 (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
622 hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
623 ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
624 ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
625 ixl_fc_string[1] : ixl_fc_string[0]);
629 * Configure admin queue/misc interrupt cause registers in hardware.
632 ixl_configure_intr0_msix(struct ixl_pf *pf)
634 struct i40e_hw *hw = &pf->hw;
637 /* First set up the adminq - vector 0 */
638 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
639 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
/* Enable the misc interrupt causes serviced on vector 0. */
641 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
642 I40E_PFINT_ICR0_ENA_GRST_MASK |
643 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
644 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
645 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
646 I40E_PFINT_ICR0_ENA_VFLR_MASK |
647 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
648 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
649 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
652 * 0x7FF is the end of the queue list.
653 * This means we won't use MSI-X vector 0 for a queue interrupt
656 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
657 /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
658 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
660 wr32(hw, I40E_PFINT_DYN_CTL0,
661 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
662 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
664 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
/*
 * Register every ifmedia type matching the PHY-type capability bits
 * reported by the device, from 100M up through 40G.
 */
668 ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
670 /* Display supported media types */
671 if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
672 ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);
674 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
675 ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
676 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
677 ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
678 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
679 ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);
681 if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
682 ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);
684 if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
685 ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);
/* Several 10G attachment types all present as TWINAX media. */
687 if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
688 phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
689 phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
690 ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
692 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
693 ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
694 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
695 ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
696 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
697 ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);
/* Likewise, multiple 40G copper/backplane types map to 40G CR4. */
699 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
700 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
701 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
702 phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
703 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
704 ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
705 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
706 ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
707 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
708 ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
710 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
711 ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);
713 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
714 || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
715 ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
716 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
717 ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
718 if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
719 ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
720 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
721 ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
722 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
723 ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);
725 if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
726 ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
728 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
729 ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
730 if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
731 ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
733 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
734 ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
735 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
736 ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
737 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
738 ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
739 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
740 ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
741 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
742 ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
743 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
744 ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
747 /*********************************************************************
749 * Get Firmware Switch configuration
750 * - this will need to be more robust when more complex
751 * switch configurations are enabled.
753 **********************************************************************/
755 ixl_switch_config(struct ixl_pf *pf)
757 struct i40e_hw *hw = &pf->hw;
758 struct ixl_vsi *vsi = &pf->vsi;
759 device_t dev = iflib_get_dev(vsi->ctx);
760 struct i40e_aqc_get_switch_config_resp *sw_config;
761 u8 aq_buf[I40E_AQ_LARGE_BUF];
/* Stack buffer reinterpreted as the AQ switch-config response layout. */
765 memset(&aq_buf, 0, sizeof(aq_buf));
766 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
767 ret = i40e_aq_get_switch_config(hw, sw_config,
768 sizeof(aq_buf), &next, NULL);
770 device_printf(dev, "aq_get_switch_config() failed, error %d,"
771 " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
/* Optionally dump every reported switch element for debugging. */
774 if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
776 "Switch config: header reported: %d in structure, %d total\n",
777 LE16_TO_CPU(sw_config->header.num_reported),
778 LE16_TO_CPU(sw_config->header.num_total));
780 i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
782 "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
783 sw_config->element[i].element_type,
784 LE16_TO_CPU(sw_config->element[i].seid),
785 LE16_TO_CPU(sw_config->element[i].uplink_seid),
786 LE16_TO_CPU(sw_config->element[i].downlink_seid));
789 /* Simplified due to a single VSI */
790 vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
791 vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
792 vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
/* Create the per-VSI sysctl subtree (ethernet stats, rx_errors, queue stats). */
797 ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls)
799 struct sysctl_oid *tree;
800 struct sysctl_oid_list *child;
801 struct sysctl_oid_list *vsi_list;
803 tree = device_get_sysctl_tree(vsi->dev);
804 child = SYSCTL_CHILDREN(tree);
805 vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
806 CTLFLAG_RD, NULL, "VSI Number");
808 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
809 ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);
811 /* Copy of netstat RX errors counter for validation purposes */
812 SYSCTL_ADD_UQUAD(&vsi->sysctl_ctx, vsi_list, OID_AUTO, "rx_errors",
813 CTLFLAG_RD, &vsi->ierrors,
/* Per-queue statistics are optional (queues_sysctls selects them). */
817 ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
821 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
822 * Writes to the ITR registers immediately.
825 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
827 struct ixl_pf *pf = (struct ixl_pf *)arg1;
828 device_t dev = pf->dev;
830 int requested_tx_itr;
/* Standard sysctl read/modify pattern: no new value means read-only query. */
832 requested_tx_itr = pf->tx_itr;
833 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
834 if ((error) || (req->newptr == NULL))
836 if (pf->dynamic_tx_itr) {
838 "Cannot set TX itr value while dynamic TX itr is enabled\n");
841 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
843 "Invalid TX itr value; value must be between 0 and %d\n",
/* Accepted: store and push to hardware immediately. */
848 pf->tx_itr = requested_tx_itr;
849 ixl_configure_tx_itr(pf);
855 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
856 * Writes to the ITR registers immediately.
859 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
861 struct ixl_pf *pf = (struct ixl_pf *)arg1;
862 device_t dev = pf->dev;
864 int requested_rx_itr;
/* Mirrors ixl_sysctl_pf_tx_itr but for the RX interrupt-throttle rate. */
866 requested_rx_itr = pf->rx_itr;
867 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
868 if ((error) || (req->newptr == NULL))
870 if (pf->dynamic_rx_itr) {
872 "Cannot set RX itr value while dynamic RX itr is enabled\n");
875 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
877 "Invalid RX itr value; value must be between 0 and %d\n",
882 pf->rx_itr = requested_rx_itr;
883 ixl_configure_rx_itr(pf);
/* Attach a "mac" sysctl node exposing the port-level MAC statistics. */
889 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
890 struct sysctl_oid_list *child,
891 struct i40e_hw_port_stats *stats)
893 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
894 "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics");
895 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
897 struct i40e_eth_stats *eth_stats = &stats->eth;
898 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
/* Table of counter pointer / sysctl name / description triples. */
900 struct ixl_sysctl_info ctls[] =
902 {&stats->crc_errors, "crc_errors", "CRC Errors"},
903 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
904 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
905 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
906 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
907 /* Packet Reception Stats */
908 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
909 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
910 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
911 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
912 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
913 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
914 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
915 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
916 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
917 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
918 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
919 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
920 /* Packet Transmission Stats */
921 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
922 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
923 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
924 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
925 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
926 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
927 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
929 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
930 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
931 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
932 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
/* Walk the table until the sentinel entry (NULL stat pointer). */
937 struct ixl_sysctl_info *entry = ctls;
938 while (entry->stat != 0)
940 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
941 CTLFLAG_RD, entry->stat,
/* Program the RSS hash key, via AQ on X722 or HKEY registers otherwise. */
948 ixl_set_rss_key(struct ixl_pf *pf)
950 struct i40e_hw *hw = &pf->hw;
951 struct ixl_vsi *vsi = &pf->vsi;
952 device_t dev = pf->dev;
953 u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
954 enum i40e_status_code status;
957 /* Fetch the configured RSS key */
958 rss_getkey((uint8_t *) &rss_seed);
/* Fallback when the kernel RSS option is not providing a key. */
960 ixl_get_default_rss_key(rss_seed);
962 /* Fill out hash function seed */
963 if (hw->mac.type == I40E_MAC_X722) {
964 struct i40e_aqc_get_set_rss_key_data key_data;
/* NOTE(review): 52-byte copy into key_data — size presumably matches
 * the AQ key structure; confirm against i40e_aqc_get_set_rss_key_data. */
965 bcopy(rss_seed, &key_data, 52);
966 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
969 "i40e_aq_set_rss_key status %s, error %s\n",
970 i40e_stat_str(hw, status),
971 i40e_aq_str(hw, hw->aq.asq_last_status));
973 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
974 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
979 * Configure enabled PCTYPES for RSS.
982 ixl_set_rss_pctypes(struct ixl_pf *pf)
984 struct i40e_hw *hw = &pf->hw;
985 u64 set_hena = 0, hena;
/* Translate the kernel RSS hash configuration into i40e PCTYPE bits. */
990 rss_hash_config = rss_gethashconfig();
991 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
992 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
993 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
994 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
995 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
996 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
997 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
998 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
999 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1000 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1001 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1002 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1003 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1004 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
/* Without kernel RSS: use per-MAC default hash-enable sets. */
1006 if (hw->mac.type == I40E_MAC_X722)
1007 set_hena = IXL_DEFAULT_RSS_HENA_X722;
1009 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
/* HENA is a 64-bit value split across two 32-bit registers. */
1011 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1012 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1014 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1015 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1020 ** Setup the PF's RSS parameters.
1023 ixl_config_rss(struct ixl_pf *pf)
/* Key, enabled packet types, then the lookup table — in that order. */
1025 ixl_set_rss_key(pf);
1026 ixl_set_rss_pctypes(pf);
1027 ixl_set_rss_hlut(pf);
1031 * In some firmware versions there is default MAC/VLAN filter
1032 * configured which interferes with filters managed by driver.
1033 * Make sure it's removed.
1036 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1038 struct i40e_aqc_remove_macvlan_element_data e;
/* First removal attempt: perfect MAC match on the permanent address. */
1040 bzero(&e, sizeof(e));
1041 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1043 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1044 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
/* Second attempt additionally ignores the VLAN field. */
1046 bzero(&e, sizeof(e));
1047 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1049 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1050 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1051 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1055 ** Initialize filter list and add filters that the hardware
1056 ** needs to know about.
1058 ** Requires VSI's seid to be set before calling.
1061 ixl_init_filters(struct ixl_vsi *vsi)
1063 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1065 ixl_dbg_filter(pf, "%s: start\n", __func__);
1067 /* Initialize mac filter list for VSI */
1068 LIST_INIT(&vsi->ftl)
;
1069 vsi->num_hw_filters = 0;
1071 /* Receive broadcast Ethernet frames */
1072 i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
/* VF VSIs stop here; the remaining setup applies to the PF LAN VSI. */
1074 if (IXL_VSI_IS_VF(vsi))
1077 ixl_del_default_hw_filters(vsi);
/* Accept frames to our own MAC on any VLAN. */
1079 ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1082 * Prevent Tx flow control frames from being sent out by
1083 * non-firmware transmitters.
1084 * This affects every VSI in the PF.
1086 #ifndef IXL_DEBUG_FC
1087 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1089 if (pf->enable_tx_fc_filter)
1090 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
/* Re-apply the VSI's software filter list to hardware (e.g. after reset). */
1095 ixl_reconfigure_filters(struct ixl_vsi *vsi)
1097 struct i40e_hw *hw = vsi->hw;
1098 struct ixl_ftl_head tmp;
1102 * The ixl_add_hw_filters function adds filters configured
1103 * in HW to a list in VSI. Move all filters to a temporary
1104 * list to avoid corrupting it by concatenating to itself.
1107 LIST_CONCAT(&tmp, &vsi->ftl, ixl_mac_filter, ftle);
1108 cnt = vsi->num_hw_filters;
1109 vsi->num_hw_filters = 0;
1111 ixl_add_hw_filters(vsi, &tmp, cnt);
1113 /* Filter could be removed if MAC address was changed */
1114 ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
/* Nothing more to do when VLAN HW filtering is disabled. */
1116 if ((if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWFILTER) == 0)
1119 * VLAN HW filtering is enabled, make sure that filters
1120 * for all registered VLAN tags are configured
1122 ixl_add_vlan_filters(vsi, hw->mac.addr);
1126 * This routine adds a MAC/VLAN filter to the software filter
1127 * list, then adds that new filter to the HW if it doesn't already
1128 * exist in the SW filter list.
1131 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1133 struct ixl_mac_filter *f, *tmp;
1136 struct ixl_ftl_head to_add;
1143 ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
1144 MAC_FORMAT_ARGS(macaddr), vlan);
1146 /* Does one already exist */
1147 f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
/* NOTE(review): the early return for an existing filter and the NULL
 * check following ixl_new_filter() are elided in this extract. */
1152 f = ixl_new_filter(&to_add, macaddr, vlan);
1154 device_printf(dev, "WARNING: no filter available!!\n");
1157 if (f->vlan != IXL_VLAN_ANY)
1158 f->flags |= IXL_FILTER_VLAN;
1163 ** Is this the first vlan being registered, if so we
1164 ** need to remove the ANY filter that indicates we are
1165 ** not in a vlan, and replace that with a 0 filter.
1167 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1168 tmp = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
1170 struct ixl_ftl_head to_del;
1172 /* Prepare new filter first to avoid removing
1173 * VLAN_ANY filter if allocation fails */
1174 f = ixl_new_filter(&to_add, macaddr, 0);
/* On allocation failure, free the filter queued earlier on to_add
 * before bailing out. */
1176 device_printf(dev, "WARNING: no filter available!!\n");
1177 free(LIST_FIRST(&to_add), M_IXL);
/* Move the VLAN_ANY filter onto a local delete list and remove it
 * from HW in one AQ call. */
1182 LIST_REMOVE(tmp, ftle);
1184 LIST_INSERT_HEAD(&to_del, tmp, ftle);
1185 ixl_del_hw_filters(vsi, &to_del, 1);
1189 ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
1193 * ixl_add_vlan_filters - Add MAC/VLAN filters for all registered VLANs
1194 * @vsi: pointer to VSI
1195 * @macaddr: MAC address
1197 * Adds MAC/VLAN filter for each VLAN configured on the interface
1198 * if there is enough HW filters. Otherwise adds a single filter
1199 * for all tagged and untagged frames to allow all configured VLANs
1200 * to receive traffic.
1203 ixl_add_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
1205 struct ixl_ftl_head to_add;
1206 struct ixl_mac_filter *f;
/* Too many (or zero) VLANs: fall back to one catch-all-VLAN filter. */
1210 if (vsi->num_vlans == 0 || vsi->num_vlans > IXL_MAX_VLAN_FILTERS) {
1211 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1216 /* Add filter for untagged frames if it does not exist yet */
1217 f = ixl_find_filter(&vsi->ftl, macaddr, 0);
1219 f = ixl_new_filter(&to_add, macaddr, 0);
1221 device_printf(vsi->dev, "WARNING: no filter available!!\n");
/* Walk the VLAN bitmap; bit_ffs_at() advances 'vlan' to the next set
 * bit at or after index i. */
1227 for (i = 1; i < EVL_VLID_MASK; i = vlan + 1) {
1228 bit_ffs_at(vsi->vlans_map, i, IXL_VLANS_MAP_LEN, &vlan);
1232 /* Does one already exist */
1233 f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1237 f = ixl_new_filter(&to_add, macaddr, vlan);
/* On allocation failure, release everything queued so far. */
1239 device_printf(vsi->dev, "WARNING: no filter available!!\n");
1240 ixl_free_filters(&to_add);
1246 ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
/* Remove one MAC/VLAN filter from the SW list and from hardware,
 * restoring the catch-all-VLAN filter when the last VLAN goes away. */
1250 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1252 struct ixl_mac_filter *f, *tmp;
1253 struct ixl_ftl_head ftl_head;
1256 ixl_dbg_filter((struct ixl_pf *)vsi->back,
1257 "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
1258 MAC_FORMAT_ARGS(macaddr), vlan);
/* NOTE(review): the NULL check after this lookup is elided in this
 * extract — nothing to do if the filter is not tracked. */
1260 f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1264 LIST_REMOVE(f, ftle);
1265 LIST_INIT(&ftl_head);
1266 LIST_INSERT_HEAD(&ftl_head, f, ftle);
/* NOTE(review): the statement guarded by this 'if' is elided
 * (presumably decrementing vsi->num_vlans) — confirm upstream. */
1267 if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
1270 /* If this is not the last vlan just remove the filter */
1271 if (vlan == IXL_VLAN_ANY || vsi->num_vlans > 0) {
1272 ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
1276 /* It's the last vlan, we need to switch back to a non-vlan filter */
1277 tmp = ixl_find_filter(&vsi->ftl, macaddr, 0);
/* Queue the VLAN-0 filter for deletion alongside 'f'. */
1279 LIST_REMOVE(tmp, ftle);
1280 LIST_INSERT_AFTER(f, tmp, ftle);
1283 ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
1285 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1289 * ixl_del_all_vlan_filters - Delete all VLAN filters with given MAC
1290 * @vsi: VSI which filters need to be removed
1291 * @macaddr: MAC address
1293 * Remove all MAC/VLAN filters with a given MAC address. For multicast
1294 * addresses there is always single filter for all VLANs used (IXL_VLAN_ANY)
1295 * so skip them to speed up processing. Those filters should be removed
1296 * using ixl_del_filter function.
1299 ixl_del_all_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
1301 struct ixl_mac_filter *f, *tmp;
1302 struct ixl_ftl_head to_del;
/* Collect every matching unicast filter onto a local list; the safe
 * iterator allows removal while walking. */
1307 LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, tmp) {
1308 if ((f->flags & IXL_FILTER_MC) != 0 ||
1309 !ixl_ether_is_equal(f->macaddr, macaddr))
1312 LIST_REMOVE(f, ftle);
1313 LIST_INSERT_HEAD(&to_del, f, ftle);
1317 ixl_dbg_filter((struct ixl_pf *)vsi->back,
1318 "%s: " MAC_FORMAT ", to_del_cnt: %d\n",
1319 __func__, MAC_FORMAT_ARGS(macaddr), to_del_cnt);
1321 ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
1325 ** Find the filter with both matching mac addr and vlan id
/* Returns the matching entry, or (per the elided tail of this
 * function) NULL when no filter matches. */
1327 struct ixl_mac_filter *
1328 ixl_find_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
1330 struct ixl_mac_filter *f;
1332 LIST_FOREACH(f, headp, ftle) {
1333 if (ixl_ether_is_equal(f->macaddr, macaddr) &&
1334 (f->vlan == vlan)) {
1343 ** This routine takes additions to the vsi filter
1344 ** table and creates an Admin Queue call to create
1345 ** the filters in the hardware.
1348 ixl_add_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_add, int cnt)
1350 struct i40e_aqc_add_macvlan_element_data *a, *b;
1351 struct ixl_mac_filter *f, *fn;
1355 enum i40e_status_code status;
1362 ixl_dbg_filter(pf, "ixl_add_hw_filters: cnt: %d\n", cnt);
/* Nothing to do for an empty batch. */
1365 ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
/* Build an AQ element array mirroring the to_add list.
 * M_NOWAIT: this can be called from contexts that must not sleep. */
1369 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
1370 M_IXL, M_NOWAIT | M_ZERO);
1372 device_printf(dev, "add_hw_filters failed to get memory\n");
1376 LIST_FOREACH(f, to_add, ftle) {
1377 b = &a[j]; // a pox on fvl long names :)
1378 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
/* IXL_VLAN_ANY maps to the "ignore VLAN" AQ flag; otherwise the
 * exact VLAN tag is programmed. */
1379 if (f->vlan == IXL_VLAN_ANY) {
1381 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
1383 b->vlan_tag = f->vlan;
1386 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1387 ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
1388 MAC_FORMAT_ARGS(f->macaddr));
1394 /* Something went wrong */
1396 "%s ERROR: list of filters to short expected: %d, found: %d\n",
1398 ixl_free_filters(to_add);
1402 status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
/* Full success: adopt the whole batch into the VSI's SW list. */
1403 if (status == I40E_SUCCESS) {
1404 LIST_CONCAT(&vsi->ftl, to_add, ixl_mac_filter, ftle);
1405 vsi->num_hw_filters += j;
1410 "i40e_aq_add_macvlan status %s, error %s\n",
1411 i40e_stat_str(hw, status),
1412 i40e_aq_str(hw, hw->aq.asq_last_status));
1415 /* Verify which filters were actually configured in HW
1416 * and add them to the list */
1417 LIST_FOREACH_SAFE(f, to_add, ftle, fn) {
1418 LIST_REMOVE(f, ftle);
/* NOTE(review): the per-element index used here appears to be
 * tracked by an elided line; I40E_AQC_MM_ERR_NO_RES marks filters
 * the HW rejected for lack of resources. */
1419 if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES) {
1421 "%s filter " MAC_FORMAT " VTAG: %d not added\n",
1423 MAC_FORMAT_ARGS(f->macaddr),
1427 LIST_INSERT_HEAD(&vsi->ftl, f, ftle);
1428 vsi->num_hw_filters++;
1438 ** This routine takes removals in the vsi filter
1439 ** table and creates an Admin Queue call to delete
1440 ** the filters in the hardware.
1443 ixl_del_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_del, int cnt)
1445 struct i40e_aqc_remove_macvlan_element_data *d, *e;
1449 struct ixl_mac_filter *f, *f_temp;
1450 enum i40e_status_code status;
1457 ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);
/* Build the AQ removal array; M_NOWAIT because this path must not
 * sleep. */
1459 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
1460 M_IXL, M_NOWAIT | M_ZERO);
1462 device_printf(dev, "%s: failed to get memory\n", __func__);
1466 LIST_FOREACH_SAFE(f, to_del, ftle, f_temp) {
1467 e = &d[j]; // a pox on fvl long names :)
1468 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
1469 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
/* Catch-all filters delete with "ignore VLAN"; otherwise match the
 * exact tag that was programmed. */
1470 if (f->vlan == IXL_VLAN_ANY) {
1472 e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1474 e->vlan_tag = f->vlan;
1477 ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
1478 MAC_FORMAT_ARGS(f->macaddr));
1480 /* delete entry from the list */
1481 LIST_REMOVE(f, ftle);
/* Sanity check: caller's count must match the list contents. */
1486 if (j != cnt || !LIST_EMPTY(to_del)) {
1487 /* Something went wrong */
1489 "%s ERROR: wrong size of list of filters, expected: %d, found: %d\n",
1491 ixl_free_filters(to_del);
1494 status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
/* On AQ failure, report which individual elements errored. */
1497 "%s: i40e_aq_remove_macvlan status %s, error %s\n",
1498 __func__, i40e_stat_str(hw, status),
1499 i40e_aq_str(hw, hw->aq.asq_last_status));
1500 for (int i = 0; i < j; i++) {
1501 if (d[i].error_code == 0)
1504 "%s Filter does not exist " MAC_FORMAT " VTAG: %d\n",
1505 __func__, MAC_FORMAT_ARGS(d[i].mac_addr),
1510 vsi->num_hw_filters -= j;
1515 ixl_dbg_filter(pf, "%s: end\n", __func__);
/* Enable one PF TX queue (mapped from a VSI-relative index) and poll
 * until the hardware reports the queue enabled. */
1519 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1521 struct i40e_hw *hw = &pf->hw;
1526 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1528 ixl_dbg(pf, IXL_DBG_EN_DIS,
1529 "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
/* Notify firmware before touching the queue-enable register. */
1532 i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
1534 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1535 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
1536 I40E_QTX_ENA_QENA_STAT_MASK;
1537 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1538 /* Verify the enable took */
1539 for (int j = 0; j < 10; j++) {
1540 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1541 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
1543 i40e_usec_delay(10);
/* ~100us total poll budget; report if the queue never came up. */
1545 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
1546 device_printf(pf->dev, "TX queue %d still disabled!\n",
/* Enable one PF RX queue (mapped from a VSI-relative index) and poll
 * until the hardware reports the queue enabled. */
1555 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1557 struct i40e_hw *hw = &pf->hw;
1562 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1564 ixl_dbg(pf, IXL_DBG_EN_DIS,
1565 "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
1568 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1569 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
1570 I40E_QRX_ENA_QENA_STAT_MASK;
1571 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1572 /* Verify the enable took */
1573 for (int j = 0; j < 10; j++) {
1574 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1575 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
1577 i40e_usec_delay(10);
/* ~100us total poll budget; report if the queue never came up. */
1579 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
1580 device_printf(pf->dev, "RX queue %d still disabled!\n",
/* Enable the TX and RX rings for one queue pair; stops at the first
 * failure (TX first), propagating that error to the caller. */
1589 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1593 error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
1594 /* Called function already prints error message */
1597 error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
/* Disable one PF TX queue and poll until the hardware confirms it. */
1602 * Returns error on first ring that is detected hung.
1605 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1607 struct i40e_hw *hw = &pf->hw;
1612 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1614 ixl_dbg(pf, IXL_DBG_EN_DIS,
1615 "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
/* Tell firmware the queue is going down, then allow it to drain. */
1618 i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
1619 i40e_usec_delay(500);
1621 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1622 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
1623 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1624 /* Verify the disable took */
1625 for (int j = 0; j < 10; j++) {
1626 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1627 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
1629 i40e_msec_delay(10);
/* Up to ~100ms; a still-enabled queue indicates a hung ring. */
1631 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
1632 device_printf(pf->dev, "TX queue %d still enabled!\n",
/* Disable one PF RX queue and poll until the hardware confirms it. */
1641 * Returns error on first ring that is detected hung.
1644 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1646 struct i40e_hw *hw = &pf->hw;
1651 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1653 ixl_dbg(pf, IXL_DBG_EN_DIS,
1654 "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
1657 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1658 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
1659 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1660 /* Verify the disable took */
1661 for (int j = 0; j < 10; j++) {
1662 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1663 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
1665 i40e_msec_delay(10);
/* Up to ~100ms; a still-enabled queue indicates a hung ring. */
1667 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
1668 device_printf(pf->dev, "RX queue %d still enabled!\n",
/* Disable the TX and RX rings for one queue pair; stops at the first
 * failure (TX first), propagating that error to the caller. */
1677 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1681 error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
1682 /* Called function already prints error message */
1685 error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
/* Decode and report a TX Malicious Driver Detection (MDD) event,
 * identifying whether the PF and/or a VF caused it, and clear the
 * latched MDD registers. */
1690 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
1692 struct i40e_hw *hw = &pf->hw;
1693 device_t dev = pf->dev;
1695 bool mdd_detected = false;
1696 bool pf_mdd_detected = false;
1697 bool vf_mdd_detected = false;
1700 u8 pf_mdet_num, vp_mdet_num;
1703 /* find what triggered the MDD event */
1704 reg = rd32(hw, I40E_GL_MDET_TX);
1705 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
1706 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
1707 I40E_GL_MDET_TX_PF_NUM_SHIFT;
1708 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
1709 I40E_GL_MDET_TX_VF_NUM_SHIFT;
1710 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
1711 I40E_GL_MDET_TX_EVENT_SHIFT;
1712 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
1713 I40E_GL_MDET_TX_QUEUE_SHIFT;
/* Write-1-to-clear the latched global MDD TX status. */
1714 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
1715 mdd_detected = true;
/* Was the PF itself the offender? */
1721 reg = rd32(hw, I40E_PF_MDET_TX);
1722 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
1723 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
1724 pf_mdet_num = hw->pf_id;
1725 pf_mdd_detected = true;
1728 /* Check if MDD was caused by a VF */
1729 for (int i = 0; i < pf->num_vfs; i++) {
1731 reg = rd32(hw, I40E_VP_MDET_TX(i));
1732 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
1733 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
1735 vf->num_mdd_events++;
1736 vf_mdd_detected = true;
1740 /* Print out an error message */
1741 if (vf_mdd_detected && pf_mdd_detected)
1743 "Malicious Driver Detection event %d"
1744 " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
1745 event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
1746 else if (vf_mdd_detected && !pf_mdd_detected)
1748 "Malicious Driver Detection event %d"
1749 " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
1750 event, queue, pf_num, vf_num, vp_mdet_num);
1751 else if (!vf_mdd_detected && pf_mdd_detected)
1753 "Malicious Driver Detection event %d"
1754 " on TX queue %d, pf number %d (PF-%d)\n",
1755 event, queue, pf_num, pf_mdet_num);
1756 /* Theoretically shouldn't happen */
1759 "TX Malicious Driver Detection event (unknown)\n");
/* Decode and report an RX Malicious Driver Detection (MDD) event.
 * Mirrors the TX handler, except GL_MDET_RX lacks a VF-number field
 * (noted in the original comment below). */
1763 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
1765 struct i40e_hw *hw = &pf->hw;
1766 device_t dev = pf->dev;
1768 bool mdd_detected = false;
1769 bool pf_mdd_detected = false;
1770 bool vf_mdd_detected = false;
1773 u8 pf_mdet_num, vp_mdet_num;
1777 * GL_MDET_RX doesn't contain VF number information, unlike
1780 reg = rd32(hw, I40E_GL_MDET_RX);
1781 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
1782 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
1783 I40E_GL_MDET_RX_FUNCTION_SHIFT;
1784 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
1785 I40E_GL_MDET_RX_EVENT_SHIFT;
1786 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
1787 I40E_GL_MDET_RX_QUEUE_SHIFT;
/* Write-1-to-clear the latched global MDD RX status. */
1788 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
1789 mdd_detected = true;
/* Was the PF itself the offender? */
1795 reg = rd32(hw, I40E_PF_MDET_RX);
1796 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
1797 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
1798 pf_mdet_num = hw->pf_id;
1799 pf_mdd_detected = true;
1802 /* Check if MDD was caused by a VF */
1803 for (int i = 0; i < pf->num_vfs; i++) {
1805 reg = rd32(hw, I40E_VP_MDET_RX(i));
1806 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
1807 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
1809 vf->num_mdd_events++;
1810 vf_mdd_detected = true;
1814 /* Print out an error message */
1815 if (vf_mdd_detected && pf_mdd_detected)
1817 "Malicious Driver Detection event %d"
1818 " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
1819 event, queue, pf_num, pf_mdet_num, vp_mdet_num);
1820 else if (vf_mdd_detected && !pf_mdd_detected)
1822 "Malicious Driver Detection event %d"
1823 " on RX queue %d, pf number %d, (VF-%d)\n",
1824 event, queue, pf_num, vp_mdet_num);
1825 else if (!vf_mdd_detected && pf_mdd_detected)
1827 "Malicious Driver Detection event %d"
1828 " on RX queue %d, pf number %d (PF-%d)\n",
1829 event, queue, pf_num, pf_mdet_num);
1830 /* Theoretically shouldn't happen */
1833 "RX Malicious Driver Detection event (unknown)\n");
1837 * ixl_handle_mdd_event
1839 * Called from interrupt handler to identify possibly malicious vfs
1840 * (But also detects events from the PF, as well)
1843 ixl_handle_mdd_event(struct ixl_pf *pf)
1845 struct i40e_hw *hw = &pf->hw;
1849 * Handle both TX/RX because it's possible they could
1850 * both trigger in the same interrupt.
1852 ixl_handle_tx_mdd_event(pf);
1853 ixl_handle_rx_mdd_event(pf);
/* Event has been serviced; clear the pending flag... */
1855 atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
1857 /* re-enable mdd interrupt cause */
1858 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1859 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1860 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/* Enable the "other"/admin interrupt (vector 0). */
1865 ixl_enable_intr0(struct i40e_hw *hw)
1869 /* Use IXL_ITR_NONE so ITR isn't updated here */
1870 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
1871 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1872 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
1873 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/* Disable the "other"/admin interrupt (vector 0) by writing the
 * control register without the INTENA bit set. */
1877 ixl_disable_intr0(struct i40e_hw *hw)
1881 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
1882 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/* Enable the MSI-X interrupt for queue vector 'id' (ITR untouched). */
1887 ixl_enable_queue(struct i40e_hw *hw, int id)
1891 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1892 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1893 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1894 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/* Disable the MSI-X interrupt for queue vector 'id' by writing the
 * control register without the INTENA bit set. */
1898 ixl_disable_queue(struct i40e_hw *hw, int id)
1902 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1903 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/* Recover from an EMP (firmware) reset: quiesce, detect whether the
 * device came back in recovery mode, and rebuild HW structures. */
1907 ixl_handle_empr_reset(struct ixl_pf *pf)
1909 struct ixl_vsi *vsi = &pf->vsi;
/* Remember whether the interface was up so it can be restored after
 * the rebuild. */
1910 bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
1912 ixl_prepare_for_reset(pf, is_up);
1914 * i40e_pf_reset checks the type of reset and acts
1915 * accordingly. If EMP or Core reset was performed
1916 * doing PF reset is not necessary and it sometimes
/* If firmware entered recovery mode during reset, limit the driver
 * and report link down instead of rebuilding normally. */
1921 if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
1922 ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
1923 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
1924 device_printf(pf->dev,
1925 "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
1926 pf->link_up = FALSE;
1927 ixl_update_link_status(pf);
1930 ixl_rebuild_hw_structs_after_reset(pf, is_up);
1932 atomic_clear_32(&pf->state, IXL_PF_STATE_RESETTING);
/* Read all port-level hardware statistics registers and accumulate
 * them into pf->stats, using pf->stats_offsets as the baseline so
 * counters report from zero since driver load. Also refreshes VSI
 * stats and per-VF ethernet stats. */
1936 ixl_update_stats_counters(struct ixl_pf *pf)
1938 struct i40e_hw *hw = &pf->hw;
1939 struct ixl_vsi *vsi = &pf->vsi;
/* Snapshot XOFF count before the update to detect pause frames
 * received during this interval (see watchdog note below). */
1941 u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;
1943 struct i40e_hw_port_stats *nsd = &pf->stats;
1944 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
1946 /* Update hw stats */
1947 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
1948 pf->stat_offsets_loaded,
1949 &osd->crc_errors, &nsd->crc_errors);
1950 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
1951 pf->stat_offsets_loaded,
1952 &osd->illegal_bytes, &nsd->illegal_bytes);
1953 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
1954 I40E_GLPRT_GORCL(hw->port),
1955 pf->stat_offsets_loaded,
1956 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
1957 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
1958 I40E_GLPRT_GOTCL(hw->port),
1959 pf->stat_offsets_loaded,
1960 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
1961 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
1962 pf->stat_offsets_loaded,
1963 &osd->eth.rx_discards,
1964 &nsd->eth.rx_discards);
1965 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
1966 I40E_GLPRT_UPRCL(hw->port),
1967 pf->stat_offsets_loaded,
1968 &osd->eth.rx_unicast,
1969 &nsd->eth.rx_unicast);
1970 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
1971 I40E_GLPRT_UPTCL(hw->port),
1972 pf->stat_offsets_loaded,
1973 &osd->eth.tx_unicast,
1974 &nsd->eth.tx_unicast);
1975 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
1976 I40E_GLPRT_MPRCL(hw->port),
1977 pf->stat_offsets_loaded,
1978 &osd->eth.rx_multicast,
1979 &nsd->eth.rx_multicast);
1980 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
1981 I40E_GLPRT_MPTCL(hw->port),
1982 pf->stat_offsets_loaded,
1983 &osd->eth.tx_multicast,
1984 &nsd->eth.tx_multicast);
1985 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
1986 I40E_GLPRT_BPRCL(hw->port),
1987 pf->stat_offsets_loaded,
1988 &osd->eth.rx_broadcast,
1989 &nsd->eth.rx_broadcast);
1990 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
1991 I40E_GLPRT_BPTCL(hw->port),
1992 pf->stat_offsets_loaded,
1993 &osd->eth.tx_broadcast,
1994 &nsd->eth.tx_broadcast);
/* Link/fault counters. */
1996 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
1997 pf->stat_offsets_loaded,
1998 &osd->tx_dropped_link_down,
1999 &nsd->tx_dropped_link_down);
2000 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2001 pf->stat_offsets_loaded,
2002 &osd->mac_local_faults,
2003 &nsd->mac_local_faults);
2004 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2005 pf->stat_offsets_loaded,
2006 &osd->mac_remote_faults,
2007 &nsd->mac_remote_faults);
2008 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2009 pf->stat_offsets_loaded,
2010 &osd->rx_length_errors,
2011 &nsd->rx_length_errors);
2013 /* Flow control (LFC) stats */
2014 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2015 pf->stat_offsets_loaded,
2016 &osd->link_xon_rx, &nsd->link_xon_rx);
2017 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2018 pf->stat_offsets_loaded,
2019 &osd->link_xon_tx, &nsd->link_xon_tx);
2020 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2021 pf->stat_offsets_loaded,
2022 &osd->link_xoff_rx, &nsd->link_xoff_rx);
2023 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2024 pf->stat_offsets_loaded,
2025 &osd->link_xoff_tx, &nsd->link_xoff_tx);
2028 * For watchdog management we need to know if we have been paused
2029 * during the last interval, so capture that here.
2031 if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
2032 vsi->shared->isc_pause_frames = 1;
2034 /* Packet size stats rx */
2035 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2036 I40E_GLPRT_PRC64L(hw->port),
2037 pf->stat_offsets_loaded,
2038 &osd->rx_size_64, &nsd->rx_size_64);
2039 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2040 I40E_GLPRT_PRC127L(hw->port),
2041 pf->stat_offsets_loaded,
2042 &osd->rx_size_127, &nsd->rx_size_127);
2043 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2044 I40E_GLPRT_PRC255L(hw->port),
2045 pf->stat_offsets_loaded,
2046 &osd->rx_size_255, &nsd->rx_size_255);
2047 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2048 I40E_GLPRT_PRC511L(hw->port),
2049 pf->stat_offsets_loaded,
2050 &osd->rx_size_511, &nsd->rx_size_511);
2051 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2052 I40E_GLPRT_PRC1023L(hw->port),
2053 pf->stat_offsets_loaded,
2054 &osd->rx_size_1023, &nsd->rx_size_1023);
2055 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2056 I40E_GLPRT_PRC1522L(hw->port),
2057 pf->stat_offsets_loaded,
2058 &osd->rx_size_1522, &nsd->rx_size_1522);
2059 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2060 I40E_GLPRT_PRC9522L(hw->port),
2061 pf->stat_offsets_loaded,
2062 &osd->rx_size_big, &nsd->rx_size_big);
2064 /* Packet size stats tx */
2065 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2066 I40E_GLPRT_PTC64L(hw->port),
2067 pf->stat_offsets_loaded,
2068 &osd->tx_size_64, &nsd->tx_size_64);
2069 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2070 I40E_GLPRT_PTC127L(hw->port),
2071 pf->stat_offsets_loaded,
2072 &osd->tx_size_127, &nsd->tx_size_127);
2073 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2074 I40E_GLPRT_PTC255L(hw->port),
2075 pf->stat_offsets_loaded,
2076 &osd->tx_size_255, &nsd->tx_size_255);
2077 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2078 I40E_GLPRT_PTC511L(hw->port),
2079 pf->stat_offsets_loaded,
2080 &osd->tx_size_511, &nsd->tx_size_511);
2081 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2082 I40E_GLPRT_PTC1023L(hw->port),
2083 pf->stat_offsets_loaded,
2084 &osd->tx_size_1023, &nsd->tx_size_1023);
2085 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2086 I40E_GLPRT_PTC1522L(hw->port),
2087 pf->stat_offsets_loaded,
2088 &osd->tx_size_1522, &nsd->tx_size_1522);
2089 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2090 I40E_GLPRT_PTC9522L(hw->port),
2091 pf->stat_offsets_loaded,
2092 &osd->tx_size_big, &nsd->tx_size_big);
/* RX error counters. */
2094 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2095 pf->stat_offsets_loaded,
2096 &osd->rx_undersize, &nsd->rx_undersize);
2097 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2098 pf->stat_offsets_loaded,
2099 &osd->rx_fragments, &nsd->rx_fragments);
2100 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2101 pf->stat_offsets_loaded,
2102 &osd->rx_oversize, &nsd->rx_oversize);
2103 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2104 pf->stat_offsets_loaded,
2105 &osd->rx_jabber, &nsd->rx_jabber);
/* Energy Efficient Ethernet (LPI) status and counters. */
2107 i40e_get_phy_lpi_status(hw, nsd);
2109 i40e_lpi_stat_update(hw, pf->stat_offsets_loaded,
2110 &osd->tx_lpi_count, &nsd->tx_lpi_count,
2111 &osd->rx_lpi_count, &nsd->rx_lpi_count);
/* After the first pass the offsets are valid baselines. */
2113 pf->stat_offsets_loaded = true;
2116 /* Update vsi stats */
2117 ixl_update_vsi_stats(vsi);
/* Refresh ethernet stats for every active VF VSI. */
2119 for (int i = 0; i < pf->num_vfs; i++) {
2121 if (vf->vf_flags & VF_FLAG_ENABLED)
2122 ixl_update_eth_stats(&pf->vfs[i].vsi);
2127 * Update VSI-specific ethernet statistics counters.
/* Reads the per-VSI GLV_* counters selected by the VSI's HW stat
 * index, baselining against eth_stats_offsets on the first pass. */
2130 ixl_update_eth_stats(struct ixl_vsi *vsi)
2132 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2133 struct i40e_hw *hw = &pf->hw;
2134 struct i40e_eth_stats *es;
2135 struct i40e_eth_stats *oes;
2136 u16 stat_idx = vsi->info.stat_counter_idx;
2138 es = &vsi->eth_stats;
2139 oes = &vsi->eth_stats_offsets;
2141 /* Gather up the stats that the hw collects */
2142 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2143 vsi->stat_offsets_loaded,
2144 &oes->tx_errors, &es->tx_errors);
2145 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2146 vsi->stat_offsets_loaded,
2147 &oes->rx_discards, &es->rx_discards);
2149 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2150 I40E_GLV_GORCL(stat_idx),
2151 vsi->stat_offsets_loaded,
2152 &oes->rx_bytes, &es->rx_bytes);
2153 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2154 I40E_GLV_UPRCL(stat_idx),
2155 vsi->stat_offsets_loaded,
2156 &oes->rx_unicast, &es->rx_unicast);
2157 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2158 I40E_GLV_MPRCL(stat_idx),
2159 vsi->stat_offsets_loaded,
2160 &oes->rx_multicast, &es->rx_multicast);
2161 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2162 I40E_GLV_BPRCL(stat_idx),
2163 vsi->stat_offsets_loaded,
2164 &oes->rx_broadcast, &es->rx_broadcast);
2166 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2167 I40E_GLV_GOTCL(stat_idx),
2168 vsi->stat_offsets_loaded,
2169 &oes->tx_bytes, &es->tx_bytes);
2170 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2171 I40E_GLV_UPTCL(stat_idx),
2172 vsi->stat_offsets_loaded,
2173 &oes->tx_unicast, &es->tx_unicast);
2174 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2175 I40E_GLV_MPTCL(stat_idx),
2176 vsi->stat_offsets_loaded,
2177 &oes->tx_multicast, &es->tx_multicast);
2178 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2179 I40E_GLV_BPTCL(stat_idx),
2180 vsi->stat_offsets_loaded,
2181 &oes->tx_broadcast, &es->tx_broadcast);
/* Baselines are valid after the first pass. */
2182 vsi->stat_offsets_loaded = true;
/* Refresh a VSI's ethernet stats and propagate them to the ifnet
 * interface counters. */
2186 ixl_update_vsi_stats(struct ixl_vsi *vsi)
2190 struct i40e_eth_stats *es;
2191 u64 tx_discards, csum_errs;
2193 struct i40e_hw_port_stats *nsd;
2197 es = &vsi->eth_stats;
2200 ixl_update_eth_stats(vsi);
2202 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
/* Sum per-RX-queue checksum error counts into the port total. */
2205 for (int i = 0; i < vsi->num_rx_queues; i++)
2206 csum_errs += vsi->rx_queues[i].rxr.csum_errs;
2207 nsd->checksum_error = csum_errs;
2209 /* Update ifnet stats */
2210 IXL_SET_IPACKETS(vsi, es->rx_unicast +
2213 IXL_SET_OPACKETS(vsi, es->tx_unicast +
2216 IXL_SET_IBYTES(vsi, es->rx_bytes);
2217 IXL_SET_OBYTES(vsi, es->tx_bytes);
2218 IXL_SET_IMCASTS(vsi, es->rx_multicast);
2219 IXL_SET_OMCASTS(vsi, es->tx_multicast);
/* Input errors aggregate all RX-side error counters. */
2221 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
2222 nsd->checksum_error + nsd->rx_length_errors +
2223 nsd->rx_undersize + nsd->rx_fragments + nsd->rx_oversize +
2225 IXL_SET_OERRORS(vsi, es->tx_errors);
2226 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
2227 IXL_SET_OQDROPS(vsi, tx_discards);
2228 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
2229 IXL_SET_COLLISIONS(vsi, 0);
2233 * Reset all of the stats for the given pf
/* Zeroes both current stats and offsets, and forces the next update
 * pass to re-capture baseline offsets. */
2236 ixl_pf_reset_stats(struct ixl_pf *pf)
2238 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
2239 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
2240 pf->stat_offsets_loaded = false;
2244 * Resets all stats of the given vsi
/* VSI-level analogue of ixl_pf_reset_stats(). */
2247 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
2249 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
2250 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
2251 vsi->stat_offsets_loaded = false;
2255 * Read and update a 48 bit stat from the hw
2257 * Since the device stats are not reset at PFReset, they likely will not
2258 * be zeroed when the driver starts. We'll save the first values read
2259 * and use them as offsets to be subtracted from the raw values in order
2260 * to report stats that count from zero.
2263 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
2264 bool offset_loaded, u64 *offset, u64 *stat)
/* NOTE(review): the "save first reading into *offset when
 * !offset_loaded" branch is elided in this extract. */
2268 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
2269 new_data = rd64(hw, loreg);
2272 * Use two rd32's instead of one rd64; FreeBSD versions before
2273 * 10 don't support 64-bit bus reads/writes.
/* High register contributes only 16 bits — total counter is 48 bits. */
2275 new_data = rd32(hw, loreg);
2276 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
/* Handle 48-bit wraparound relative to the saved offset. */
2281 if (new_data >= *offset)
2282 *stat = new_data - *offset;
2284 *stat = (new_data + ((u64)1 << 48)) - *offset;
2285 *stat &= 0xFFFFFFFFFFFFULL;
2289 * Read and update a 32 bit stat from the hw
/* 32-bit analogue of ixl_stat_update48(): subtract the saved offset
 * and handle 32-bit counter wraparound. */
2292 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
2293 bool offset_loaded, u64 *offset, u64 *stat)
2297 new_data = rd32(hw, reg);
2300 if (new_data >= *offset)
2301 *stat = (u32)(new_data - *offset);
2303 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
2307 * Add subset of device sysctls safe to use in recovery mode
/* Registers only the sysctls that do not require a fully functional
 * device: fw_version plus a hidden "debug" subtree of diagnostics. */
2310 ixl_add_sysctls_recovery_mode(struct ixl_pf *pf)
2312 device_t dev = pf->dev;
2314 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2315 struct sysctl_oid_list *ctx_list =
2316 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2318 struct sysctl_oid *debug_node;
2319 struct sysctl_oid_list *debug_list;
2321 SYSCTL_ADD_PROC(ctx, ctx_list,
2322 OID_AUTO, "fw_version",
2323 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2324 ixl_sysctl_show_fw, "A", "Firmware version");
2326 /* Add sysctls meant to print debug information, but don't list them
2327 * in "sysctl -a" output. */
2328 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2329 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2331 debug_list = SYSCTL_CHILDREN(debug_node);
2333 SYSCTL_ADD_UINT(ctx, debug_list,
2334 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2335 &pf->hw.debug_mask, 0, "Shared code debug message level");
2337 SYSCTL_ADD_UINT(ctx, debug_list,
2338 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2339 &pf->dbg_mask, 0, "Non-shared code debug message level");
2341 SYSCTL_ADD_PROC(ctx, debug_list,
2342 OID_AUTO, "dump_debug_data",
2343 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2344 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2346 SYSCTL_ADD_PROC(ctx, debug_list,
2347 OID_AUTO, "do_pf_reset",
2348 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2349 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2351 SYSCTL_ADD_PROC(ctx, debug_list,
2352 OID_AUTO, "do_core_reset",
2353 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2354 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2356 SYSCTL_ADD_PROC(ctx, debug_list,
2357 OID_AUTO, "do_global_reset",
2358 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2359 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2361 SYSCTL_ADD_PROC(ctx, debug_list,
2362 OID_AUTO, "queue_interrupt_table",
2363 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2364 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
/*
 * Register the full set of device sysctls for normal (non-recovery)
 * operation: flow control, speed advertisement/reporting, ITR controls,
 * FEC knobs (25G devices only), firmware LLDP, EEE status/counters, and
 * a hidden (CTLFLAG_SKIP) "debug" subtree of diagnostic handlers.
 */
2368 ixl_add_device_sysctls(struct ixl_pf *pf)
2370 device_t dev = pf->dev;
2371 struct i40e_hw *hw = &pf->hw;
2373 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2374 struct sysctl_oid_list *ctx_list =
2375 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2377 struct sysctl_oid *debug_node;
2378 struct sysctl_oid_list *debug_list;
2380 struct sysctl_oid *fec_node;
2381 struct sysctl_oid_list *fec_list;
2382 struct sysctl_oid *eee_node;
2383 struct sysctl_oid_list *eee_list;
2385 /* Set up sysctls */
2386 SYSCTL_ADD_PROC(ctx, ctx_list,
2387 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2388 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
2390 SYSCTL_ADD_PROC(ctx, ctx_list,
2391 OID_AUTO, "advertise_speed",
2392 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2393 ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
2395 SYSCTL_ADD_PROC(ctx, ctx_list,
2396 OID_AUTO, "supported_speeds",
2397 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2398 ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
2400 SYSCTL_ADD_PROC(ctx, ctx_list,
2401 OID_AUTO, "current_speed",
2402 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2403 ixl_sysctl_current_speed, "A", "Current Port Speed");
2405 SYSCTL_ADD_PROC(ctx, ctx_list,
2406 OID_AUTO, "fw_version",
2407 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2408 ixl_sysctl_show_fw, "A", "Firmware version");
2410 SYSCTL_ADD_PROC(ctx, ctx_list,
2411 OID_AUTO, "unallocated_queues",
2412 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2413 ixl_sysctl_unallocated_queues, "I",
2414 "Queues not allocated to a PF or VF");
2416 SYSCTL_ADD_PROC(ctx, ctx_list,
2418 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2419 ixl_sysctl_pf_tx_itr, "I",
2420 "Immediately set TX ITR value for all queues");
2422 SYSCTL_ADD_PROC(ctx, ctx_list,
2424 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2425 ixl_sysctl_pf_rx_itr, "I",
2426 "Immediately set RX ITR value for all queues");
2428 SYSCTL_ADD_INT(ctx, ctx_list,
2429 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
2430 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
2432 SYSCTL_ADD_INT(ctx, ctx_list,
2433 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
2434 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
2436 /* Add FEC sysctls for 25G adapters */
2437 if (i40e_is_25G_device(hw->device_id)) {
2438 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2439 OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2441 fec_list = SYSCTL_CHILDREN(fec_node);
2443 SYSCTL_ADD_PROC(ctx, fec_list,
2444 OID_AUTO, "fc_ability",
2445 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2446 ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
2448 SYSCTL_ADD_PROC(ctx, fec_list,
2449 OID_AUTO, "rs_ability",
2450 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2451 ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
2453 SYSCTL_ADD_PROC(ctx, fec_list,
2454 OID_AUTO, "fc_requested",
2455 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2456 ixl_sysctl_fec_fc_request, "I",
2457 "FC FEC mode requested on link");
2459 SYSCTL_ADD_PROC(ctx, fec_list,
2460 OID_AUTO, "rs_requested",
2461 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2462 ixl_sysctl_fec_rs_request, "I",
2463 "RS FEC mode requested on link");
2465 SYSCTL_ADD_PROC(ctx, fec_list,
2466 OID_AUTO, "auto_fec_enabled",
2467 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2468 ixl_sysctl_fec_auto_enable, "I",
2469 "Let FW decide FEC ability/request modes");
2472 SYSCTL_ADD_PROC(ctx, ctx_list,
2473 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2474 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
2476 eee_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2477 OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2478 "Energy Efficient Ethernet (EEE) Sysctls");
2479 eee_list = SYSCTL_CHILDREN(eee_node);
2481 SYSCTL_ADD_PROC(ctx, eee_list,
2482 OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
2483 pf, 0, ixl_sysctl_eee_enable, "I",
2484 "Enable Energy Efficient Ethernet (EEE)");
2486 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status",
2487 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0,
2490 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status",
2491 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0,
2494 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count",
2495 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count,
2498 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count",
2499 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count,
2502 /* Add sysctls meant to print debug information, but don't list them
2503 * in "sysctl -a" output. */
2504 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2505 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2507 debug_list = SYSCTL_CHILDREN(debug_node);
2509 SYSCTL_ADD_UINT(ctx, debug_list,
2510 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2511 &pf->hw.debug_mask, 0, "Shared code debug message level");
2513 SYSCTL_ADD_UINT(ctx, debug_list,
2514 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2515 &pf->dbg_mask, 0, "Non-shared code debug message level");
2517 SYSCTL_ADD_PROC(ctx, debug_list,
2518 OID_AUTO, "link_status",
2519 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2520 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
2522 SYSCTL_ADD_PROC(ctx, debug_list,
2523 OID_AUTO, "phy_abilities",
2524 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2525 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
2527 SYSCTL_ADD_PROC(ctx, debug_list,
2528 OID_AUTO, "filter_list",
2529 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2530 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
2532 SYSCTL_ADD_PROC(ctx, debug_list,
2533 OID_AUTO, "hw_res_alloc",
2534 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2535 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
2537 SYSCTL_ADD_PROC(ctx, debug_list,
2538 OID_AUTO, "switch_config",
2539 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2540 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
2542 SYSCTL_ADD_PROC(ctx, debug_list,
2543 OID_AUTO, "switch_vlans",
2544 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2545 pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration");
2547 SYSCTL_ADD_PROC(ctx, debug_list,
2548 OID_AUTO, "rss_key",
2549 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2550 pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
2552 SYSCTL_ADD_PROC(ctx, debug_list,
2553 OID_AUTO, "rss_lut",
2554 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2555 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
2557 SYSCTL_ADD_PROC(ctx, debug_list,
2558 OID_AUTO, "rss_hena",
2559 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2560 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
2562 SYSCTL_ADD_PROC(ctx, debug_list,
2563 OID_AUTO, "disable_fw_link_management",
2564 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2565 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
2567 SYSCTL_ADD_PROC(ctx, debug_list,
2568 OID_AUTO, "dump_debug_data",
2569 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2570 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2572 SYSCTL_ADD_PROC(ctx, debug_list,
2573 OID_AUTO, "do_pf_reset",
2574 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2575 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2577 SYSCTL_ADD_PROC(ctx, debug_list,
2578 OID_AUTO, "do_core_reset",
2579 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2580 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2582 SYSCTL_ADD_PROC(ctx, debug_list,
2583 OID_AUTO, "do_global_reset",
2584 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2585 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2587 SYSCTL_ADD_PROC(ctx, debug_list,
2588 OID_AUTO, "queue_interrupt_table",
2589 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2590 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
/* NOTE(review): i2c sysctls below presumably gated on SFF/I2C capability — guard elided in this view. */
2593 SYSCTL_ADD_PROC(ctx, debug_list,
2594 OID_AUTO, "read_i2c_byte",
2595 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2596 pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
2598 SYSCTL_ADD_PROC(ctx, debug_list,
2599 OID_AUTO, "write_i2c_byte",
2600 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2601 pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
2603 SYSCTL_ADD_PROC(ctx, debug_list,
2604 OID_AUTO, "read_i2c_diag_data",
2605 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2606 pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
2611 * Primarily for finding out how many queues can be assigned to VFs,
/*
 * Sysctl handler: report the number of HW queues not yet allocated to
 * the PF or any VF, as tracked by the queue manager (read-only).
 */
2615 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
2617 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2620 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
2622 return sysctl_handle_int(oidp, NULL, queues, req);
/*
 * Map an AQ link-speed enum value to a human-readable string from a
 * local lookup table.  Each case selects a table index (index
 * assignments are elided in this view); unknown values fall through to
 * the I40E_LINK_SPEED_UNKNOWN entry.
 */
2626 ixl_link_speed_string(enum i40e_aq_link_speed link_speed)
2628 const char * link_speed_str[] = {
2641 switch (link_speed) {
2642 case I40E_LINK_SPEED_100MB:
2645 case I40E_LINK_SPEED_1GB:
2648 case I40E_LINK_SPEED_10GB:
2651 case I40E_LINK_SPEED_40GB:
2654 case I40E_LINK_SPEED_20GB:
2657 case I40E_LINK_SPEED_25GB:
2660 case I40E_LINK_SPEED_2_5GB:
2663 case I40E_LINK_SPEED_5GB:
2666 case I40E_LINK_SPEED_UNKNOWN:
2672 return (link_speed_str[index]);
/*
 * Sysctl handler: refresh link status from the HW, then report the
 * current port speed as a string (read-only).
 */
2676 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
2678 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2679 struct i40e_hw *hw = &pf->hw;
2682 ixl_update_link_status(pf);
2684 error = sysctl_handle_string(oidp,
2686 ixl_link_speed_string(hw->phy.link_info.link_speed)),
2693 * Converts 8-bit speeds value to and from sysctl flags and
2694 * Admin Queue flags.
/*
 * Each speedmap entry packs the AQ flag in the low byte and the
 * corresponding sysctl flag in the high byte; to_aq selects the
 * direction of the translation.
 */
2697 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
2699 #define SPEED_MAP_SIZE 8
2700 static u16 speedmap[SPEED_MAP_SIZE] = {
2701 (I40E_LINK_SPEED_100MB | (0x1 << 8)),
2702 (I40E_LINK_SPEED_1GB | (0x2 << 8)),
2703 (I40E_LINK_SPEED_10GB | (0x4 << 8)),
2704 (I40E_LINK_SPEED_20GB | (0x8 << 8)),
2705 (I40E_LINK_SPEED_25GB | (0x10 << 8)),
2706 (I40E_LINK_SPEED_40GB | (0x20 << 8)),
2707 (I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
2708 (I40E_LINK_SPEED_5GB | (0x80 << 8)),
2712 for (int i = 0; i < SPEED_MAP_SIZE; i++) {
2714 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
2716 retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
/*
 * Program the PHY with a new set of advertised link speeds.  Reads the
 * current PHY capabilities, builds a set_phy_config request that keeps
 * the existing abilities/EEE/FEC settings and changes only link_speed,
 * then sends it via the Admin Queue.  When from_aq is true the speeds
 * argument is already in AQ format; otherwise it is converted from the
 * sysctl bitmap first.
 */
2723 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
2725 struct i40e_hw *hw = &pf->hw;
2726 device_t dev = pf->dev;
2727 struct i40e_aq_get_phy_abilities_resp abilities;
2728 struct i40e_aq_set_phy_config config;
2729 enum i40e_status_code aq_error = 0;
2731 /* Get current capability information */
2732 aq_error = i40e_aq_get_phy_capabilities(hw,
2733 FALSE, FALSE, &abilities, NULL);
2736 "%s: Error getting phy capabilities %d,"
2737 " aq error: %d\n", __func__, aq_error,
2738 hw->aq.asq_last_status);
2742 /* Prepare new config */
2743 bzero(&config, sizeof(config));
2745 config.link_speed = speeds;
2747 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
2748 config.phy_type = abilities.phy_type;
2749 config.phy_type_ext = abilities.phy_type_ext;
2750 config.abilities = abilities.abilities
2751 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2752 config.eee_capability = abilities.eee_capability;
2753 config.eeer = abilities.eeer_val;
2754 config.low_power_ctrl = abilities.d3_lpan;
2755 config.fec_config = abilities.fec_cfg_curr_mod_ext_info
2756 & I40E_AQ_PHY_FEC_CONFIG_MASK;
2758 /* Do aq command & restart link */
2759 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
2762 "%s: Error setting new phy config %d,"
2763 " aq error: %d\n", __func__, aq_error,
2764 hw->aq.asq_last_status);
2772 ** Supported link speeds
/* Sysctl handler: report supported speeds as the sysctl flag bitmap (read-only). */
2784 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
2786 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2787 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
2789 return sysctl_handle_int(oidp, NULL, supported, req);
2793 ** Control link advertise speed:
2795 ** 0x1 - advertise 100 Mb
2796 ** 0x2 - advertise 1G
2797 ** 0x4 - advertise 10G
2798 ** 0x8 - advertise 20G
2799 ** 0x10 - advertise 25G
2800 ** 0x20 - advertise 40G
2801 ** 0x40 - advertise 2.5G
2802 ** 0x80 - advertise 5G
2804 ** Set to 0 to disable link
/*
 * Sysctl handler: validate the requested advertise bitmap (rejected in
 * FW recovery mode, out-of-range bits, or speeds the adapter does not
 * support), apply it via ixl_set_advertised_speeds(), then record the
 * new value and refresh link status.
 */
2807 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
2809 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2810 device_t dev = pf->dev;
2811 u8 converted_speeds;
2812 int requested_ls = 0;
2815 /* Read in new mode */
2816 requested_ls = pf->advertised_speed;
2817 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
2818 if ((error) || (req->newptr == NULL))
2820 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
2821 device_printf(dev, "Interface is currently in FW recovery mode. "
2822 "Setting advertise speed not supported\n");
2826 /* Error out if bits outside of possible flag range are set */
2827 if ((requested_ls & ~((u8)0xFF)) != 0) {
2828 device_printf(dev, "Input advertised speed out of range; "
2829 "valid flags are: 0x%02x\n",
2830 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2834 /* Check if adapter supports input value */
2835 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
2836 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
2837 device_printf(dev, "Invalid advertised speed; "
2838 "valid flags are: 0x%02x\n",
2839 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2843 error = ixl_set_advertised_speeds(pf, requested_ls, false);
2847 pf->advertised_speed = requested_ls;
2848 ixl_update_link_status(pf);
2853 * Input: bitmap of enum i40e_aq_link_speed
/*
 * Return the highest speed present in the bitmap as an IF_Mbps()/
 * IF_Gbps() value (bits per second); checks fastest speeds first and
 * falls back to 100 Mb when no bit matches.
 */
2856 ixl_max_aq_speed_to_value(u8 link_speeds)
2858 if (link_speeds & I40E_LINK_SPEED_40GB)
2860 if (link_speeds & I40E_LINK_SPEED_25GB)
2862 if (link_speeds & I40E_LINK_SPEED_20GB)
2864 if (link_speeds & I40E_LINK_SPEED_10GB)
2866 if (link_speeds & I40E_LINK_SPEED_5GB)
2868 if (link_speeds & I40E_LINK_SPEED_2_5GB)
2869 return IF_Mbps(2500);
2870 if (link_speeds & I40E_LINK_SPEED_1GB)
2872 if (link_speeds & I40E_LINK_SPEED_100MB)
2873 return IF_Mbps(100);
2875 /* Minimum supported link speed */
2876 return IF_Mbps(100);
2880 ** Get the width and transaction speed of
2881 ** the bus this adapter is plugged into.
/*
 * Reads the PCIe Link Status register, stores the decoded speed/width
 * in the shared-code hw struct, prints them, and warns when the slot's
 * bandwidth may be insufficient for all ports at maximum link speed.
 * X722 parts are skipped (not PCIe-attached in the same way).
 */
2884 ixl_get_bus_info(struct ixl_pf *pf)
2886 struct i40e_hw *hw = &pf->hw;
2887 device_t dev = pf->dev;
2889 u32 offset, num_ports;
2892 /* Some devices don't use PCIE */
2893 if (hw->mac.type == I40E_MAC_X722)
2896 /* Read PCI Express Capabilities Link Status Register */
2897 pci_find_cap(dev, PCIY_EXPRESS, &offset);
2898 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
2900 /* Fill out hw struct with PCIE info */
2901 i40e_set_pci_config_data(hw, link);
2903 /* Use info to print out bandwidth messages */
2904 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
2905 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
2906 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
2907 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
2908 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
2909 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
2910 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
2911 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
2915 * If adapter is in slot with maximum supported speed,
2916 * no warning message needs to be printed out.
2918 if (hw->bus.speed >= i40e_bus_speed_8000
2919 && hw->bus.width >= i40e_bus_width_pcie_x8)
2922 num_ports = bitcount32(hw->func_caps.valid_functions);
2923 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
2925 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
2926 device_printf(dev, "PCI-Express bandwidth available"
2927 " for this device may be insufficient for"
2928 " optimal performance.\n");
2929 device_printf(dev, "Please move the device to a different"
2930 " PCI-e link with more lanes and/or higher"
2931 " transfer rate.\n");
/* Sysctl handler: format the NVM/firmware version string into an sbuf (read-only). */
2936 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
2938 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2939 struct i40e_hw *hw = &pf->hw;
2942 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2943 ixl_nvm_version_str(hw, sbuf);
/*
 * Debug helper: decode an NVM update request (command, module pointer,
 * flags, offset, size) and print it.  The ptr/flags fields are packed
 * into nvma->config (low byte = module pointer, next nibble = flags).
 */
2951 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
2953 u8 nvma_ptr = nvma->config & 0xFF;
2954 u8 nvma_flags = (nvma->config & 0xF00) >> 8;
2955 const char * cmd_str;
2957 switch (nvma->command) {
/* This specific read pattern is the tool's "get driver status" probe. */
2959 if (nvma_ptr == 0xF && nvma_flags == 0xF &&
2960 nvma->offset == 0 && nvma->data_size == 1) {
2961 device_printf(dev, "NVMUPD: Get Driver Status Command\n");
2966 case I40E_NVM_WRITE:
2970 device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command);
2974 "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n",
2975 cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size);
/*
 * ioctl backend for the Intel NVM update tool.  Validates the ifdrv
 * request, copies the i40e_nvm_access structure (and its trailing data)
 * in from userspace, waits out any in-progress EMP reset (up to ~10s),
 * forwards the request to the shared-code i40e_nvmupd_command(), and
 * copies the result back out.  perrno is translated so -EPERM is not
 * returned to the kernel (it would be treated as ERESTART).
 */
2979 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
2981 struct i40e_hw *hw = &pf->hw;
2982 struct i40e_nvm_access *nvma;
2983 device_t dev = pf->dev;
2984 enum i40e_status_code status = 0;
2985 size_t nvma_size, ifd_len, exp_len;
2988 DEBUGFUNC("ixl_handle_nvmupd_cmd");
2991 nvma_size = sizeof(struct i40e_nvm_access);
2992 ifd_len = ifd->ifd_len;
2994 if (ifd_len < nvma_size ||
2995 ifd->ifd_data == NULL) {
2996 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
2998 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
2999 __func__, ifd_len, nvma_size);
3000 device_printf(dev, "%s: data pointer: %p\n", __func__,
3005 nvma = malloc(ifd_len, M_IXL, M_WAITOK);
3006 err = copyin(ifd->ifd_data, nvma, ifd_len);
3008 device_printf(dev, "%s: Cannot get request from user space\n",
3014 if (pf->dbg_mask & IXL_DBG_NVMUPD)
3015 ixl_print_nvm_cmd(dev, nvma);
/* Poll up to 100 x 100ms for an in-progress reset to clear. */
3017 if (IXL_PF_IS_RESETTING(pf)) {
3019 while (count++ < 100) {
3020 i40e_msec_delay(100);
3021 if (!(IXL_PF_IS_RESETTING(pf)))
3026 if (IXL_PF_IS_RESETTING(pf)) {
3028 "%s: timeout waiting for EMP reset to finish\n",
3034 if (nvma->data_size < 1 || nvma->data_size > 4096) {
3036 "%s: invalid request, data size not in supported range\n",
3043 * Older versions of the NVM update tool don't set ifd_len to the size
3044 * of the entire buffer passed to the ioctl. Check the data_size field
3045 * in the contained i40e_nvm_access struct and ensure everything is
3046 * copied in from userspace.
3048 exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
3050 if (ifd_len < exp_len) {
3052 nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK);
3053 err = copyin(ifd->ifd_data, nvma, ifd_len);
3055 device_printf(dev, "%s: Cannot get request from user space\n",
3062 // TODO: Might need a different lock here
3064 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3065 // IXL_PF_UNLOCK(pf);
3067 err = copyout(nvma, ifd->ifd_data, ifd_len);
3070 device_printf(dev, "%s: Cannot return data to user space\n",
3075 /* Let the nvmupdate report errors, show them only when debug is enabled */
3076 if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3077 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3078 i40e_stat_str(hw, status), perrno);
3081 * -EPERM is actually ERESTART, which the kernel interprets as it needing
3082 * to run this ioctl again. So use -EACCES for -EPERM instead.
3084 if (perrno == -EPERM)
/*
 * Scan the four GLGEN_MDIO_I2C_SEL registers for one that has I2C
 * enabled and whose PHY port number matches this function's port;
 * used to locate the I2C interface for module diagnostics.
 */
3091 ixl_find_i2c_interface(struct ixl_pf *pf)
3093 struct i40e_hw *hw = &pf->hw;
3094 bool i2c_en, port_matched;
3097 for (int i = 0; i < 4; i++) {
3098 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3099 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3100 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3101 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3103 if (i2c_en && port_matched)
/*
 * Map a PHY-type bit position to its name.  Two tables are kept: the
 * 32-entry base table and the 8-entry extended table (selected by
 * 'ext'); out-of-range positions return an "Invalid" marker.
 */
3111 ixl_phy_type_string(u32 bit_pos, bool ext)
3113 static char * phy_types_str[32] = {
3143 "1000BASE-T Optical",
3147 static char * ext_phy_types_str[8] = {
3158 if (ext && bit_pos > 7) return "Invalid_Ext";
3159 if (bit_pos > 31) return "Invalid";
3161 return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3164 /* TODO: ERJ: I don't think this is necessary anymore. */
/*
 * Send a get_link_status admin command (with link-status-event enable
 * requested) and copy the raw response into the caller's buffer.
 */
3166 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3168 device_t dev = pf->dev;
3169 struct i40e_hw *hw = &pf->hw;
3170 struct i40e_aq_desc desc;
3171 enum i40e_status_code status;
3173 struct i40e_aqc_get_link_status *aq_link_status =
3174 (struct i40e_aqc_get_link_status *)&desc.params.raw;
3176 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3177 link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3178 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3181 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3182 __func__, i40e_stat_str(hw, status),
3183 i40e_aq_str(hw, hw->aq.asq_last_status));
3187 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
/*
 * Translate a link-status PHY type value into a name.  Values at or
 * above the extended-type base (0x1F) presumably index the extended
 * table (the selecting condition is elided in this view); others use
 * the base table.
 */
3192 ixl_phy_type_string_ls(u8 val)
3195 return ixl_phy_type_string(val - 0x1F, true);
3197 return ixl_phy_type_string(val, false);
/*
 * Sysctl handler: fetch the raw AQ link status and print its fields
 * (PHY type, speed, link/AN/ext info, loopback, max frame size, power)
 * into an sbuf for debugging.
 */
3201 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3203 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3204 device_t dev = pf->dev;
3208 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3210 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3214 struct i40e_aqc_get_link_status link_status;
3215 error = ixl_aq_get_link_status(pf, &link_status);
3221 sbuf_printf(buf, "\n"
3222 "PHY Type : 0x%02x<%s>\n"
3224 "Link info: 0x%02x\n"
3225 "AN info : 0x%02x\n"
3226 "Ext info : 0x%02x\n"
3227 "Loopback : 0x%02x\n"
3231 link_status.phy_type,
3232 ixl_phy_type_string_ls(link_status.phy_type),
3233 link_status.link_speed,
3234 link_status.link_info,
3235 link_status.an_info,
3236 link_status.ext_info,
3237 link_status.loopback,
3238 link_status.max_frame_size,
3240 link_status.power_desc);
3242 error = sbuf_finish(buf);
3244 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler: query PHY capabilities from firmware and print a
 * decoded dump — PHY type bitmaps (base and extended) with names,
 * supported link speeds, abilities/EEE/EEER/low-power fields, PHY ID,
 * module type, and FEC configuration.
 */
3251 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3253 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3254 struct i40e_hw *hw = &pf->hw;
3255 device_t dev = pf->dev;
3256 enum i40e_status_code status;
3257 struct i40e_aq_get_phy_abilities_resp abilities;
3261 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3263 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3267 status = i40e_aq_get_phy_capabilities(hw,
3268 FALSE, FALSE, &abilities, NULL);
3271 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3272 __func__, i40e_stat_str(hw, status),
3273 i40e_aq_str(hw, hw->aq.asq_last_status));
3278 sbuf_printf(buf, "\n"
3280 abilities.phy_type);
/* Expand each set PHY-type bit into its name. */
3282 if (abilities.phy_type != 0) {
3283 sbuf_printf(buf, "<");
3284 for (int i = 0; i < 32; i++)
3285 if ((1 << i) & abilities.phy_type)
3286 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3287 sbuf_printf(buf, ">");
3290 sbuf_printf(buf, "\nPHY Ext : %02x",
3291 abilities.phy_type_ext);
3293 if (abilities.phy_type_ext != 0) {
3294 sbuf_printf(buf, "<");
3295 for (int i = 0; i < 4; i++)
3296 if ((1 << i) & abilities.phy_type_ext)
3297 sbuf_printf(buf, "%s,",
3298 ixl_phy_type_string(i, true));
3299 sbuf_printf(buf, ">");
3302 sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed);
3303 if (abilities.link_speed != 0) {
3305 sbuf_printf(buf, " <");
3306 for (int i = 0; i < 8; i++) {
3307 link_speed = (1 << i) & abilities.link_speed;
3309 sbuf_printf(buf, "%s, ",
3310 ixl_link_speed_string(link_speed));
3312 sbuf_printf(buf, ">");
3315 sbuf_printf(buf, "\n"
3320 "ID : %02x %02x %02x %02x\n"
3321 "ModType : %02x %02x %02x\n"
3325 abilities.abilities, abilities.eee_capability,
3326 abilities.eeer_val, abilities.d3_lpan,
3327 abilities.phy_id[0], abilities.phy_id[1],
3328 abilities.phy_id[2], abilities.phy_id[3],
3329 abilities.module_type[0], abilities.module_type[1],
3330 abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3331 abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3332 abilities.ext_comp_code);
3334 error = sbuf_finish(buf);
3336 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler: dump the software MAC filter list for the PF VSI
 * (MAC address, VLAN, flags per entry), and when SR-IOV is active,
 * the filter list of each enabled VF as well.
 */
3343 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
3345 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3346 struct ixl_vsi *vsi = &pf->vsi;
3347 struct ixl_mac_filter *f;
3348 device_t dev = pf->dev;
3349 int error = 0, ftl_len = 0, ftl_counter = 0;
3353 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3355 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3359 sbuf_printf(buf, "\n");
3361 /* Print MAC filters */
3362 sbuf_printf(buf, "PF Filters:\n");
3363 LIST_FOREACH(f, &vsi->ftl, ftle)
3367 sbuf_printf(buf, "(none)\n");
3369 LIST_FOREACH(f, &vsi->ftl, ftle) {
3371 MAC_FORMAT ", vlan %4d, flags %#06x",
3372 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3373 /* don't print '\n' for last entry */
3374 if (++ftl_counter != ftl_len)
3375 sbuf_printf(buf, "\n");
3380 /* TODO: Give each VF its own filter list sysctl */
3382 if (pf->num_vfs > 0) {
3383 sbuf_printf(buf, "\n\n");
3384 for (int i = 0; i < pf->num_vfs; i++) {
3386 if (!(vf->vf_flags & VF_FLAG_ENABLED))
3390 ftl_len = 0, ftl_counter = 0;
3391 sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
3392 LIST_FOREACH(f, &vsi->ftl, ftle)
3396 sbuf_printf(buf, "(none)\n");
3398 LIST_FOREACH(f, &vsi->ftl, ftle) {
3400 MAC_FORMAT ", vlan %4d, flags %#06x\n",
3401 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3408 error = sbuf_finish(buf);
3410 device_printf(dev, "Error finishing sbuf: %d\n", error);
3416 #define IXL_SW_RES_SIZE 0x14
/* qsort comparator: order switch resource entries by resource_type ascending. */
3418 ixl_res_alloc_cmp(const void *a, const void *b)
3420 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
3421 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
3422 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
3424 return ((int)one->resource_type - (int)two->resource_type);
3428 * Longest string length: 25
/* Map a switch resource type code to its display name; out-of-range types are "(Reserved)". */
3431 ixl_switch_res_type_string(u8 type)
3433 static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = {
3436 "Perfect Match MAC address",
3439 "Multicast hash entry",
3440 "Unicast hash entry",
3444 "VLAN Statistic Pool",
3447 "Inner VLAN Forward filter",
3456 if (type < IXL_SW_RES_SIZE)
3457 return ixl_switch_res_type_strings[type];
3459 return "(Reserved)";
/*
 * Sysctl handler: fetch the HW switch resource allocation table from
 * firmware, sort it by resource type, and print guaranteed/total/used/
 * unallocated counts per resource as a formatted table.
 */
3463 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
3465 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3466 struct i40e_hw *hw = &pf->hw;
3467 device_t dev = pf->dev;
3469 enum i40e_status_code status;
3473 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
3475 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3477 device_printf(dev, "Could not allocate sbuf for output.\n");
3481 bzero(resp, sizeof(resp));
3482 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
3488 "%s: get_switch_resource_alloc() error %s, aq error %s\n",
3489 __func__, i40e_stat_str(hw, status),
3490 i40e_aq_str(hw, hw->aq.asq_last_status));
3495 /* Sort entries by type for display */
3496 qsort(resp, num_entries,
3497 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
3498 &ixl_res_alloc_cmp);
3500 sbuf_cat(buf, "\n");
3501 sbuf_printf(buf, "# of entries: %d\n", num_entries);
3503 " Type | Guaranteed | Total | Used | Un-allocated\n"
3504 " | (this) | (all) | (this) | (all) \n");
3505 for (int i = 0; i < num_entries; i++) {
3507 "%25s | %10d %5d %6d %12d",
3508 ixl_switch_res_type_string(resp[i].resource_type),
3512 resp[i].total_unalloced);
3513 if (i < num_entries - 1)
3514 sbuf_cat(buf, "\n");
3517 error = sbuf_finish(buf);
3519 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Fixed SEID ranges assigned by firmware at power-on for switch
 * elements: EMP, the MACs/ports, PFs, and VFs.  Used to infer an
 * element's identity from its SEID alone.
 */
3525 enum ixl_sw_seid_offset {
3526 IXL_SW_SEID_EMP = 1,
3527 IXL_SW_SEID_MAC_START = 2,
3528 IXL_SW_SEID_MAC_END = 5,
3529 IXL_SW_SEID_PF_START = 16,
3530 IXL_SW_SEID_PF_END = 31,
3531 IXL_SW_SEID_VF_START = 32,
3532 IXL_SW_SEID_VF_END = 159,
3536 * Caller must init and delete sbuf; this function will clear and
3537 * finish it for caller.
3539 * Note: The SEID argument only applies for elements defined by FW at
3540 * power-on; these include the EMP, Ports, PFs and VFs.
/*
 * Write a short name for a switch element into the caller's sbuf and
 * return its data pointer.  FW-assigned SEID ranges are decoded first;
 * otherwise the element_type field is used (BMC/PV/VEB/PA/VSI).
 */
3543 ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid)
3547 /* If SEID is in certain ranges, then we can infer the
3548 * mapping of SEID to switch element.
3550 if (seid == IXL_SW_SEID_EMP) {
3553 } else if (seid >= IXL_SW_SEID_MAC_START &&
3554 seid <= IXL_SW_SEID_MAC_END) {
3555 sbuf_printf(s, "MAC %2d",
3556 seid - IXL_SW_SEID_MAC_START);
3558 } else if (seid >= IXL_SW_SEID_PF_START &&
3559 seid <= IXL_SW_SEID_PF_END) {
3560 sbuf_printf(s, "PF %3d",
3561 seid - IXL_SW_SEID_PF_START);
3563 } else if (seid >= IXL_SW_SEID_VF_START &&
3564 seid <= IXL_SW_SEID_VF_END) {
3565 sbuf_printf(s, "VF %3d",
3566 seid - IXL_SW_SEID_VF_START);
3570 switch (element_type) {
3571 case I40E_AQ_SW_ELEM_TYPE_BMC:
3574 case I40E_AQ_SW_ELEM_TYPE_PV:
3577 case I40E_AQ_SW_ELEM_TYPE_VEB:
3580 case I40E_AQ_SW_ELEM_TYPE_PA:
3583 case I40E_AQ_SW_ELEM_TYPE_VSI:
3584 sbuf_printf(s, "VSI");
3593 return sbuf_data(s);
/* qsort comparator: order switch config elements by SEID ascending. */
3597 ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b)
3599 const struct i40e_aqc_switch_config_element_resp *one, *two;
3600 one = (const struct i40e_aqc_switch_config_element_resp *)a;
3601 two = (const struct i40e_aqc_switch_config_element_resp *)b;
3603 return ((int)one->seid - (int)two->seid);
/*
 * Sysctl handler: fetch the HW switch configuration from firmware,
 * sort elements by SEID, and print each element's SEID, uplink and
 * downlink SEIDs (with inferred names via ixl_switch_element_string),
 * and connection type as a table.  Only a single AQ buffer is fetched;
 * a remaining 'next' SEID is reported but not followed (see TODO).
 */
3607 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
3609 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3610 struct i40e_hw *hw = &pf->hw;
3611 device_t dev = pf->dev;
3614 enum i40e_status_code status;
3617 u8 aq_buf[I40E_AQ_LARGE_BUF];
3619 struct i40e_aqc_switch_config_element_resp *elem;
3620 struct i40e_aqc_get_switch_config_resp *sw_config;
3621 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
3623 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3625 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3629 status = i40e_aq_get_switch_config(hw, sw_config,
3630 sizeof(aq_buf), &next, NULL);
3633 "%s: aq_get_switch_config() error %s, aq error %s\n",
3634 __func__, i40e_stat_str(hw, status),
3635 i40e_aq_str(hw, hw->aq.asq_last_status));
3640 device_printf(dev, "%s: TODO: get more config with SEID %d\n",
/* Scratch sbuf used to build per-element names. */
3643 nmbuf = sbuf_new_auto();
3645 device_printf(dev, "Could not allocate sbuf for name output.\n");
3650 /* Sort entries by SEID for display */
3651 qsort(sw_config->element, sw_config->header.num_reported,
3652 sizeof(struct i40e_aqc_switch_config_element_resp),
3653 &ixl_sw_cfg_elem_seid_cmp);
3655 sbuf_cat(buf, "\n");
3656 /* Assuming <= 255 elements in switch */
3657 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
3658 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
3660 * Revision -- all elements are revision 1 for now
3663 "SEID ( Name ) | Up ( Name ) | Down ( Name ) | Conn Type\n"
3664 " | | | (uplink)\n");
3665 for (int i = 0; i < sw_config->header.num_reported; i++) {
3666 elem = &sw_config->element[i];
3668 // "%4d (%8s) | %8s %8s %#8x",
3669 sbuf_printf(buf, "%4d", elem->seid);
3671 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3672 elem->element_type, elem->seid));
3673 sbuf_cat(buf, " | ");
3674 sbuf_printf(buf, "%4d", elem->uplink_seid);
3676 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3677 0, elem->uplink_seid));
3678 sbuf_cat(buf, " | ");
3679 sbuf_printf(buf, "%4d", elem->downlink_seid);
3681 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3682 0, elem->downlink_seid));
3683 sbuf_cat(buf, " | ");
3684 sbuf_printf(buf, "%8d", elem->connection_type);
3685 if (i < sw_config->header.num_reported - 1)
3686 sbuf_cat(buf, "\n");
3690 error = sbuf_finish(buf);
3692 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler: set the switch VLAN ethertype (hw->switch_tag) and push
 * the updated switch configuration to firmware.  Requires the HW to
 * advertise 802.1AD (QinQ) capability.
 * NOTE(review): several lines (return paths, closing braces, and the
 * opening lines of two device_printf() calls) are elided in this view.
 */
ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS)
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    struct i40e_hw *hw = &pf->hw;
    device_t dev = pf->dev;
    int requested_vlan = -1;
    enum i40e_status_code status = 0;

    /* Read the requested VLAN tag value from userland. */
    error = sysctl_handle_int(oidp, &requested_vlan, 0, req);
    if ((error) || (req->newptr == NULL))

    /* Changing switch tags is only allowed on 802.1AD-capable HW. */
    if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) {
        device_printf(dev, "Flags disallow setting of vlans\n");

    hw->switch_tag = requested_vlan;
    /* Continuation of an elided informational device_printf(). */
        "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n",
        hw->switch_tag, hw->first_tag, hw->second_tag);
    /* Commit the new tag configuration to firmware via the admin queue. */
    status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
        /* Continuation of an elided error device_printf(). */
        "%s: aq_set_switch_config() error %s, aq error %s\n",
        __func__, i40e_stat_str(hw, status),
        i40e_aq_str(hw, hw->aq.asq_last_status));
/*
 * Sysctl handler: hex-dump the RSS hash key (read-only).
 * X722 MACs fetch the key through the admin queue; other MACs read it
 * 4 bytes at a time from the I40E_PFQF_HKEY registers.  The key is then
 * printed via ixl_sbuf_print_bytes() with the ASCII column enabled.
 * NOTE(review): some lines (declarations of buf/reg/error, returns,
 * closing braces) are elided in this view.
 */
ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    struct i40e_hw *hw = &pf->hw;
    device_t dev = pf->dev;
    enum i40e_status_code status;
    struct i40e_aqc_get_set_rss_key_data key_data;

    buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
        device_printf(dev, "Could not allocate sbuf for output.\n");

    bzero(&key_data, sizeof(key_data));

    sbuf_cat(buf, "\n");
    if (hw->mac.type == I40E_MAC_X722) {
        /* X722: key must be read via the AQ, not registers. */
        status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
            device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
                i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
        /* Other MACs: assemble the key from 32-bit HKEY registers. */
        for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
            reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
            /* i << 2: each register contributes 4 bytes. */
            bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);

    ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);

    error = sbuf_finish(buf);
        device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Hex-dump helper: print 'length' bytes of 'buf' into sbuf 'sb', 16 bytes
 * per row, each row prefixed with its byte offset (label_offset + row
 * start).  When 'text' is true, an ASCII rendering of each row is also
 * printed, with non-printable bytes (outside 32..126) shown as '.'.
 * NOTE(review): declarations of i/j/k/width/c, a 'lines++' adjustment for
 * the remainder row, and closing braces appear elided in this view.
 */
ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)

    /* Nothing to do for empty or missing input. */
    if (length < 1 || buf == NULL) return;

    int byte_stride = 16;
    int lines = length / byte_stride;
    int rem = length % byte_stride;

    for (i = 0; i < lines; i++) {
        /* Last row may be shorter when length is not a multiple of 16. */
        width = (rem > 0 && i == lines - 1)
            ? rem : byte_stride;

        /* Row label: byte offset of the first byte in this row. */
        sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);

        for (j = 0; j < width; j++)
            sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);

        /* Pad a short row so the ASCII column stays aligned. */
        if (width < byte_stride) {
            for (k = 0; k < (byte_stride - width); k++)
                sbuf_printf(sb, "   ");

            sbuf_printf(sb, "\n");

        /* ASCII column (only reached when 'text' is true, per elided branch). */
        for (j = 0; j < width; j++) {
            c = (char)buf[i * byte_stride + j];
            if (c < 32 || c > 126)
                sbuf_printf(sb, ".");
                sbuf_printf(sb, "%c", c);

        sbuf_printf(sb, "\n");
/*
 * Sysctl handler: hex-dump the RSS lookup table (HLUT, read-only).
 * X722 MACs fetch the LUT through the admin queue; other MACs read it
 * 4 bytes at a time from the I40E_PFQF_HLUT registers.
 * NOTE(review): declarations of buf/reg/error/hlut[], returns, and
 * closing braces are elided in this view.
 */
ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    struct i40e_hw *hw = &pf->hw;
    device_t dev = pf->dev;
    enum i40e_status_code status;

    buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
        device_printf(dev, "Could not allocate sbuf for output.\n");

    bzero(hlut, sizeof(hlut));
    sbuf_cat(buf, "\n");
    if (hw->mac.type == I40E_MAC_X722) {
        /* X722: LUT must be read via the AQ. */
        status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
            device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
                i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
        /* rss_table_size >> 2: one 32-bit register covers 4 LUT entries. */
        for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
            reg = rd32(hw, I40E_PFQF_HLUT(i));
            bcopy(&reg, &hlut[i << 2], 4);

    /* Dump 512 LUT bytes; ASCII column disabled (binary data). */
    ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);

    error = sbuf_finish(buf);
        device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler: report the enabled RSS hash types (HENA) as a single
 * 64-bit value assembled from the two 32-bit I40E_PFQF_HENA registers.
 * NOTE(review): the 'u64 hena;' declaration and braces are elided here.
 */
ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    struct i40e_hw *hw = &pf->hw;

    /* HENA(0) holds the low 32 bits, HENA(1) the high 32 bits. */
    hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
        ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
    return sysctl_handle_long(oidp, NULL, hena, req);
/*
 * Sysctl to disable firmware's link management
 *
 * 1 - Disable link management on this port
 * 0 - Re-enable link management
 *
 * On normal NVMs, firmware manages link by default.
 */
ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    struct i40e_hw *hw = &pf->hw;
    device_t dev = pf->dev;
    int requested_mode = -1;
    enum i40e_status_code status = 0;

    /* Read in new mode */
    error = sysctl_handle_int(oidp, &requested_mode, 0, req);
    if ((error) || (req->newptr == NULL))

    /* Check for sane value */
    if (requested_mode < 0 || requested_mode > 1) {
        device_printf(dev, "Valid modes are 0 or 1\n");

    /* Bit 4 of the PHY debug argument toggles FW link management. */
    status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
        /* Continuation of an elided error device_printf(). */
        "%s: Error setting new phy debug mode %s,"
        " aq error: %s\n", __func__, i40e_stat_str(hw, status),
        i40e_aq_str(hw, hw->aq.asq_last_status));
/*
 * Read some diagnostic data from a (Q)SFP+ module
 *
 * Byte offsets per SFF-8472 (SFP, A2 page) / SFF-8636 (QSFP, lower page):
 *             SFP A2      QSFP Lower Page
 * Temperature 96-97       22-23
 * TX power    102-103     34-35..40-41
 * RX power    104-105     50-51..56-57
 */
ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    device_t dev = pf->dev;

    /* First (size-probe) pass of the sysctl: just report a size estimate. */
    if (req->oldptr == NULL) {
        error = SYSCTL_OUT(req, 0, 128);

    /* Byte 0 of address 0xA0 is the module identifier. */
    error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
        device_printf(dev, "Error reading from i2c\n");

    /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
    if (output == 0x3) {
        /*
         * Check SFP diagnostic-monitoring type (byte 92):
         * - Internally calibrated data
         * - Diagnostic monitoring is implemented
         */
        pf->read_i2c_byte(pf, 92, 0xA0, &output);
        if (!(output & 0x60)) {
            device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);

        sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);

        /* Temperature + Vcc region of the A2 page. */
        for (u8 offset = 96; offset < 100; offset++) {
            pf->read_i2c_byte(pf, offset, 0xA2, &output);
            sbuf_printf(sbuf, "%02X ", output);

        /* TX/RX power region of the A2 page. */
        for (u8 offset = 102; offset < 106; offset++) {
            pf->read_i2c_byte(pf, offset, 0xA2, &output);
            sbuf_printf(sbuf, "%02X ", output);
    } else if (output == 0xD || output == 0x11) {
        /*
         * QSFP+ modules are always internally calibrated, and must indicate
         * what types of diagnostic monitoring are implemented
         */
        sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);

        /* Temperature (lower page bytes 22-23). */
        for (u8 offset = 22; offset < 24; offset++) {
            pf->read_i2c_byte(pf, offset, 0xA0, &output);
            sbuf_printf(sbuf, "%02X ", output);

        /* Supply voltage (lower page bytes 26-27). */
        for (u8 offset = 26; offset < 28; offset++) {
            pf->read_i2c_byte(pf, offset, 0xA0, &output);
            sbuf_printf(sbuf, "%02X ", output);

        /* Read the data from the first lane */
        for (u8 offset = 34; offset < 36; offset++) {
            pf->read_i2c_byte(pf, offset, 0xA0, &output);
            sbuf_printf(sbuf, "%02X ", output);

        for (u8 offset = 50; offset < 52; offset++) {
            pf->read_i2c_byte(pf, offset, 0xA0, &output);
            sbuf_printf(sbuf, "%02X ", output);

        device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
/*
 * Sysctl to read a byte from I2C bus.
 *
 * Input: 32-bit value:
 * 	bits 0-7:   device address (0xA0 or 0xA2)
 * 	bits 8-15:  offset (0-255)
 * 	bits 16-31: unused
 * Output: 8-bit value read
 */
ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    device_t dev = pf->dev;
    int input = -1, error = 0;
    u8 dev_addr, offset, output;

    /* Read in I2C read parameters */
    error = sysctl_handle_int(oidp, &input, 0, req);
    if ((error) || (req->newptr == NULL))

    /* Validate device address */
    dev_addr = input & 0xFF;
    if (dev_addr != 0xA0 && dev_addr != 0xA2) {

    offset = (input >> 8) & 0xFF;

    error = pf->read_i2c_byte(pf, offset, dev_addr, &output);

    /* Report the byte read on the console rather than via sysctl output. */
    device_printf(dev, "%02X\n", output);
/*
 * Sysctl to write a byte to the I2C bus.
 *
 * Input: 32-bit value:
 * 	bits 0-7:   device address (0xA0 or 0xA2)
 * 	bits 8-15:  offset (0-255)
 * 	bits 16-23: value to write
 * 	bits 24-31: unused
 * Output: 8-bit value written
 */
ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    device_t dev = pf->dev;
    int input = -1, error = 0;
    u8 dev_addr, offset, value;

    /* Read in I2C write parameters */
    error = sysctl_handle_int(oidp, &input, 0, req);
    if ((error) || (req->newptr == NULL))

    /* Validate device address */
    dev_addr = input & 0xFF;
    if (dev_addr != 0xA0 && dev_addr != 0xA2) {

    offset = (input >> 8) & 0xFF;
    value = (input >> 16) & 0xFF;

    error = pf->write_i2c_byte(pf, offset, dev_addr, value);

    /* Report the byte written on the console. */
    device_printf(dev, "%02X written\n", value);
/*
 * Query the PHY abilities and report whether the FEC config/ability bit
 * 'bit_pos' is currently set, via *is_set.  Fills *abilities for the
 * caller's later use (e.g. by ixl_set_fec_config()).
 * Skipped entirely when the PF is in firmware recovery mode.
 * NOTE(review): return statements and closing braces are elided here.
 */
ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
    u8 bit_pos, int *is_set)
    device_t dev = pf->dev;
    struct i40e_hw *hw = &pf->hw;
    enum i40e_status_code status;

    /* PHY queries are not valid while FW is in recovery mode. */
    if (IXL_PF_IN_RECOVERY_MODE(pf))

    status = i40e_aq_get_phy_capabilities(hw,
        FALSE, FALSE, abilities, NULL);
        /* Continuation of an elided error device_printf(). */
        "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
        __func__, i40e_stat_str(hw, status),
        i40e_aq_str(hw, hw->aq.asq_last_status));

    /* Normalize the bit test to 0/1. */
    *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
/*
 * Set or clear FEC configuration bit 'bit_pos' in the PHY config,
 * based on the previously fetched *abilities.  Only issues the AQ
 * set_phy_config command when the resulting fec_config actually differs
 * from the current module/extended info, to avoid a needless link flap.
 * NOTE(review): the 'if (set)' guard before the |= and closing braces
 * are elided in this view.
 */
ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
    u8 bit_pos, int set)
    device_t dev = pf->dev;
    struct i40e_hw *hw = &pf->hw;
    struct i40e_aq_set_phy_config config;
    enum i40e_status_code status;

    /* Set new PHY config */
    memset(&config, 0, sizeof(config));
    /* Start from the current FEC bits with 'bit_pos' cleared... */
    config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
        /* ...then re-set it when requested (guard elided in this view). */
        config.fec_config |= bit_pos;
    if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
        /* Preserve all other PHY settings while changing only FEC. */
        config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
        config.phy_type = abilities->phy_type;
        config.phy_type_ext = abilities->phy_type_ext;
        config.link_speed = abilities->link_speed;
        config.eee_capability = abilities->eee_capability;
        config.eeer = abilities->eeer_val;
        config.low_power_ctrl = abilities->d3_lpan;
        status = i40e_aq_set_phy_config(hw, &config, NULL);
            /* Continuation of an elided error device_printf(). */
            "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
            __func__, i40e_stat_str(hw, status),
            i40e_aq_str(hw, hw->aq.asq_last_status));
/*
 * Sysctl handler: get/set the KR-FEC (FC-FEC / BASE-R) ability bit.
 * Reads the current state via ixl_get_fec_config(); on write, applies
 * the new state via ixl_set_fec_config().
 */
ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    int mode, error = 0;

    struct i40e_aq_get_phy_abilities_resp abilities;
    error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);

    /* Read in new mode */
    error = sysctl_handle_int(oidp, &mode, 0, req);
    if ((error) || (req->newptr == NULL))

    return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
/*
 * Sysctl handler: get/set the RS-FEC ability bit.
 * Same pattern as ixl_sysctl_fec_fc_ability(), with the RS bit masks.
 */
ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    int mode, error = 0;

    struct i40e_aq_get_phy_abilities_resp abilities;
    error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);

    /* Read in new mode */
    error = sysctl_handle_int(oidp, &mode, 0, req);
    if ((error) || (req->newptr == NULL))

    return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
/*
 * Sysctl handler: get/set the KR-FEC (FC-FEC) *request* bit — whether the
 * port requests this FEC mode from the link partner.
 */
ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    int mode, error = 0;

    struct i40e_aq_get_phy_abilities_resp abilities;
    error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);

    /* Read in new mode */
    error = sysctl_handle_int(oidp, &mode, 0, req);
    if ((error) || (req->newptr == NULL))

    return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
/*
 * Sysctl handler: get/set the RS-FEC *request* bit — whether the port
 * requests RS-FEC from the link partner.
 */
ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    int mode, error = 0;

    struct i40e_aq_get_phy_abilities_resp abilities;
    error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);

    /* Read in new mode */
    error = sysctl_handle_int(oidp, &mode, 0, req);
    if ((error) || (req->newptr == NULL))

    return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
/*
 * Sysctl handler: get/set the automatic-FEC-negotiation bit.
 */
ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    int mode, error = 0;

    struct i40e_aq_get_phy_abilities_resp abilities;
    error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);

    /* Read in new mode */
    error = sysctl_handle_int(oidp, &mode, 0, req);
    if ((error) || (req->newptr == NULL))

    return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
/*
 * Sysctl handler: dump firmware internal debug data for a cluster.
 * Repeatedly issues i40e_aq_debug_dump() (4 KB at a time), accumulating
 * each table's data into a large temporary buffer; when firmware reports
 * a table change, the accumulated bytes are hex-formatted into the sysctl
 * output.  Table id 0xFF / index 0xFFFFFFFF mark end of the cluster.
 * NOTE(review): declarations of buf/final_buff/cluster_id/dump_buf/
 * ret_* variables, the surrounding loop construct, and several returns
 * are elided in this view.
 */
ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    struct i40e_hw *hw = &pf->hw;
    device_t dev = pf->dev;
    enum i40e_status_code status;

    buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
        device_printf(dev, "Could not allocate sbuf for output.\n");

/* This amount is only necessary if reading the entire cluster into memory */
#define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
    final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT);
    if (final_buff == NULL) {
        device_printf(dev, "Could not allocate memory for output.\n");

    int final_buff_len = 0;

    u16 curr_buff_size = 4096;
    u8 curr_next_table = 0;
    u32 curr_next_index = 0;

    sbuf_cat(buf, "\n");

    status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
        dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
        device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
            i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));

    /* copy info out of temp buffer */
    bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
    final_buff_len += ret_buff_size;

    if (ret_next_table != curr_next_table) {
        /* We're done with the current table; we can dump out read data. */
        sbuf_printf(buf, "%d:", curr_next_table);
        int bytes_printed = 0;
        /*
         * NOTE(review): '<=' prints one extra 16-byte row past
         * final_buff_len when the length is a multiple of 16 — looks
         * like an off-by-one over-read; confirm intent before fixing.
         */
        while (bytes_printed <= final_buff_len) {
            sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
            bytes_printed += 16;

        sbuf_cat(buf, "\n");

        /* The entire cluster has been read; we're finished */
        if (ret_next_table == 0xFF)

        /* Otherwise clear the output buffer and continue reading */
        bzero(final_buff, IXL_FINAL_BUFF_SIZE);

    if (ret_next_index == 0xFFFFFFFF)

    bzero(dump_buf, sizeof(dump_buf));
    curr_next_table = ret_next_table;
    curr_next_index = ret_next_index;

    free(final_buff, M_IXL);

    error = sbuf_finish(buf);
        device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Ask firmware to start its LLDP agent, translating the most common
 * AQ error codes into user-meaningful console messages.  On success,
 * clears the driver's FW_LLDP_DISABLED state flag.
 * NOTE(review): 'break'/'return' statements between the switch cases
 * are elided in this view.
 */
ixl_start_fw_lldp(struct ixl_pf *pf)
    struct i40e_hw *hw = &pf->hw;
    enum i40e_status_code status;

    status = i40e_aq_start_lldp(hw, false, NULL);
    if (status != I40E_SUCCESS) {
        switch (hw->aq.asq_last_status) {
        case I40E_AQ_RC_EEXIST:
            /* Not an error: the agent was already running. */
            device_printf(pf->dev,
                "FW LLDP agent is already running\n");
        case I40E_AQ_RC_EPERM:
            /* NVM/HII configuration forbids SW control of the agent. */
            device_printf(pf->dev,
                "Device configuration forbids SW from starting "
                "the LLDP agent. Set the \"LLDP Agent\" UEFI HII "
                "attribute to \"Enabled\" to use this sysctl\n");
            device_printf(pf->dev,
                "Starting FW LLDP agent failed: error: %s, %s\n",
                i40e_stat_str(hw, status),
                i40e_aq_str(hw, hw->aq.asq_last_status));

    atomic_clear_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
/*
 * Ask firmware to stop its LLDP agent.  Refuses on NPAR-enabled devices
 * and on firmware that does not advertise FW_LLDP_STOPPABLE.  After a
 * successful stop, re-applies DCB parameters and sets the driver's
 * FW_LLDP_DISABLED state flag.
 * NOTE(review): return statements and closing braces are elided here.
 */
ixl_stop_fw_lldp(struct ixl_pf *pf)
    struct i40e_hw *hw = &pf->hw;
    device_t dev = pf->dev;
    enum i40e_status_code status;

    /* NPAR partitions share the port; stopping LLDP is not allowed. */
    if (hw->func_caps.npar_enable != 0) {
        /* Continuation of an elided device_printf(dev, ...). */
            "Disabling FW LLDP agent is not supported on this device\n");

    if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) {
        /* Continuation of an elided device_printf(dev, ...). */
            "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");

    status = i40e_aq_stop_lldp(hw, true, false, NULL);
    if (status != I40E_SUCCESS) {
        /* EPERM here means the agent was already stopped. */
        if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) {
            /* Continuation of an elided error device_printf(). */
                "Disabling FW LLDP agent failed: error: %s, %s\n",
                i40e_stat_str(hw, status),
                i40e_aq_str(hw, hw->aq.asq_last_status));

        device_printf(dev, "FW LLDP agent is already stopped\n");

    /* Restore DCB settings now that the FW agent is stopped. */
    i40e_aq_set_dcb_parameters(hw, true, NULL);
    atomic_set_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
/*
 * Sysctl handler: enable (1) or disable (0) the firmware LLDP agent.
 * Reports the current state (derived from the FW_LLDP_DISABLED flag)
 * and dispatches to ixl_stop_fw_lldp()/ixl_start_fw_lldp() on change.
 */
ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    int state, new_state, error = 0;

    /* State flag is "disabled", so invert it for the user-visible value. */
    state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);

    /* Read in new mode */
    error = sysctl_handle_int(oidp, &new_state, 0, req);
    if ((error) || (req->newptr == NULL))

    /* Already in requested state */
    if (new_state == state)

        return ixl_stop_fw_lldp(pf);

    return ixl_start_fw_lldp(pf);
/*
 * Sysctl handler: enable/disable Energy Efficient Ethernet (EEE).
 * On a state change, calls i40e_enable_eee() and mirrors the result in
 * the IXL_PF_STATE_EEE_ENABLED flag; I40E_ERR_CONFIG from firmware is
 * reported as an unsupported-configuration error (branch elided here).
 */
ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    int state, new_state;
    int sysctl_handle_status = 0;
    enum i40e_status_code cmd_status;

    /* Init states' values */
    state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED));

    /* Get requested mode */
    sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req);
    if ((sysctl_handle_status) || (req->newptr == NULL))
        return (sysctl_handle_status);

    /* Check if state has changed */
    if (new_state == state)

    /* Set new state of EEE */
    cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state));

    /* Save new state or report error */
            atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
            atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
    } else if (cmd_status == I40E_ERR_CONFIG)
/*
 * Determine initial link state at attach time.  On firmware older than
 * 4.33, first waits 75 ms and restarts autonegotiation to work around a
 * link-detection issue, then queries firmware for the current link state
 * into pf->link_up.
 * NOTE(review): return statements and closing braces are elided here.
 */
ixl_attach_get_link_status(struct ixl_pf *pf)
    struct i40e_hw *hw = &pf->hw;
    device_t dev = pf->dev;

    /* Old FW (< 4.33) needs an explicit AN restart to report link. */
    if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
        (hw->aq.fw_maj_ver < 4)) {
        i40e_msec_delay(75);
        error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
            device_printf(dev, "link restart failed, aq_err=%d\n",
                pf->hw.aq.asq_last_status);

    /* Determine link state */
    hw->phy.get_link_info = TRUE;
    i40e_get_link_status(hw, &pf->link_up);
/*
 * Sysctl handler: request a PF reset.  The reset itself is deferred to
 * the admin task via the IXL_PF_STATE_PF_RESET_REQ flag.
 */
ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    int requested = 0, error = 0;

    /* Read in new mode */
    error = sysctl_handle_int(oidp, &requested, 0, req);
    if ((error) || (req->newptr == NULL))

    /* Initiate the PF reset later in the admin task */
    atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
/*
 * Sysctl handler: trigger an immediate CORE reset by writing the CORER
 * bit into the global reset-trigger register.
 */
ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    struct i40e_hw *hw = &pf->hw;
    int requested = 0, error = 0;

    /* Read in new mode */
    error = sysctl_handle_int(oidp, &requested, 0, req);
    if ((error) || (req->newptr == NULL))

    wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
/*
 * Sysctl handler: trigger an immediate GLOBAL reset (affects all
 * functions on the device) via the GLOBR bit of the reset-trigger
 * register.
 */
ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    struct i40e_hw *hw = &pf->hw;
    int requested = 0, error = 0;

    /* Read in new mode */
    error = sysctl_handle_int(oidp, &requested, 0, req);
    if ((error) || (req->newptr == NULL))

    wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
4526 * Print out mapping of TX queue indexes and Rx queue indexes
4530 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
4532 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4533 struct ixl_vsi *vsi = &pf->vsi;
4534 device_t dev = pf->dev;
4538 struct ixl_rx_queue *rx_que = vsi->rx_queues;
4539 struct ixl_tx_queue *tx_que = vsi->tx_queues;
4541 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4543 device_printf(dev, "Could not allocate sbuf for output.\n");
4547 sbuf_cat(buf, "\n");
4548 for (int i = 0; i < vsi->num_rx_queues; i++) {
4549 rx_que = &vsi->rx_queues[i];
4550 sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
4552 for (int i = 0; i < vsi->num_tx_queues; i++) {
4553 tx_que = &vsi->tx_queues[i];
4554 sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
4557 error = sbuf_finish(buf);
4559 device_printf(dev, "Error finishing sbuf: %d\n", error);