1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
39 #include "ixl_pf_iov.h"
44 #include "ixl_iw_int.h"
47 static u8 ixl_convert_sysctl_aq_link_speed(u8, bool);
48 static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
51 static int ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
52 static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
53 static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
54 static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
55 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
56 static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
57 static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
58 static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
61 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
62 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
63 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
64 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
65 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
66 static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
67 static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
68 static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
69 static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
70 static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
71 static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
72 static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
73 static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
74 static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
75 static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
76 static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
77 static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
78 static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
79 static int ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
80 static int ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
81 static int ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
82 static int ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS);
83 static int ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
84 static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
86 static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
87 static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
91 extern int ixl_enable_iwarp;
92 extern int ixl_limit_iwarp_msix;
95 const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
96 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
98 const char * const ixl_fc_string[6] = {
107 static char *ixl_fec_string[3] = {
109 "CL74 FC-FEC/BASE-R",
113 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
116 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
/*
 * Formats firmware/API/NVM/EEtrack/OEM version info into the caller's sbuf.
 * The OEM version dword is unpacked as: ver (bits 31:24), build (23:8),
 * patch (7:0).
 */
119 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
121 u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
122 u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
123 u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
/* Format string consumed by an sbuf print call (call site elided here). */
126 "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
127 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
128 hw->aq.api_maj_ver, hw->aq.api_min_ver,
/* NVM version is split into hi/lo fields via driver-defined masks/shifts. */
129 (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
130 IXL_NVM_VERSION_HI_SHIFT,
131 (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
132 IXL_NVM_VERSION_LO_SHIFT,
134 oem_ver, oem_build, oem_patch);
/*
 * Prints the NVM/firmware version string (built by ixl_nvm_version_str())
 * to the console via device_printf().  Uses an auto-sizing sbuf as the
 * intermediate buffer; sbuf teardown lines are elided from this view.
 */
138 ixl_print_nvm_version(struct ixl_pf *pf)
140 struct i40e_hw *hw = &pf->hw;
141 device_t dev = pf->dev;
144 sbuf = sbuf_new_auto();
145 ixl_nvm_version_str(hw, sbuf);
147 device_printf(dev, "%s\n", sbuf_data(sbuf));
/*
 * Programs the PF's TX interrupt throttle rate (ITR) into every TX queue's
 * ITR register and mirrors the value into the per-ring software state.
 */
152 ixl_configure_tx_itr(struct ixl_pf *pf)
154 struct i40e_hw *hw = &pf->hw;
155 struct ixl_vsi *vsi = &pf->vsi;
156 struct ixl_tx_queue *que = vsi->tx_queues;
/* Snapshot the sysctl-controlled PF value into the VSI before applying. */
158 vsi->tx_itr_setting = pf->tx_itr;
160 for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
161 struct tx_ring *txr = &que->txr;
/* Write the hardware ITR register for this queue's TX ITR index. */
163 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
164 vsi->tx_itr_setting);
165 txr->itr = vsi->tx_itr_setting;
166 txr->latency = IXL_AVE_LATENCY;
/*
 * RX counterpart of ixl_configure_tx_itr(): programs the PF's RX ITR into
 * every RX queue's ITR register and the per-ring software state.
 */
171 ixl_configure_rx_itr(struct ixl_pf *pf)
173 struct i40e_hw *hw = &pf->hw;
174 struct ixl_vsi *vsi = &pf->vsi;
175 struct ixl_rx_queue *que = vsi->rx_queues;
/* Snapshot the sysctl-controlled PF value into the VSI before applying. */
177 vsi->rx_itr_setting = pf->rx_itr;
179 for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
180 struct rx_ring *rxr = &que->rxr;
182 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
183 vsi->rx_itr_setting);
184 rxr->itr = vsi->rx_itr_setting;
185 rxr->latency = IXL_AVE_LATENCY;
190 * Write PF ITR values to queue ITR registers.
/* Convenience wrapper: apply both TX and RX ITR settings. */
193 ixl_configure_itr(struct ixl_pf *pf)
195 ixl_configure_tx_itr(pf);
196 ixl_configure_rx_itr(pf);
199 /*********************************************************************
201 * Get the hardware capabilities
203 **********************************************************************/
/*
 * Queries function-level capabilities from firmware via the admin queue,
 * detects an I2C interface, selects the driver's I2C access method, and
 * logs a capability summary.  Returns a status/errno (return paths elided
 * from this view).
 */
206 ixl_get_hw_capabilities(struct ixl_pf *pf)
208 struct i40e_aqc_list_capabilities_element_resp *buf;
209 struct i40e_hw *hw = &pf->hw;
210 device_t dev = pf->dev;
211 enum i40e_status_code status;
212 int len, i2c_intfc_num;
/* Initial guess: room for 40 capability elements. */
216 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
218 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
219 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
220 device_printf(dev, "Unable to allocate cap memory\n");
224 /* This populates the hw struct */
225 status = i40e_aq_discover_capabilities(hw, buf, len,
226 &needed, i40e_aqc_opc_list_func_capabilities, NULL);
/* FW reports ENOMEM (with 'needed' set) when our buffer was too small. */
228 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
230 /* retry once with a larger buffer */
234 } else if (status != I40E_SUCCESS) {
235 device_printf(dev, "capability discovery failed; status %s, error %s\n",
236 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
241 * Some devices have both MDIO and I2C; since this isn't reported
242 * by the FW, check registers to see if an I2C interface exists.
244 i2c_intfc_num = ixl_find_i2c_interface(pf);
245 if (i2c_intfc_num != -1)
248 /* Determine functions to use for driver I2C accesses */
249 switch (pf->i2c_access_method) {
/* AQ-based I2C access needs XL710 with AQ API >= 1.7; else fall back
 * to direct register access. */
251 if (hw->mac.type == I40E_MAC_XL710 &&
252 hw->aq.api_maj_ver == 1 &&
253 hw->aq.api_min_ver >= 7) {
254 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
255 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
257 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
258 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
/* Explicit method selections (case labels elided in this view):
 * AQ, register, and bit-bang implementations. */
263 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
264 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
267 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
268 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
271 pf->read_i2c_byte = ixl_read_i2c_byte_bb;
272 pf->write_i2c_byte = ixl_write_i2c_byte_bb;
275 /* Should not happen */
276 device_printf(dev, "Error setting I2C access functions\n");
280 /* Print a subset of the capability information. */
282 "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
283 hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
284 hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
/* mdio_port_mode: 2 = I2C only, 1 = MDIO (plus I2C if detected above). */
285 (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
286 (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
287 (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
293 /* For the set_advertise sysctl */
/*
 * Advertise the full set of supported link speeds at driver load so a
 * later unload/reload restores the default advertisement, then seed the
 * sysctl-visible advertised_speed value from the same mask.
 */
295 ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
297 device_t dev = pf->dev;
300 /* Make sure to initialize the device to the complete list of
301 * supported speeds on driver load, to ensure unloading and
302 * reloading the driver will restore this value.
304 err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
306 /* Non-fatal error */
307 device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
/* Convert the AQ link-speed bitmap into the sysctl representation. */
312 pf->advertised_speed =
313 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
/*
 * Tears down hardware-facing driver state: shuts down the LAN HMC (if it
 * was initialized), disables and shuts down the admin queue, and releases
 * the PF's queue-manager allocation.  Failures are logged but teardown
 * continues.
 */
317 ixl_teardown_hw_structs(struct ixl_pf *pf)
319 enum i40e_status_code status = 0;
320 struct i40e_hw *hw = &pf->hw;
321 device_t dev = pf->dev;
323 /* Shutdown LAN HMC */
/* hmc_obj non-NULL means the HMC was set up and must be torn down. */
324 if (hw->hmc.hmc_obj) {
325 status = i40e_shutdown_lan_hmc(hw);
328 "init: LAN HMC shutdown failure; status %s\n",
329 i40e_stat_str(hw, status));
334 /* Shutdown admin queue */
/* Mask intr0 first so no AQ interrupt fires during shutdown. */
335 ixl_disable_intr0(hw);
336 status = i40e_shutdown_adminq(hw);
339 "init: Admin Queue shutdown failure; status %s\n",
340 i40e_stat_str(hw, status));
342 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
/*
 * Performs a PF reset and re-initializes firmware-facing structures:
 * PF reset, admin queue, capabilities, LAN HMC, switch config, PHY
 * interrupt mask, flow control, then re-enables the admin queue interrupt
 * and rebuilds driver structures.  Error paths (elided here) log and bail.
 */
348 ixl_reset(struct ixl_pf *pf)
350 struct i40e_hw *hw = &pf->hw;
351 device_t dev = pf->dev;
355 // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
357 error = i40e_pf_reset(hw);
359 device_printf(dev, "init: PF reset failure\n");
364 error = i40e_init_adminq(hw);
366 device_printf(dev, "init: Admin queue init failure;"
367 " status code %d\n", error);
/* Leave any PXE-boot configuration behind before normal operation. */
372 i40e_clear_pxe_mode(hw);
375 error = ixl_get_hw_capabilities(pf);
377 device_printf(dev, "init: Error retrieving HW capabilities;"
378 " status code %d\n", error);
382 error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
383 hw->func_caps.num_rx_qp, 0, 0);
385 device_printf(dev, "init: LAN HMC init failed; status code %d\n",
391 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
393 device_printf(dev, "init: LAN HMC config failed; status code %d\n",
399 // XXX: possible fix for panic, but our failure recovery is still broken
400 error = ixl_switch_config(pf);
402 device_printf(dev, "init: ixl_switch_config() failed: %d\n",
407 error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
410 device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
411 " aq_err %d\n", error, hw->aq.asq_last_status);
416 error = i40e_set_fc(hw, &set_fc_err_mask, true);
418 device_printf(dev, "init: setting link flow control failed; retcode %d,"
419 " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
423 // XXX: (Rebuild VSIs?)
425 /* Firmware delay workaround */
/* Older FW (< 4.33) needs an explicit link autoneg restart after reset. */
426 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
427 (hw->aq.fw_maj_ver < 4)) {
429 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
431 device_printf(dev, "init: link restart failed, aq_err %d\n",
432 hw->aq.asq_last_status);
438 /* Re-enable admin queue interrupt */
440 ixl_configure_intr0_msix(pf);
441 ixl_enable_intr0(hw);
447 ixl_rebuild_hw_structs_after_reset(pf);
449 /* The PF reset should have cleared any critical errors */
450 atomic_clear_32(&pf->state, IXL_PF_STATE_PF_CRIT_ERR);
451 atomic_clear_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
/* Re-arm critical-error causes in the ICR0 enable register. */
453 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
454 reg |= IXL_ICR0_CRIT_ERR_MASK;
455 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
462 * TODO: Make sure this properly handles admin queue / single rx queue intr
/*
 * Legacy/MSI (single-vector) interrupt filter (signature line elided;
 * presumably ixl_intr(void *arg)).  Reads ICR0, defers VFLR and admin
 * queue work to iflib tasks, and schedules the queue thread when queue 0
 * caused the interrupt.
 */
467 struct ixl_pf *pf = arg;
468 struct i40e_hw *hw = &pf->hw;
469 struct ixl_vsi *vsi = &pf->vsi;
470 struct ixl_rx_queue *que = vsi->rx_queues;
476 // TODO: Check against proper field
478 /* Clear PBA at start of ISR if using legacy interrupts */
480 wr32(hw, I40E_PFINT_DYN_CTL0,
481 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
482 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
/* Reading ICR0 also clears the latched interrupt causes. */
485 icr0 = rd32(hw, I40E_PFINT_ICR0);
489 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
490 iflib_iov_intr_deferred(vsi->ctx);
493 // TODO!: Do the stuff that's done in ixl_msix_adminq here, too!
494 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
495 iflib_admin_intr_deferred(vsi->ctx);
497 // TODO: Is intr0 enabled somewhere else?
498 ixl_enable_intr0(hw);
/* Only schedule the RX thread when queue 0 actually interrupted. */
500 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
501 return (FILTER_SCHEDULE_THREAD);
503 return (FILTER_HANDLED);
507 /*********************************************************************
509 * MSI-X VSI Interrupt Service routine
511 **********************************************************************/
/*
 * Per-queue MSI-X filter: updates the RX queue's dynamic ITR, then always
 * hands off to the iflib queue thread.
 */
513 ixl_msix_que(void *arg)
515 struct ixl_rx_queue *rx_que = arg;
519 ixl_set_queue_rx_itr(rx_que);
520 // ixl_set_queue_tx_itr(que);
522 return (FILTER_SCHEDULE_THREAD);
526 /*********************************************************************
528 * MSI-X Admin Queue Interrupt Service routine
530 **********************************************************************/
/*
 * Vector-0 (admin queue / "other causes") MSI-X filter.  Reads ICR0,
 * masks out causes that need servicing before re-enable, records
 * MDD/reset/critical-error state in pf->state, and returns
 * FILTER_SCHEDULE_THREAD when the admin task must run (do_task path;
 * some branches elided in this view).
 */
532 ixl_msix_adminq(void *arg)
534 struct ixl_pf *pf = arg;
535 struct i40e_hw *hw = &pf->hw;
536 device_t dev = pf->dev;
537 u32 reg, mask, rstat_reg;
538 bool do_task = FALSE;
540 DDPRINTF(dev, "begin");
/* Reading ICR0 clears the latched causes. */
544 reg = rd32(hw, I40E_PFINT_ICR0);
546 * For masking off interrupt causes that need to be handled before
547 * they can be re-enabled
549 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
551 /* Check on the cause */
552 if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
553 mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
/* Malicious Driver Detection: leave masked until the task handles it. */
557 if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
558 mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
559 atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
563 if (reg & I40E_PFINT_ICR0_GRST_MASK) {
564 mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
565 device_printf(dev, "Reset Requested!\n");
566 rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
567 rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
568 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
569 device_printf(dev, "Reset type: ");
571 /* These others might be handled similarly to an EMPR reset */
572 case I40E_RESET_CORER:
575 case I40E_RESET_GLOBR:
578 case I40E_RESET_EMPR:
585 /* overload admin queue task to check reset progress */
586 atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
591 * PE / PCI / ECC exceptions are all handled in the same way:
592 * mask out these three causes, then request a PF reset
594 * TODO: I think at least ECC error requires a GLOBR, not PFR
596 if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
597 device_printf(dev, "ECC Error detected!\n");
598 if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
599 device_printf(dev, "PCI Exception detected!\n");
600 if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
601 device_printf(dev, "Critical Protocol Engine Error detected!\n");
602 /* Checks against the conditions above */
603 if (reg & IXL_ICR0_CRIT_ERR_MASK) {
604 mask &= ~IXL_ICR0_CRIT_ERR_MASK;
605 atomic_set_32(&pf->state,
606 IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR);
610 // TODO: Linux driver never re-enables this interrupt once it has been detected
611 // Then what is supposed to happen? A PF reset? Should it never happen?
612 // TODO: Parse out this error into something human readable
613 if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
614 reg = rd32(hw, I40E_PFHMC_ERRORINFO);
615 if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
616 device_printf(dev, "HMC Error detected!\n");
617 device_printf(dev, "INFO 0x%08x\n", reg);
618 reg = rd32(hw, I40E_PFHMC_ERRORDATA);
619 device_printf(dev, "DATA 0x%08x\n", reg);
/* Write 0 to acknowledge/clear the HMC error info register. */
620 wr32(hw, I40E_PFHMC_ERRORINFO, 0);
625 if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
626 mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
627 iflib_iov_intr_deferred(pf->vsi.ctx);
/* Re-enable only the causes that were not deferred, then re-arm intr0. */
631 wr32(hw, I40E_PFINT_ICR0_ENA, mask);
632 ixl_enable_intr0(hw);
635 return (FILTER_SCHEDULE_THREAD);
637 return (FILTER_HANDLED);
/*
 * if_foreach_llmaddr() callback: adds one link-level multicast address to
 * the VSI's filter list.  Return value line elided in this view.
 */
641 ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
643 struct ixl_vsi *vsi = arg;
645 ixl_add_mc_filter(vsi, (u8*)LLADDR(sdl));
650 /*********************************************************************
653 * Routines for multicast and vlan filter management.
655 *********************************************************************/
/*
 * Syncs the interface's multicast list into hardware.  If the list is at
 * or over MAX_MULTICAST_ADDR, falls back to multicast-promiscuous mode
 * instead of programming individual filters.
 */
657 ixl_add_multi(struct ixl_vsi *vsi)
659 struct ifnet *ifp = vsi->ifp;
660 struct i40e_hw *hw = vsi->hw;
663 IOCTL_DEBUGOUT("ixl_add_multi: begin");
666 ** First just get a count, to decide if we
667 ** we simply use multicast promiscuous.
669 mcnt = if_llmaddr_count(ifp);
670 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
671 /* delete existing MC filters */
672 ixl_del_hw_filters(vsi, mcnt);
673 i40e_aq_set_vsi_multicast_promiscuous(hw,
674 vsi->seid, TRUE, NULL);
/* Otherwise add each address via the ixl_add_maddr() callback, then
 * push all collected filters to hardware in one batch. */
678 mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, vsi);
680 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
681 ixl_add_hw_filters(vsi, flags, mcnt);
684 IOCTL_DEBUGOUT("ixl_add_multi: end");
/*
 * if_foreach_llmaddr() callback: compares a filter's MAC against one
 * link-level address; used to detect filters no longer in the ifp list.
 * Return value lines elided in this view.
 */
688 ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
690 struct ixl_mac_filter *f = arg;
692 if (cmp_etheraddr(f->macaddr, (u8 *)LLADDR(sdl)))
/*
 * Marks for deletion every multicast filter that no longer appears in the
 * interface's link-level address list, then removes them from hardware.
 */
699 ixl_del_multi(struct ixl_vsi *vsi)
701 struct ifnet *ifp = vsi->ifp;
702 struct ixl_mac_filter *f;
705 IOCTL_DEBUGOUT("ixl_del_multi: begin");
/* A zero match count from ixl_match_maddr() means the filter's address
 * is gone from the interface, so flag it IXL_FILTER_DEL. */
707 SLIST_FOREACH(f, &vsi->ftl, next)
708 if ((f->flags & IXL_FILTER_USED) &&
709 (f->flags & IXL_FILTER_MC) &&
710 (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0)) {
711 f->flags |= IXL_FILTER_DEL;
716 ixl_del_hw_filters(vsi, mcnt);
/*
 * Logs a link-up notice including speed, requested and negotiated FEC,
 * autoneg status, and flow-control mode, all derived from
 * hw->phy.link_info.
 */
722 ixl_link_up_msg(struct ixl_pf *pf)
724 struct i40e_hw *hw = &pf->hw;
725 struct ifnet *ifp = pf->vsi.ifp;
726 char *req_fec_string, *neg_fec_string;
729 fec_abilities = hw->phy.link_info.req_fec_info;
730 /* If both RS and KR are requested, only show RS */
731 if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
732 req_fec_string = ixl_fec_string[0];
733 else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
734 req_fec_string = ixl_fec_string[1];
736 req_fec_string = ixl_fec_string[2];
/* Negotiated FEC: same RS-over-KR preference as above. */
738 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
739 neg_fec_string = ixl_fec_string[0];
740 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
741 neg_fec_string = ixl_fec_string[1];
743 neg_fec_string = ixl_fec_string[2];
745 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
747 ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
748 req_fec_string, neg_fec_string,
749 (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
/* Flow control string: both TX+RX pause -> [3], TX only -> [2],
 * RX only -> [1], none -> [0]. */
750 (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
751 hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
752 ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
753 ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
754 ixl_fc_string[1] : ixl_fc_string[0]);
758 * Configure admin queue/misc interrupt cause registers in hardware.
/*
 * Sets up MSI-X vector 0 ("other causes"): enables the non-queue
 * interrupt causes, detaches vector 0 from the queue linked list, sets a
 * default ITR, and associates causes with the no-ITR index.
 */
761 ixl_configure_intr0_msix(struct ixl_pf *pf)
763 struct i40e_hw *hw = &pf->hw;
766 /* First set up the adminq - vector 0 */
767 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
768 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
/* Enable the "other cause" interrupts serviced by ixl_msix_adminq(). */
770 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
771 I40E_PFINT_ICR0_ENA_GRST_MASK |
772 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
773 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
774 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
775 I40E_PFINT_ICR0_ENA_VFLR_MASK |
776 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
777 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
778 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
781 * 0x7FF is the end of the queue list.
782 * This means we won't use MSI-X vector 0 for a queue interrupt
785 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
786 /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
787 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
789 wr32(hw, I40E_PFINT_DYN_CTL0,
790 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
791 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
793 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
797 * Configure queue interrupt cause registers in hardware.
799 * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
/*
 * For each queue pair, builds the per-vector interrupt cause chain:
 * vector i+1 heads a list of queue i's RX cause, which chains to the TX
 * cause, which terminates the list (IXL_QUEUE_EOL).
 */
802 ixl_configure_queue_intr_msix(struct ixl_pf *pf)
804 struct i40e_hw *hw = &pf->hw;
805 struct ixl_vsi *vsi = &pf->vsi;
809 // TODO: See if max is really necessary
810 for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
811 /* Make sure interrupt is disabled */
812 wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
813 /* Set linked list head to point to corresponding RX queue
814 * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */
815 reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
816 & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
817 ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
818 & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
819 wr32(hw, I40E_PFINT_LNKLSTN(i), reg);
/* RX cause: route to 'vector', use RX ITR, next entry is the TX queue. */
821 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
822 (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
823 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
824 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
825 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
826 wr32(hw, I40E_QINT_RQCTL(i), reg);
/* TX cause: same vector, TX ITR, terminates the linked list. */
828 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
829 (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
830 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
831 (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
832 (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
833 wr32(hw, I40E_QINT_TQCTL(i), reg);
838 * Configure for single interrupt vector operation
/*
 * Legacy/MSI setup: programs ITR0 for TX and RX, enables all "other"
 * causes, and chains queue pair 0's RX and TX causes onto the single
 * vector.  Assumes exactly one queue pair in this mode.
 */
841 ixl_configure_legacy(struct ixl_pf *pf)
843 struct i40e_hw *hw = &pf->hw;
844 struct ixl_vsi *vsi = &pf->vsi;
850 vsi->tx_itr_setting = pf->tx_itr;
851 wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
852 vsi->tx_itr_setting);
853 txr->itr = vsi->tx_itr_setting;
855 vsi->rx_itr_setting = pf->rx_itr;
856 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
857 vsi->rx_itr_setting);
858 rxr->itr = vsi->rx_itr_setting;
859 /* XXX: Assuming only 1 queue in single interrupt mode */
861 vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;
863 /* Setup "other" causes */
864 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
865 | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
866 | I40E_PFINT_ICR0_ENA_GRST_MASK
867 | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
868 | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
869 | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
870 | I40E_PFINT_ICR0_ENA_VFLR_MASK
871 | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
873 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
875 /* No ITR for non-queue interrupts */
876 wr32(hw, I40E_PFINT_STAT_CTL0,
877 IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
879 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
880 wr32(hw, I40E_PFINT_LNKLST0, 0);
882 /* Associate the queue pair to the vector and enable the q int */
/* RX cause for queue 0: RX ITR, next entry is the TX queue. */
883 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
884 | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
885 | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
886 wr32(hw, I40E_QINT_RQCTL(0), reg);
/* TX cause for queue 0: TX ITR, terminates the cause list. */
888 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
889 | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
890 | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
891 wr32(hw, I40E_QINT_TQCTL(0), reg);
/*
 * Releases interrupt and PCI memory resources: the VSI/admin IRQ, each RX
 * queue's IRQ, and the BAR mapping (if allocated).  Safe to call before
 * stations are fully set up.
 */
895 ixl_free_pci_resources(struct ixl_pf *pf)
897 struct ixl_vsi *vsi = &pf->vsi;
898 device_t dev = iflib_get_dev(vsi->ctx);
899 struct ixl_rx_queue *rx_que = vsi->rx_queues;
901 /* We may get here before stations are set up */
906 ** Release all MSI-X VSI resources:
908 iflib_irq_free(vsi->ctx, &vsi->irq);
910 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
911 iflib_irq_free(vsi->ctx, &rx_que->que_irq);
913 if (pf->pci_mem != NULL)
914 bus_release_resource(dev, SYS_RES_MEMORY,
915 rman_get_rid(pf->pci_mem), pf->pci_mem);
/*
 * Translates the device's PHY capability bitmap into ifmedia entries so
 * userland (ifconfig) can display/select supported media types.  Each
 * check maps one or more I40E_CAP_PHY_TYPE_* bits to an IFM_* media word.
 */
919 ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
921 /* Display supported media types */
922 if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
923 ifmedia_add(vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
925 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
926 ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
927 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
928 ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
929 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
930 ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
/* Several 10G attachment types are all reported as TWINAX media. */
932 if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
933 phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
934 phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
935 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
937 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
938 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
939 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
940 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
941 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
942 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
/* Likewise, multiple 40G copper/backplane variants map to 40G_CR4. */
944 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
945 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
946 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
947 phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
948 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
949 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
950 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
951 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
952 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
953 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
955 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
956 ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
958 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
959 || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
960 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
961 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
962 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
963 if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
964 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
965 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
966 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
967 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
968 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
970 if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
971 ifmedia_add(vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
973 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
974 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
975 if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
976 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
978 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
979 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
980 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
981 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
982 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
983 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
984 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
985 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
986 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
987 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
988 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
989 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
992 /*********************************************************************
994 * Setup networking device structure and register an interface.
996 **********************************************************************/
/*
 * Finishes ifnet setup: computes the max frame size, queries PHY
 * capabilities (with a one-shot retry for slow fiber detection), records
 * supported speeds, sets the baudrate, and populates/sets ifmedia with
 * autoselect as the default.
 */
998 ixl_setup_interface(device_t dev, struct ixl_pf *pf)
1000 struct ixl_vsi *vsi = &pf->vsi;
1001 if_ctx_t ctx = vsi->ctx;
1002 struct i40e_hw *hw = &pf->hw;
1003 struct ifnet *ifp = iflib_get_ifp(ctx);
1004 struct i40e_aq_get_phy_abilities_resp abilities;
1005 enum i40e_status_code aq_error = 0;
1007 INIT_DBG_DEV(dev, "begin");
/* Max frame = MTU + Ethernet header + CRC + one VLAN tag. */
1009 vsi->shared->isc_max_frame_size =
1010 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1011 + ETHER_VLAN_ENCAP_LEN;
1013 aq_error = i40e_aq_get_phy_capabilities(hw,
1014 FALSE, TRUE, &abilities, NULL);
1015 /* May need delay to detect fiber correctly */
1016 if (aq_error == I40E_ERR_UNKNOWN_PHY) {
1017 /* TODO: Maybe just retry this in a task... */
1018 i40e_msec_delay(200);
1019 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
1020 TRUE, &abilities, NULL);
1023 if (aq_error == I40E_ERR_UNKNOWN_PHY)
1024 device_printf(dev, "Unknown PHY type detected!\n");
1027 "Error getting supported media types, err %d,"
1028 " AQ error %d\n", aq_error, hw->aq.asq_last_status);
1030 pf->supported_speeds = abilities.link_speed;
/* if_initbaudrate() was the pre-1100000 API; if_setbaudrate after. */
1031 #if __FreeBSD_version >= 1100000
1032 if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
1034 if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
1037 ixl_add_ifmedia(vsi, hw->phy.phy_types);
1040 /* Use autoselect media by default */
1041 ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1042 ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
1048 * Input: bitmap of enum i40e_aq_link_speed
/*
 * Returns the baudrate (in bps units via IF_Mbps/IF_Gbps; return lines
 * partially elided here) for the fastest speed set in the bitmap; checks
 * are ordered highest-first so the first hit wins.
 */
1051 ixl_max_aq_speed_to_value(u8 link_speeds)
1053 if (link_speeds & I40E_LINK_SPEED_40GB)
1055 if (link_speeds & I40E_LINK_SPEED_25GB)
1057 if (link_speeds & I40E_LINK_SPEED_20GB)
1059 if (link_speeds & I40E_LINK_SPEED_10GB)
1061 if (link_speeds & I40E_LINK_SPEED_1GB)
1063 if (link_speeds & I40E_LINK_SPEED_100MB)
1064 return IF_Mbps(100);
1066 /* Minimum supported link speed */
1067 return IF_Mbps(100);
1071 ** Run when the Admin Queue gets a link state change interrupt.
/*
 * Admin-queue link event handler: refreshes cached link status from
 * firmware and warns when link is down because the plugged module failed
 * qualification.  OS-visible link state is updated elsewhere.
 */
1074 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1076 struct i40e_hw *hw = &pf->hw;
1077 device_t dev = iflib_get_dev(pf->vsi.ctx);
1078 struct i40e_aqc_get_link_status *status =
1079 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
1081 /* Request link status from adapter */
1082 hw->phy.get_link_info = TRUE;
1083 i40e_get_link_status(hw, &pf->link_up);
1085 /* Print out message if an unqualified module is found */
1086 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
1087 (pf->advertised_speed) &&
1088 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
1089 (!(status->link_info & I40E_AQ_LINK_UP)))
1090 device_printf(dev, "Link failed because "
1091 "an unqualified module was detected!\n");
1093 /* OS link info is updated elsewhere */
1096 /*********************************************************************
1098 * Get Firmware Switch configuration
1099 * - this will need to be more robust when more complex
1100 * switch configurations are enabled.
1102 **********************************************************************/
/*
 * Fetches the firmware switch configuration via the admin queue,
 * optionally dumps it when IXL_DBG_SWITCH_INFO is enabled, and records
 * element 0's SEIDs on the (single) VSI.
 */
1104 ixl_switch_config(struct ixl_pf *pf)
1106 struct i40e_hw *hw = &pf->hw;
1107 struct ixl_vsi *vsi = &pf->vsi;
1108 device_t dev = iflib_get_dev(vsi->ctx);
1109 struct i40e_aqc_get_switch_config_resp *sw_config;
1110 u8 aq_buf[I40E_AQ_LARGE_BUF];
/* The response is parsed in place out of the zeroed stack buffer. */
1114 memset(&aq_buf, 0, sizeof(aq_buf));
1115 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
1116 ret = i40e_aq_get_switch_config(hw, sw_config,
1117 sizeof(aq_buf), &next, NULL);
1119 device_printf(dev, "aq_get_switch_config() failed, error %d,"
1120 " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
1123 if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
1125 "Switch config: header reported: %d in structure, %d total\n",
1126 sw_config->header.num_reported, sw_config->header.num_total);
1127 for (int i = 0; i < sw_config->header.num_reported; i++) {
1129 "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
1130 sw_config->element[i].element_type,
1131 sw_config->element[i].seid,
1132 sw_config->element[i].uplink_seid,
1133 sw_config->element[i].downlink_seid);
1136 /* Simplified due to a single VSI */
1137 vsi->uplink_seid = sw_config->element[0].uplink_seid;
1138 vsi->downlink_seid = sw_config->element[0].downlink_seid;
1139 vsi->seid = sw_config->element[0].seid;
1143 /*********************************************************************
1145 * Initialize the VSI: this handles contexts, which means things
1146 * like the number of descriptors, buffer size,
1147 * plus we init the rings thru this function.
1149 **********************************************************************/
/*
 * ixl_initialize_vsi
 *
 * Program the PF VSI into hardware: fetch/update the VSI context over
 * the admin queue (queue mapping, VLAN stripping, iWARP TCP enable),
 * reset VSI stats, then build and write the HMC TX and RX queue
 * contexts for every queue and (re)init the rings.
 *
 * NOTE(review): listing has gaps; error-path returns and closing braces
 * are not visible here.
 */
1151 ixl_initialize_vsi(struct ixl_vsi *vsi)
1153 struct ixl_pf *pf = vsi->back;
1154 if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx);
1155 struct ixl_tx_queue *tx_que = vsi->tx_queues;
1156 struct ixl_rx_queue *rx_que = vsi->rx_queues;
1157 device_t dev = iflib_get_dev(vsi->ctx);
1158 struct i40e_hw *hw = vsi->hw;
1159 struct i40e_vsi_context ctxt;
/* Ask firmware for the current VSI parameters before updating them. */
1163 memset(&ctxt, 0, sizeof(ctxt));
1164 ctxt.seid = vsi->seid;
1165 if (pf->veb_seid != 0)
1166 ctxt.uplink_seid = pf->veb_seid;
1167 ctxt.pf_num = hw->pf_id;
1168 err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
1170 device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
1171 " aq_error %d\n", err, hw->aq.asq_last_status);
1174 ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
1175 "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
1176 "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
1177 "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
1178 ctxt.uplink_seid, ctxt.vsi_number,
1179 ctxt.vsis_allocated, ctxt.vsis_unallocated,
1180 ctxt.flags, ctxt.pf_num, ctxt.vf_num,
1181 ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
1183 ** Set the queue and traffic class bits
1184 ** - when multiple traffic classes are supported
1185 ** this will need to be more robust.
1187 ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1188 ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
1189 /* In contig mode, que_mapping[0] is first queue index used by this VSI */
1190 ctxt.info.queue_mapping[0] = 0;
1192 * This VSI will only use traffic class 0; start traffic class 0's
1193 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
1194 * the driver may not use all of them).
/* fls(num_allocated) - 1 == log2 of the (power-of-two) queue count. */
1196 tc_queues = fls(pf->qtag.num_allocated) - 1;
1197 ctxt.info.tc_mapping[0] = ((pf->qtag.first_qidx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
1198 & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
1199 ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
1200 & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
1202 /* Set VLAN receive stripping mode */
1203 ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
1204 ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
1205 if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
1206 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1208 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1211 /* Set TCP Enable for iWARP capable VSI */
1212 if (ixl_enable_iwarp && pf->iw_enabled) {
1213 ctxt.info.valid_sections |=
1214 htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
1215 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
1218 /* Save VSI number and info for use later */
1219 vsi->vsi_num = ctxt.vsi_number;
1220 bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1222 /* Reset VSI statistics */
1223 ixl_vsi_reset_stats(vsi);
1224 vsi->hw_filters_add = 0;
1225 vsi->hw_filters_del = 0;
1227 ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
/* Push the modified context back to firmware. */
1229 err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
1231 device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
1232 " aq_error %d\n", err, hw->aq.asq_last_status);
/* Per-queue HMC TX context setup. */
1236 for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
1237 struct tx_ring *txr = &tx_que->txr;
1238 struct i40e_hmc_obj_txq tctx;
1241 /* Setup the HMC TX Context */
1242 bzero(&tctx, sizeof(tctx));
1243 tctx.new_context = 1;
1244 tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
1245 tctx.qlen = scctx->isc_ntxd[0];
1246 tctx.fc_ena = 0; /* Disable FCoE */
1248 * This value needs to be pulled from the VSI that this queue
1249 * is assigned to. Index into array is traffic class.
1251 tctx.rdylist = vsi->info.qs_handle[0];
1253 * Set these to enable Head Writeback
1254 * - Address is last entry in TX ring (reserved for HWB index)
1255 * Leave these as 0 for Descriptor Writeback
1257 if (vsi->enable_head_writeback) {
1258 tctx.head_wb_ena = 1;
1259 tctx.head_wb_addr = txr->tx_paddr +
1260 (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
1262 tctx.head_wb_ena = 0;
1263 tctx.head_wb_addr = 0;
1265 tctx.rdylist_act = 0;
1266 err = i40e_clear_lan_tx_queue_context(hw, i);
1268 device_printf(dev, "Unable to clear TX context\n");
1271 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
1273 device_printf(dev, "Unable to set TX context\n");
1276 /* Associate the ring with this PF */
1277 txctl = I40E_QTX_CTL_PF_QUEUE;
1278 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
1279 I40E_QTX_CTL_PF_INDX_MASK);
1280 wr32(hw, I40E_QTX_CTL(i), txctl);
1283 /* Do ring (re)init */
1284 ixl_init_tx_ring(vsi, tx_que);
/* Per-queue HMC RX context setup. */
1286 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
1287 struct rx_ring *rxr = &rx_que->rxr;
1288 struct i40e_hmc_obj_rxq rctx;
1290 /* Next setup the HMC RX Context */
1291 rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
/* Largest frame the chained RX buffers can hold. */
1293 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
1295 /* Set up an RX context for the HMC */
1296 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
1297 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
1298 /* ignore header split for now */
1299 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
1300 rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
1301 scctx->isc_max_frame_size : max_rxmax;
1303 rctx.dsize = 1; /* do 32byte descriptors */
1304 rctx.hsplit_0 = 0; /* no header split */
1305 rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
1306 rctx.qlen = scctx->isc_nrxd[0];
1307 rctx.tphrdesc_ena = 1;
1308 rctx.tphwdesc_ena = 1;
1309 rctx.tphdata_ena = 0; /* Header Split related */
1310 rctx.tphhead_ena = 0; /* Header Split related */
1311 rctx.lrxqthresh = 1; /* Interrupt at <64 desc avail */
1314 rctx.showiv = 1; /* Strip inner VLAN header */
1315 rctx.fc_ena = 0; /* Disable FCoE */
1316 rctx.prefena = 1; /* Prefetch descriptors */
1318 err = i40e_clear_lan_rx_queue_context(hw, i);
1321 "Unable to clear RX context %d\n", i);
1324 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
1326 device_printf(dev, "Unable to set RX context %d\n", i);
/* Reset the RX tail pointer for this queue. */
1329 wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
/*
 * ixl_free_mac_filters
 *
 * Drain the VSI's software MAC filter list, removing entries from the
 * head until empty.  NOTE(review): the line freeing each entry is not
 * visible in this listing — presumably free(f, ...) follows; confirm.
 */
1335 ixl_free_mac_filters(struct ixl_vsi *vsi)
1337 struct ixl_mac_filter *f;
1339 while (!SLIST_EMPTY(&vsi->ftl)) {
1340 f = SLIST_FIRST(&vsi->ftl);
1341 SLIST_REMOVE_HEAD(&vsi->ftl, next);
1347 ** Provide an update to the queue RX
1348 ** interrupt moderation value.
/*
 * ixl_set_queue_rx_itr
 *
 * Recompute one RX queue's interrupt throttle rate (ITR).  In dynamic
 * mode the latency class is adjusted from the bytes-per-ITR-interval
 * ratio and the new ITR is exponentially smoothed before being written
 * to I40E_PFINT_ITRN; otherwise the queue is resynced to the static
 * per-VSI setting.
 */
1351 ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
1353 struct ixl_vsi *vsi = que->vsi;
1354 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1355 struct i40e_hw *hw = vsi->hw;
1356 struct rx_ring *rxr = &que->rxr;
1361 /* Idle, do nothing */
1362 if (rxr->bytes == 0)
1365 if (pf->dynamic_rx_itr) {
/* Bytes received per ITR interval — drives the latency class. */
1366 rx_bytes = rxr->bytes/rxr->itr;
1369 /* Adjust latency range */
1370 switch (rxr->latency) {
1371 case IXL_LOW_LATENCY:
1372 if (rx_bytes > 10) {
1373 rx_latency = IXL_AVE_LATENCY;
1374 rx_itr = IXL_ITR_20K;
1377 case IXL_AVE_LATENCY:
1378 if (rx_bytes > 20) {
1379 rx_latency = IXL_BULK_LATENCY;
1380 rx_itr = IXL_ITR_8K;
1381 } else if (rx_bytes <= 10) {
1382 rx_latency = IXL_LOW_LATENCY;
1383 rx_itr = IXL_ITR_100K;
1386 case IXL_BULK_LATENCY:
1387 if (rx_bytes <= 20) {
1388 rx_latency = IXL_AVE_LATENCY;
1389 rx_itr = IXL_ITR_20K;
1394 rxr->latency = rx_latency;
1396 if (rx_itr != rxr->itr) {
1397 /* do an exponential smoothing */
1398 rx_itr = (10 * rx_itr * rxr->itr) /
1399 ((9 * rx_itr) + rxr->itr);
1400 rxr->itr = min(rx_itr, IXL_MAX_ITR);
1401 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
1402 rxr->me), rxr->itr);
1404 } else { /* We may have toggled to non-dynamic */
1405 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
1406 vsi->rx_itr_setting = pf->rx_itr;
1407 /* Update the hardware if needed */
1408 if (rxr->itr != vsi->rx_itr_setting) {
1409 rxr->itr = vsi->rx_itr_setting;
1410 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
1411 rxr->me), rxr->itr);
1420 ** Provide an update to the queue TX
1421 ** interrupt moderation value.
/*
 * ixl_set_queue_tx_itr
 *
 * TX counterpart of ixl_set_queue_rx_itr(): recompute one TX queue's
 * interrupt throttle rate, either dynamically (latency class plus
 * exponential smoothing, then write I40E_PFINT_ITRN) or by resyncing
 * to the static per-VSI TX ITR setting.
 */
1424 ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
1426 struct ixl_vsi *vsi = que->vsi;
1427 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1428 struct i40e_hw *hw = vsi->hw;
1429 struct tx_ring *txr = &que->txr;
1435 /* Idle, do nothing */
1436 if (txr->bytes == 0)
1439 if (pf->dynamic_tx_itr) {
/* Bytes sent per ITR interval — drives the latency class. */
1440 tx_bytes = txr->bytes/txr->itr;
1443 switch (txr->latency) {
1444 case IXL_LOW_LATENCY:
1445 if (tx_bytes > 10) {
1446 tx_latency = IXL_AVE_LATENCY;
1447 tx_itr = IXL_ITR_20K;
1450 case IXL_AVE_LATENCY:
1451 if (tx_bytes > 20) {
1452 tx_latency = IXL_BULK_LATENCY;
1453 tx_itr = IXL_ITR_8K;
1454 } else if (tx_bytes <= 10) {
1455 tx_latency = IXL_LOW_LATENCY;
1456 tx_itr = IXL_ITR_100K;
1459 case IXL_BULK_LATENCY:
1460 if (tx_bytes <= 20) {
1461 tx_latency = IXL_AVE_LATENCY;
1462 tx_itr = IXL_ITR_20K;
1467 txr->latency = tx_latency;
1469 if (tx_itr != txr->itr) {
1470 /* do an exponential smoothing */
1471 tx_itr = (10 * tx_itr * txr->itr) /
1472 ((9 * tx_itr) + txr->itr);
1473 txr->itr = min(tx_itr, IXL_MAX_ITR);
1474 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1475 txr->me), txr->itr);
1478 } else { /* We may have toggled to non-dynamic */
1479 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
1480 vsi->tx_itr_setting = pf->tx_itr;
1481 /* Update the hardware if needed */
1482 if (txr->itr != vsi->tx_itr_setting) {
1483 txr->itr = vsi->tx_itr_setting;
1484 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1485 txr->me), txr->itr);
1495 * ixl_sysctl_qtx_tail_handler
1496 * Retrieves I40E_QTX_TAIL value from hardware
/*
 * Sysctl handler: read the TX queue's current tail register and report
 * it to userland (read-only; rejects writes via the !req->newptr path).
 */
1500 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
1502 struct ixl_tx_queue *tx_que;
1506 tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
1507 if (!tx_que) return 0;
1509 val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
1510 error = sysctl_handle_int(oidp, &val, 0, req);
1511 if (error || !req->newptr)
1517 * ixl_sysctl_qrx_tail_handler
1518 * Retrieves I40E_QRX_TAIL value from hardware
/*
 * Sysctl handler: read the RX queue's current tail register and report
 * it to userland (read-only; rejects writes via the !req->newptr path).
 */
1522 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
1524 struct ixl_rx_queue *rx_que;
1528 rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
1529 if (!rx_que) return 0;
1531 val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
1532 error = sysctl_handle_int(oidp, &val, 0, req);
1533 if (error || !req->newptr)
1540 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
1541 * Writes to the ITR registers immediately.
/*
 * Sysctl handler: set the static TX ITR for the PF.  Rejects the write
 * when dynamic TX ITR is enabled or the value is outside [0, IXL_MAX_ITR];
 * otherwise stores it and reprograms the hardware immediately.
 */
1544 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
1546 struct ixl_pf *pf = (struct ixl_pf *)arg1;
1547 device_t dev = pf->dev;
1549 int requested_tx_itr;
1551 requested_tx_itr = pf->tx_itr;
1552 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
1553 if ((error) || (req->newptr == NULL))
1555 if (pf->dynamic_tx_itr) {
1557 "Cannot set TX itr value while dynamic TX itr is enabled\n");
1560 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
1562 "Invalid TX itr value; value must be between 0 and %d\n",
1567 pf->tx_itr = requested_tx_itr;
1568 ixl_configure_tx_itr(pf);
1574 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
1575 * Writes to the ITR registers immediately.
/*
 * Sysctl handler: set the static RX ITR for the PF.  Mirrors
 * ixl_sysctl_pf_tx_itr(): rejects writes while dynamic RX ITR is on or
 * when out of range, otherwise stores and reprograms hardware.
 */
1578 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
1580 struct ixl_pf *pf = (struct ixl_pf *)arg1;
1581 device_t dev = pf->dev;
1583 int requested_rx_itr;
1585 requested_rx_itr = pf->rx_itr;
1586 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
1587 if ((error) || (req->newptr == NULL))
1589 if (pf->dynamic_rx_itr) {
1591 "Cannot set RX itr value while dynamic RX itr is enabled\n");
1594 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
1596 "Invalid RX itr value; value must be between 0 and %d\n",
1601 pf->rx_itr = requested_rx_itr;
1602 ixl_configure_rx_itr(pf);
/*
 * ixl_add_hw_stats
 *
 * Attach the PF's sysctl statistics tree: driver-level counters
 * (admin queue IRQs), per-VSI and per-queue nodes, and MAC port stats.
 */
1608 ixl_add_hw_stats(struct ixl_pf *pf)
1610 struct ixl_vsi *vsi = &pf->vsi;
1611 device_t dev = iflib_get_dev(vsi->ctx);
1612 struct i40e_hw_port_stats *pf_stats = &pf->stats;
1614 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1615 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1616 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1618 /* Driver statistics */
1619 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
1620 CTLFLAG_RD, &pf->admin_irq,
1621 "Admin Queue IRQs received");
1623 ixl_add_vsi_sysctls(dev, vsi, ctx, "pf");
1625 ixl_add_queues_sysctls(dev, vsi);
1627 ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
/*
 * ixl_add_sysctls_mac_stats
 *
 * Create the "mac" sysctl node and register every MAC-level port
 * statistic from the given i40e_hw_port_stats as a read-only UQUAD,
 * plus the shared ethernet stats via ixl_add_sysctls_eth_stats().
 * The ctls[] table is walked until its NULL-stat terminator.
 */
1631 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
1632 struct sysctl_oid_list *child,
1633 struct i40e_hw_port_stats *stats)
1635 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1636 CTLFLAG_RD, NULL, "Mac Statistics");
1637 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
1639 struct i40e_eth_stats *eth_stats = &stats->eth;
1640 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
/* Table of {counter pointer, sysctl name, description}. */
1642 struct ixl_sysctl_info ctls[] =
1644 {&stats->crc_errors, "crc_errors", "CRC Errors"},
1645 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
1646 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
1647 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
1648 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
1649 /* Packet Reception Stats */
1650 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
1651 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
1652 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
1653 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
1654 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
1655 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
1656 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
1657 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
1658 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
1659 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
1660 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
1661 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
1662 /* Packet Transmission Stats */
1663 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
1664 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
1665 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
1666 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
1667 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
1668 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
1669 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
1671 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
1672 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
1673 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
1674 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
/* Register each table entry until the terminating NULL stat. */
1679 struct ixl_sysctl_info *entry = ctls;
1680 while (entry->stat != 0)
1682 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
1683 CTLFLAG_RD, entry->stat,
1684 entry->description);
/*
 * ixl_set_rss_key
 *
 * Program the RSS hash key: from the kernel RSS subsystem when built
 * with it, else the driver default.  X722 parts take the key via the
 * admin queue; other MACs are written directly through PFQF_HKEY.
 */
1690 ixl_set_rss_key(struct ixl_pf *pf)
1692 struct i40e_hw *hw = &pf->hw;
1693 struct ixl_vsi *vsi = &pf->vsi;
1694 device_t dev = pf->dev;
1695 u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
1696 enum i40e_status_code status;
1699 /* Fetch the configured RSS key */
1700 rss_getkey((uint8_t *) &rss_seed);
1702 ixl_get_default_rss_key(rss_seed);
1704 /* Fill out hash function seed */
1705 if (hw->mac.type == I40E_MAC_X722) {
1706 struct i40e_aqc_get_set_rss_key_data key_data;
/* NOTE(review): 52 is presumably the AQ key-data size; confirm it
 * matches sizeof(key_data)/the seed buffer. */
1707 bcopy(rss_seed, &key_data, 52);
1708 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
1711 "i40e_aq_set_rss_key status %s, error %s\n",
1712 i40e_stat_str(hw, status),
1713 i40e_aq_str(hw, hw->aq.asq_last_status));
1715 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1716 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
1721 * Configure enabled PCTYPES for RSS.
/*
 * ixl_set_rss_pctypes
 *
 * Enable the RSS packet classification types (HENA bits): built from
 * the kernel RSS hash config when available, otherwise a per-MAC
 * default mask; then OR'd into the PFQF_HENA register pair.
 */
1724 ixl_set_rss_pctypes(struct ixl_pf *pf)
1726 struct i40e_hw *hw = &pf->hw;
1727 u64 set_hena = 0, hena;
1730 u32 rss_hash_config;
/* Translate each kernel RSS hash type into its i40e PCTYPE bit. */
1732 rss_hash_config = rss_gethashconfig();
1733 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1734 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1735 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1736 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1737 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1738 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1739 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1740 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1741 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1742 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1743 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1744 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1745 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1746 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
/* No kernel RSS: fall back to the per-MAC default HENA mask. */
1748 if (hw->mac.type == I40E_MAC_X722)
1749 set_hena = IXL_DEFAULT_RSS_HENA_X722;
1751 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
/* Merge with whatever is already enabled and write both halves. */
1753 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1754 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1756 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1757 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
/*
 * ixl_set_rss_hlut
 *
 * Fill the RSS lookup table, mapping each LUT slot to an RX queue in
 * round-robin fashion (or via the kernel RSS indirection table when
 * available).  X722 uses the admin queue; other MACs write PFQF_HLUT
 * registers directly, four entries per 32-bit register.
 */
1762 ixl_set_rss_hlut(struct ixl_pf *pf)
1764 struct i40e_hw *hw = &pf->hw;
1765 struct ixl_vsi *vsi = &pf->vsi;
1766 device_t dev = iflib_get_dev(vsi->ctx);
1768 int lut_entry_width;
1770 enum i40e_status_code status;
1772 lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
1774 /* Populate the LUT with max no. of queues in round robin fashion */
1776 for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
1779 * Fetch the RSS bucket id for the given indirection entry.
1780 * Cap it at the number of configured buckets (which is
1783 que_id = rss_get_indirection_to_bucket(i);
1784 que_id = que_id % vsi->num_rx_queues;
1786 que_id = i % vsi->num_rx_queues;
/* Mask the queue id to the hardware LUT entry width. */
1788 lut = (que_id & ((0x1 << lut_entry_width) - 1));
1792 if (hw->mac.type == I40E_MAC_X722) {
1793 status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
1795 device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1796 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1798 for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
1799 wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
1805 ** Setup the PF's RSS parameters.
/*
 * ixl_config_rss
 *
 * Configure RSS for the PF: hash key, enabled packet types, then the
 * queue lookup table — in that order.
 */
1808 ixl_config_rss(struct ixl_pf *pf)
1810 ixl_set_rss_key(pf);
1811 ixl_set_rss_pctypes(pf);
1812 ixl_set_rss_hlut(pf);
1816 ** This routine updates vlan filters; called by init,
1817 ** it scans the filter table and then updates the hw
1818 ** after a soft reset.
/*
 * ixl_setup_vlan_filters
 *
 * Re-register the VSI's VLAN filters with hardware after a soft reset:
 * scan the software filter list for VLAN entries, mark them for
 * addition, and push them via ixl_add_hw_filters().  No-op when the
 * VSI has no VLANs.
 */
1821 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
1823 struct ixl_mac_filter *f;
1826 if (vsi->num_vlans == 0)
1829 ** Scan the filter list for vlan entries,
1830 ** mark them for addition and then call
1831 ** for the AQ update.
1833 SLIST_FOREACH(f, &vsi->ftl, next) {
1834 if (f->flags & IXL_FILTER_VLAN) {
1842 printf("setup vlan: no filters found!\n");
1845 flags = IXL_FILTER_VLAN;
1846 flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
1847 ixl_add_hw_filters(vsi, flags, cnt);
1851 * In some firmware versions there is default MAC/VLAN filter
1852 * configured which interferes with filters managed by driver.
1853 * Make sure it's removed.
/*
 * ixl_del_default_hw_filters
 *
 * Remove the firmware's default MAC/VLAN filters for the permanent MAC
 * address, which would otherwise conflict with driver-managed filters.
 * Two removals are issued: a perfect match and a VLAN-ignoring match.
 */
1856 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1858 struct i40e_aqc_remove_macvlan_element_data e;
1860 bzero(&e, sizeof(e));
1861 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1863 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1864 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1866 bzero(&e, sizeof(e));
1867 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1869 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1870 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1871 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1875 ** Initialize filter list and add filters that the hardware
1876 ** needs to know about.
1878 ** Requires VSI's filter list & seid to be set before calling.
/*
 * ixl_init_filters
 *
 * Initialize the VSI's filter list and program the baseline hardware
 * filters: enable broadcast RX, drop the firmware's default filters,
 * add the primary MAC filter, and optionally install the filter that
 * drops driver-originated TX flow-control frames.
 * Requires vsi->seid to be valid before calling.
 */
1881 ixl_init_filters(struct ixl_vsi *vsi)
1883 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1885 /* Initialize mac filter list for VSI */
1886 SLIST_INIT(&vsi->ftl);
1888 /* Receive broadcast Ethernet frames */
1889 i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1891 ixl_del_default_hw_filters(vsi);
1893 ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1895 * Prevent Tx flow control frames from being sent out by
1896 * non-firmware transmitters.
1897 * This affects every VSI in the PF.
1899 if (pf->enable_tx_fc_filter)
1900 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1904 ** This routine adds multicast filters
/*
 * ixl_add_mc_filter
 *
 * Add a multicast MAC filter to the VSI, skipping the add when an
 * entry for the address (with VLAN wildcard) already exists.  Newly
 * created filters are flagged IXL_FILTER_MC.
 */
1907 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
1909 struct ixl_mac_filter *f;
1911 /* Does one already exist */
1912 f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1916 f = ixl_new_filter(vsi, macaddr, IXL_VLAN_ANY);
1918 f->flags |= IXL_FILTER_MC;
1920 printf("WARNING: no filter available!!\n");
/*
 * Re-push every in-use software filter to hardware (e.g. after reset).
 */
1924 ixl_reconfigure_filters(struct ixl_vsi *vsi)
1926 ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
1930 * This routine adds a MAC/VLAN filter to the software filter
1931 * list, then adds that new filter to the HW if it doesn't already
1932 * exist in the SW filter list.
/*
 * ixl_add_filter
 *
 * Add a MAC/VLAN filter to the software list and push it to hardware
 * if not already present.  When the first real VLAN is registered, the
 * VLAN-wildcard (IXL_VLAN_ANY) filter for the MAC is replaced with a
 * VLAN-0 filter so tagged and untagged traffic stay consistent.
 */
1935 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1937 struct ixl_mac_filter *f, *tmp;
1941 DEBUGOUT("ixl_add_filter: begin");
1946 /* Does one already exist */
1947 f = ixl_find_filter(vsi, macaddr, vlan);
1951 ** Is this the first vlan being registered, if so we
1952 ** need to remove the ANY filter that indicates we are
1953 ** not in a vlan, and replace that with a 0 filter.
1955 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1956 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1958 ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
1959 ixl_add_filter(vsi, macaddr, 0);
1963 f = ixl_new_filter(vsi, macaddr, vlan);
1965 device_printf(dev, "WARNING: no filter available!!\n");
1968 if (f->vlan != IXL_VLAN_ANY)
1969 f->flags |= IXL_FILTER_VLAN;
1973 f->flags |= IXL_FILTER_USED;
1974 ixl_add_hw_filters(vsi, f->flags, 1);
/*
 * ixl_del_filter
 *
 * Remove a MAC/VLAN filter: mark the matching software entry for
 * deletion, push the removal to hardware, and — when the last VLAN is
 * gone — swap the VLAN-0 filter back to the VLAN-wildcard form.
 */
1978 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1980 struct ixl_mac_filter *f;
1982 f = ixl_find_filter(vsi, macaddr, vlan);
1986 f->flags |= IXL_FILTER_DEL;
1987 ixl_del_hw_filters(vsi, 1);
1988 if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
1991 /* Check if this is the last vlan removal */
1992 if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
1993 /* Switch back to a non-vlan filter */
1994 ixl_del_filter(vsi, macaddr, 0);
1995 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
2001 ** Find the filter with both matching mac addr and vlan id
/*
 * ixl_find_filter
 *
 * Linear search of the VSI's software filter list for an entry whose
 * MAC address and VLAN both match.  NOTE(review): the return
 * statements are not visible in this listing; presumably returns the
 * match or NULL.
 */
2003 struct ixl_mac_filter *
2004 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
2006 struct ixl_mac_filter *f;
2008 SLIST_FOREACH(f, &vsi->ftl, next) {
2009 if ((cmp_etheraddr(f->macaddr, macaddr) != 0)
2010 && (f->vlan == vlan)) {
2019 ** This routine takes additions to the vsi filter
2020 ** table and creates an Admin Queue call to create
2021 ** the filters in the hardware.
/*
 * ixl_add_hw_filters
 *
 * Gather up to 'cnt' software filters whose flags match 'flags' into an
 * admin-queue add_macvlan array and submit them in one AQ call, then
 * bump the VSI's hardware-filter-add counter.  IXL_VLAN_ANY entries
 * are sent with the IGNORE_VLAN flag instead of a VLAN tag.
 */
2024 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
2026 struct i40e_aqc_add_macvlan_element_data *a, *b;
2027 struct ixl_mac_filter *f;
2031 enum i40e_status_code status;
2039 ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
/* M_NOWAIT: may be called from contexts that cannot sleep. */
2043 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
2044 M_DEVBUF, M_NOWAIT | M_ZERO);
2046 device_printf(dev, "add_hw_filters failed to get memory\n");
2051 ** Scan the filter list, each time we find one
2052 ** we add it to the admin queue array and turn off
2055 SLIST_FOREACH(f, &vsi->ftl, next) {
2056 if ((f->flags & flags) == flags) {
2057 b = &a[j]; // a pox on fvl long names :)
2058 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
2059 if (f->vlan == IXL_VLAN_ANY) {
2061 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2063 b->vlan_tag = f->vlan;
2066 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
/* Filter is now pending in hardware; clear the ADD mark. */
2067 f->flags &= ~IXL_FILTER_ADD;
2070 ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
2071 MAC_FORMAT_ARGS(f->macaddr));
2077 status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
2079 device_printf(dev, "i40e_aq_add_macvlan status %s, "
2080 "error %s\n", i40e_stat_str(hw, status),
2081 i40e_aq_str(hw, hw->aq.asq_last_status));
2083 vsi->hw_filters_add += j;
2090 ** This routine takes removals in the vsi filter
2091 ** table and creates an Admin Queue call to delete
2092 ** the filters in the hardware.
/*
 * ixl_del_hw_filters
 *
 * Collect up to 'cnt' software filters marked IXL_FILTER_DEL into an
 * admin-queue remove_macvlan array, unlink them from the software
 * list, submit one AQ removal, and update hw_filters_del — counting
 * only the entries the firmware actually removed on partial failure.
 */
2095 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
2097 struct i40e_aqc_remove_macvlan_element_data *d, *e;
2101 struct ixl_mac_filter *f, *f_temp;
2102 enum i40e_status_code status;
2109 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
2110 M_DEVBUF, M_NOWAIT | M_ZERO);
2112 device_printf(dev, "%s: failed to get memory\n", __func__);
/* _SAFE: entries are unlinked while iterating. */
2116 SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
2117 if (f->flags & IXL_FILTER_DEL) {
2118 e = &d[j]; // a pox on fvl long names :)
2119 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
2120 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2121 if (f->vlan == IXL_VLAN_ANY) {
2123 e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2125 e->vlan_tag = f->vlan;
2128 ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
2129 MAC_FORMAT_ARGS(f->macaddr));
2131 /* delete entry from vsi list */
2132 SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
2140 status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
/* On AQ error, count only per-element successes. */
2143 for (int i = 0; i < j; i++)
2144 sc += (!d[i].error_code);
2145 vsi->hw_filters_del += sc;
2147 "Failed to remove %d/%d filters, error %s\n",
2148 j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status));
2150 vsi->hw_filters_del += j;
/*
 * ixl_enable_tx_ring
 *
 * Enable one TX queue: translate the VSI-relative index to the PF
 * queue index, pre-configure, set QENA_REQ in QTX_ENA, then poll up to
 * 10 times (10us apart) for QENA_STAT to confirm the enable took.
 */
2157 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2159 struct i40e_hw *hw = &pf->hw;
2164 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2166 ixl_dbg(pf, IXL_DBG_EN_DIS,
2167 "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
2170 i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
2172 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2173 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
2174 I40E_QTX_ENA_QENA_STAT_MASK;
2175 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2176 /* Verify the enable took */
2177 for (int j = 0; j < 10; j++) {
2178 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2179 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2181 i40e_usec_delay(10);
2183 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
2184 device_printf(pf->dev, "TX queue %d still disabled!\n",
/*
 * ixl_enable_rx_ring
 *
 * Enable one RX queue: set QENA_REQ in QRX_ENA for the PF queue index
 * and poll up to 10 times (10us apart) for QENA_STAT to confirm.
 */
2193 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2195 struct i40e_hw *hw = &pf->hw;
2200 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2202 ixl_dbg(pf, IXL_DBG_EN_DIS,
2203 "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
2206 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2207 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
2208 I40E_QRX_ENA_QENA_STAT_MASK;
2209 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2210 /* Verify the enable took */
2211 for (int j = 0; j < 10; j++) {
2212 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2213 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2215 i40e_usec_delay(10);
2217 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
2218 device_printf(pf->dev, "RX queue %d still disabled!\n",
/*
 * Enable a TX/RX queue pair; stops after the TX enable if it fails.
 */
2227 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2231 error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
2232 /* Called function already prints error message */
2235 error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
2239 /* For PF VSI only */
2241 ixl_enable_rings(struct ixl_vsi *vsi)
2243 struct ixl_pf *pf = vsi->back;
2246 for (int i = 0; i < vsi->num_tx_queues; i++)
2247 error = ixl_enable_tx_ring(pf, &pf->qtag, i);
2249 for (int i = 0; i < vsi->num_rx_queues; i++)
2250 error = ixl_enable_rx_ring(pf, &pf->qtag, i);
2256 * Returns error on first ring that is detected hung.
/*
 * ixl_disable_tx_ring
 *
 * Disable one TX queue: pre-configure for disable, wait 500us, clear
 * QENA_REQ in QTX_ENA, then poll up to 10 times (10ms apart) for
 * QENA_STAT to clear; complains if the queue stays enabled.
 */
2259 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2261 struct i40e_hw *hw = &pf->hw;
2266 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2268 i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
2269 i40e_usec_delay(500);
2271 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2272 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2273 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2274 /* Verify the disable took */
2275 for (int j = 0; j < 10; j++) {
2276 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2277 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2279 i40e_msec_delay(10);
2281 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
2282 device_printf(pf->dev, "TX queue %d still enabled!\n",
2291 * Returns error on first ring that is detected hung.
/*
 * ixl_disable_rx_ring
 *
 * Disable one RX queue: clear QENA_REQ in QRX_ENA and poll up to 10
 * times (10ms apart) for QENA_STAT to clear; complains if the queue
 * stays enabled.
 */
2294 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2296 struct i40e_hw *hw = &pf->hw;
2301 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2303 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2304 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2305 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2306 /* Verify the disable took */
2307 for (int j = 0; j < 10; j++) {
2308 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2309 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2311 i40e_msec_delay(10);
2313 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
2314 device_printf(pf->dev, "RX queue %d still enabled!\n",
/*
 * Disable a TX/RX queue pair; stops after the TX disable if it fails.
 */
2323 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2327 error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
2328 /* Called function already prints error message */
2331 error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
/*
 * Disable every TX ring, then every RX ring, of the given VSI.
 * NOTE(review): error handling between the loops is not visible in
 * this listing.
 */
2336 ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag)
2340 for (int i = 0; i < vsi->num_tx_queues; i++)
2341 error = ixl_disable_tx_ring(pf, qtag, i);
2343 for (int i = 0; i < vsi->num_rx_queues; i++)
2344 error = ixl_disable_rx_ring(pf, qtag, i);
/*
 * ixl_handle_tx_mdd_event
 *
 * Identify and report a TX Malicious Driver Detection event: decode
 * GL_MDET_TX (event, queue, PF and VF numbers), clear it, check the
 * per-PF and per-VF MDET_TX registers to attribute the event, bump the
 * offending VF's MDD counter, and log an appropriate message.
 */
2350 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
2352 struct i40e_hw *hw = &pf->hw;
2353 device_t dev = pf->dev;
2355 bool mdd_detected = false;
2356 bool pf_mdd_detected = false;
2357 bool vf_mdd_detected = false;
2360 u8 pf_mdet_num, vp_mdet_num;
2363 /* find what triggered the MDD event */
2364 reg = rd32(hw, I40E_GL_MDET_TX);
2365 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
2366 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
2367 I40E_GL_MDET_TX_PF_NUM_SHIFT;
2368 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
2369 I40E_GL_MDET_TX_VF_NUM_SHIFT;
2370 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
2371 I40E_GL_MDET_TX_EVENT_SHIFT;
2372 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
2373 I40E_GL_MDET_TX_QUEUE_SHIFT;
/* Write-1-to-clear the global event register. */
2374 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
2375 mdd_detected = true;
2381 reg = rd32(hw, I40E_PF_MDET_TX);
2382 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
2383 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
2384 pf_mdet_num = hw->pf_id;
2385 pf_mdd_detected = true;
2388 /* Check if MDD was caused by a VF */
2389 for (int i = 0; i < pf->num_vfs; i++) {
2391 reg = rd32(hw, I40E_VP_MDET_TX(i));
2392 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
2393 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
2395 vf->num_mdd_events++;
2396 vf_mdd_detected = true;
2400 /* Print out an error message */
2401 if (vf_mdd_detected && pf_mdd_detected)
2403 "Malicious Driver Detection event %d"
2404 " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
2405 event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
2406 else if (vf_mdd_detected && !pf_mdd_detected)
2408 "Malicious Driver Detection event %d"
2409 " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
2410 event, queue, pf_num, vf_num, vp_mdet_num);
2411 else if (!vf_mdd_detected && pf_mdd_detected)
2413 "Malicious Driver Detection event %d"
2414 " on TX queue %d, pf number %d (PF-%d)\n",
2415 event, queue, pf_num, pf_mdet_num);
2416 /* Theoretically shouldn't happen */
2419 "TX Malicious Driver Detection event (unknown)\n");
/*
 * ixl_handle_rx_mdd_event
 *
 * RX counterpart of ixl_handle_tx_mdd_event(): decode GL_MDET_RX
 * (which, unlike GL_MDET_TX, carries no VF number), clear it, probe
 * the per-PF and per-VF MDET_RX registers to attribute the event, and
 * log an appropriate message.
 */
2423 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
2425 struct i40e_hw *hw = &pf->hw;
2426 device_t dev = pf->dev;
2428 bool mdd_detected = false;
2429 bool pf_mdd_detected = false;
2430 bool vf_mdd_detected = false;
2433 u8 pf_mdet_num, vp_mdet_num;
2437 * GL_MDET_RX doesn't contain VF number information, unlike
2440 reg = rd32(hw, I40E_GL_MDET_RX);
2441 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
2442 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
2443 I40E_GL_MDET_RX_FUNCTION_SHIFT;
2444 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
2445 I40E_GL_MDET_RX_EVENT_SHIFT;
2446 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
2447 I40E_GL_MDET_RX_QUEUE_SHIFT;
/* Write-1-to-clear the global event register. */
2448 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
2449 mdd_detected = true;
2455 reg = rd32(hw, I40E_PF_MDET_RX);
2456 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
2457 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
2458 pf_mdet_num = hw->pf_id;
2459 pf_mdd_detected = true;
2462 /* Check if MDD was caused by a VF */
2463 for (int i = 0; i < pf->num_vfs; i++) {
2465 reg = rd32(hw, I40E_VP_MDET_RX(i));
2466 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
2467 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
2469 vf->num_mdd_events++;
2470 vf_mdd_detected = true;
2474 /* Print out an error message */
2475 if (vf_mdd_detected && pf_mdd_detected)
2477 "Malicious Driver Detection event %d"
2478 " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
2479 event, queue, pf_num, pf_mdet_num, vp_mdet_num);
2480 else if (vf_mdd_detected && !pf_mdd_detected)
2482 "Malicious Driver Detection event %d"
2483 " on RX queue %d, pf number %d, (VF-%d)\n",
2484 event, queue, pf_num, vp_mdet_num);
2485 else if (!vf_mdd_detected && pf_mdd_detected)
2487 "Malicious Driver Detection event %d"
2488 " on RX queue %d, pf number %d (PF-%d)\n",
2489 event, queue, pf_num, pf_mdet_num);
2490 /* Theoretically shouldn't happen */
2493 "RX Malicious Driver Detection event (unknown)\n");
2497 * ixl_handle_mdd_event
2499 * Called from interrupt handler to identify possibly malicious vfs
2500 * (But also detects events from the PF, as well)
2503 ixl_handle_mdd_event(struct ixl_pf *pf)
2505 struct i40e_hw *hw = &pf->hw;
2509 * Handle both TX/RX because it's possible they could
2510 * both trigger in the same interrupt.
2512 ixl_handle_tx_mdd_event(pf);
2513 ixl_handle_rx_mdd_event(pf);
/* Events processed: drop the pending flag set by the interrupt path. */
2515 atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
2517 /* re-enable mdd interrupt cause */
2518 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2519 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2520 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/*
 * Enable interrupts for a VSI: per-RX-queue vectors when running MSI-X,
 * otherwise (elided 'else' branch presumed) just the admin/other-cause
 * interrupt 0 -- TODO confirm against full source.
 */
2525 ixl_enable_intr(struct ixl_vsi *vsi)
2527 struct i40e_hw *hw = vsi->hw;
2528 struct ixl_rx_queue *que = vsi->rx_queues;
2530 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2531 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2532 ixl_enable_queue(hw, que->rxr.me);
2534 ixl_enable_intr0(hw);
/* Disable the per-RX-queue interrupt vector for every queue on the VSI. */
2538 ixl_disable_rings_intr(struct ixl_vsi *vsi)
2540 struct i40e_hw *hw = vsi->hw;
2541 struct ixl_rx_queue *que = vsi->rx_queues;
2543 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2544 ixl_disable_queue(hw, que->rxr.me);
/*
 * Enable interrupt cause 0 (admin queue / "other" causes) by setting
 * INTENA + CLEARPBA in PFINT_DYN_CTL0.
 */
2548 ixl_enable_intr0(struct i40e_hw *hw)
2552 /* Use IXL_ITR_NONE so ITR isn't updated here */
2553 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2554 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2555 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2556 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/*
 * Disable interrupt cause 0: write PFINT_DYN_CTL0 with INTENA cleared,
 * keeping IXL_ITR_NONE so no ITR value is updated.
 */
2560 ixl_disable_intr0(struct i40e_hw *hw)
2564 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
2565 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/* Enable the MSI-X interrupt vector for queue 'id' via PFINT_DYN_CTLN. */
2570 ixl_enable_queue(struct i40e_hw *hw, int id)
2574 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2575 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2576 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2577 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/* Disable the MSI-X interrupt vector for queue 'id' (INTENA left clear). */
2581 ixl_disable_queue(struct i40e_hw *hw, int id)
2585 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2586 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/*
 * Harvest all PF port-level hardware statistics into pf->stats, using
 * pf->stats_offsets as the baseline (hardware counters are not cleared by
 * PF reset, so first-read values are treated as offsets).  Also propagates
 * stats to the PF VSI and to each enabled VF's VSI.
 * NOTE(review): some lines of this extract appear elided; layout assumed.
 */
2590 ixl_update_stats_counters(struct ixl_pf *pf)
2592 struct i40e_hw *hw = &pf->hw;
2593 struct ixl_vsi *vsi = &pf->vsi;
/* Snapshot XOFF count so we can detect pause frames received this interval. */
2595 u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;
2597 struct i40e_hw_port_stats *nsd = &pf->stats;
2598 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
2600 /* Update hw stats */
2601 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
2602 pf->stat_offsets_loaded,
2603 &osd->crc_errors, &nsd->crc_errors);
2604 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
2605 pf->stat_offsets_loaded,
2606 &osd->illegal_bytes, &nsd->illegal_bytes);
2607 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
2608 I40E_GLPRT_GORCL(hw->port),
2609 pf->stat_offsets_loaded,
2610 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
2611 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
2612 I40E_GLPRT_GOTCL(hw->port),
2613 pf->stat_offsets_loaded,
2614 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
2615 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
2616 pf->stat_offsets_loaded,
2617 &osd->eth.rx_discards,
2618 &nsd->eth.rx_discards);
2619 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
2620 I40E_GLPRT_UPRCL(hw->port),
2621 pf->stat_offsets_loaded,
2622 &osd->eth.rx_unicast,
2623 &nsd->eth.rx_unicast);
2624 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
2625 I40E_GLPRT_UPTCL(hw->port),
2626 pf->stat_offsets_loaded,
2627 &osd->eth.tx_unicast,
2628 &nsd->eth.tx_unicast);
2629 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
2630 I40E_GLPRT_MPRCL(hw->port),
2631 pf->stat_offsets_loaded,
2632 &osd->eth.rx_multicast,
2633 &nsd->eth.rx_multicast);
2634 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
2635 I40E_GLPRT_MPTCL(hw->port),
2636 pf->stat_offsets_loaded,
2637 &osd->eth.tx_multicast,
2638 &nsd->eth.tx_multicast);
2639 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
2640 I40E_GLPRT_BPRCL(hw->port),
2641 pf->stat_offsets_loaded,
2642 &osd->eth.rx_broadcast,
2643 &nsd->eth.rx_broadcast);
2644 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
2645 I40E_GLPRT_BPTCL(hw->port),
2646 pf->stat_offsets_loaded,
2647 &osd->eth.tx_broadcast,
2648 &nsd->eth.tx_broadcast);
2650 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2651 pf->stat_offsets_loaded,
2652 &osd->tx_dropped_link_down,
2653 &nsd->tx_dropped_link_down);
2654 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2655 pf->stat_offsets_loaded,
2656 &osd->mac_local_faults,
2657 &nsd->mac_local_faults);
2658 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2659 pf->stat_offsets_loaded,
2660 &osd->mac_remote_faults,
2661 &nsd->mac_remote_faults);
2662 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2663 pf->stat_offsets_loaded,
2664 &osd->rx_length_errors,
2665 &nsd->rx_length_errors);
2667 /* Flow control (LFC) stats */
2668 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2669 pf->stat_offsets_loaded,
2670 &osd->link_xon_rx, &nsd->link_xon_rx);
2671 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2672 pf->stat_offsets_loaded,
2673 &osd->link_xon_tx, &nsd->link_xon_tx);
2674 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2675 pf->stat_offsets_loaded,
2676 &osd->link_xoff_rx, &nsd->link_xoff_rx);
2677 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2678 pf->stat_offsets_loaded,
2679 &osd->link_xoff_tx, &nsd->link_xoff_tx);
2682 * For watchdog management we need to know if we have been paused
2683 * during the last interval, so capture that here.
2685 if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
2686 vsi->shared->isc_pause_frames = 1;
2688 /* Packet size stats rx */
2689 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2690 I40E_GLPRT_PRC64L(hw->port),
2691 pf->stat_offsets_loaded,
2692 &osd->rx_size_64, &nsd->rx_size_64);
2693 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2694 I40E_GLPRT_PRC127L(hw->port),
2695 pf->stat_offsets_loaded,
2696 &osd->rx_size_127, &nsd->rx_size_127);
2697 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2698 I40E_GLPRT_PRC255L(hw->port),
2699 pf->stat_offsets_loaded,
2700 &osd->rx_size_255, &nsd->rx_size_255);
2701 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2702 I40E_GLPRT_PRC511L(hw->port),
2703 pf->stat_offsets_loaded,
2704 &osd->rx_size_511, &nsd->rx_size_511);
2705 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2706 I40E_GLPRT_PRC1023L(hw->port),
2707 pf->stat_offsets_loaded,
2708 &osd->rx_size_1023, &nsd->rx_size_1023);
2709 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2710 I40E_GLPRT_PRC1522L(hw->port),
2711 pf->stat_offsets_loaded,
2712 &osd->rx_size_1522, &nsd->rx_size_1522);
2713 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2714 I40E_GLPRT_PRC9522L(hw->port),
2715 pf->stat_offsets_loaded,
2716 &osd->rx_size_big, &nsd->rx_size_big);
2718 /* Packet size stats tx */
2719 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2720 I40E_GLPRT_PTC64L(hw->port),
2721 pf->stat_offsets_loaded,
2722 &osd->tx_size_64, &nsd->tx_size_64);
2723 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2724 I40E_GLPRT_PTC127L(hw->port),
2725 pf->stat_offsets_loaded,
2726 &osd->tx_size_127, &nsd->tx_size_127);
2727 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2728 I40E_GLPRT_PTC255L(hw->port),
2729 pf->stat_offsets_loaded,
2730 &osd->tx_size_255, &nsd->tx_size_255);
2731 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2732 I40E_GLPRT_PTC511L(hw->port),
2733 pf->stat_offsets_loaded,
2734 &osd->tx_size_511, &nsd->tx_size_511);
2735 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2736 I40E_GLPRT_PTC1023L(hw->port),
2737 pf->stat_offsets_loaded,
2738 &osd->tx_size_1023, &nsd->tx_size_1023);
2739 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2740 I40E_GLPRT_PTC1522L(hw->port),
2741 pf->stat_offsets_loaded,
2742 &osd->tx_size_1522, &nsd->tx_size_1522);
2743 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2744 I40E_GLPRT_PTC9522L(hw->port),
2745 pf->stat_offsets_loaded,
2746 &osd->tx_size_big, &nsd->tx_size_big);
2748 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2749 pf->stat_offsets_loaded,
2750 &osd->rx_undersize, &nsd->rx_undersize);
2751 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2752 pf->stat_offsets_loaded,
2753 &osd->rx_fragments, &nsd->rx_fragments);
2754 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2755 pf->stat_offsets_loaded,
2756 &osd->rx_oversize, &nsd->rx_oversize);
2757 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2758 pf->stat_offsets_loaded,
2759 &osd->rx_jabber, &nsd->rx_jabber);
/* From here on, every read above has seeded the offsets. */
2760 pf->stat_offsets_loaded = true;
2763 /* Update vsi stats */
2764 ixl_update_vsi_stats(vsi);
/* Refresh per-VF VSI stats for VFs that are actually enabled. */
2766 for (int i = 0; i < pf->num_vfs; i++) {
2768 if (vf->vf_flags & VF_FLAG_ENABLED)
2769 ixl_update_eth_stats(&pf->vfs[i].vsi);
/*
 * Quiesce the device before an EMP/PF reset: shut down the LAN HMC and the
 * admin queue, disable the other-cause interrupt, and release the PF's
 * queue-manager allocation.  Errors are logged but (as visible here) not
 * returned to the caller.  NOTE(review): error-check lines around the
 * device_printf() continuations appear elided in this extract.
 */
2774 ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
2776 struct i40e_hw *hw = &pf->hw;
2777 device_t dev = pf->dev;
2780 error = i40e_shutdown_lan_hmc(hw);
2783 "Shutdown LAN HMC failed with code %d\n", error);
2785 ixl_disable_intr0(hw);
2787 error = i40e_shutdown_adminq(hw);
2790 "Shutdown Admin queue failed with code %d\n", error);
2792 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
/*
 * Rebuild all HW/driver state after an EMP or PF reset: PF reset, admin
 * queue init, capability re-query, LAN HMC init/configure, queue
 * reservation, switch config, PHY mask, flow control, filter cleanup, link
 * status, DCB parameters and interrupt-0 re-enable.  Any failure jumps to
 * the common error label at the bottom.  NOTE(review): the 'if (error)'
 * guard lines preceding several device_printf() calls appear elided.
 */
2797 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
2799 struct i40e_hw *hw = &pf->hw;
2800 struct ixl_vsi *vsi = &pf->vsi;
2801 device_t dev = pf->dev;
2804 device_printf(dev, "Rebuilding driver state...\n");
2806 error = i40e_pf_reset(hw);
2808 device_printf(dev, "PF reset failure %s\n",
2809 i40e_stat_str(hw, error));
2810 goto ixl_rebuild_hw_structs_after_reset_err;
2814 error = i40e_init_adminq(hw);
/* A firmware API version mismatch is tolerated here; other errors abort. */
2815 if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
2816 device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
2818 goto ixl_rebuild_hw_structs_after_reset_err;
2821 i40e_clear_pxe_mode(hw);
2823 error = ixl_get_hw_capabilities(pf);
2825 device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
2826 goto ixl_rebuild_hw_structs_after_reset_err;
2829 error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
2830 hw->func_caps.num_rx_qp, 0, 0);
2832 device_printf(dev, "init_lan_hmc failed: %d\n", error);
2833 goto ixl_rebuild_hw_structs_after_reset_err;
2836 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
2838 device_printf(dev, "configure_lan_hmc failed: %d\n", error);
2839 goto ixl_rebuild_hw_structs_after_reset_err;
2842 /* reserve a contiguous allocation for the PF's VSI */
2843 error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
2845 device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
2847 /* TODO: error handling */
2850 error = ixl_switch_config(pf);
2852 device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
2855 goto ixl_rebuild_hw_structs_after_reset_err;
2858 error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
2861 device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
2862 " aq_err %d\n", error, hw->aq.asq_last_status);
2864 goto ixl_rebuild_hw_structs_after_reset_err;
2868 error = i40e_set_fc(hw, &set_fc_err_mask, true);
2870 device_printf(dev, "init: setting link flow control failed; retcode %d,"
2871 " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
2873 goto ixl_rebuild_hw_structs_after_reset_err;
2876 /* Remove default filters reinstalled by FW on reset */
2877 ixl_del_default_hw_filters(vsi);
2879 /* Determine link state */
2880 if (ixl_attach_get_link_status(pf)) {
2882 /* TODO: error handling */
2885 i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
2886 ixl_get_fw_lldp_status(pf);
2888 /* Keep admin queue interrupts active while driver is loaded */
2889 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2890 ixl_configure_intr0_msix(pf);
2891 ixl_enable_intr0(hw);
2894 device_printf(dev, "Rebuilding driver state done.\n");
/* Common exit for any failed rebuild step above. */
2897 ixl_rebuild_hw_structs_after_reset_err:
2898 device_printf(dev, "Reload the driver to recover\n");
/*
 * Handle an EMP (firmware) reset: quiesce the driver, poll GLGEN_RSTAT
 * until the device leaves the resetting state (up to 100 * 100ms = 10s),
 * then rebuild all HW structures and clear the resetting flag.
 * NOTE(review): the loop-exit check on 'reg' appears elided here.
 */
2903 ixl_handle_empr_reset(struct ixl_pf *pf)
2905 struct ixl_vsi *vsi = &pf->vsi;
2906 struct i40e_hw *hw = &pf->hw;
2907 bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
2911 ixl_prepare_for_reset(pf, is_up);
2913 /* Typically finishes within 3-4 seconds */
2914 while (count++ < 100) {
2915 reg = rd32(hw, I40E_GLGEN_RSTAT)
2916 & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
2918 i40e_msec_delay(100);
2922 ixl_dbg(pf, IXL_DBG_INFO,
2923 "Reset wait count: %d\n", count);
2925 ixl_rebuild_hw_structs_after_reset(pf);
2927 atomic_clear_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
2931 * Update VSI-specific ethernet statistics counters.
/*
 * Reads the per-VSI GLV_* counters (indexed by the VSI's HW stat counter
 * index) into vsi->eth_stats, using vsi->eth_stats_offsets as the
 * first-read baseline, then marks the offsets as loaded.
 */
2934 ixl_update_eth_stats(struct ixl_vsi *vsi)
2936 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2937 struct i40e_hw *hw = &pf->hw;
2938 struct i40e_eth_stats *es;
2939 struct i40e_eth_stats *oes;
2940 struct i40e_hw_port_stats *nsd;
2941 u16 stat_idx = vsi->info.stat_counter_idx;
2943 es = &vsi->eth_stats;
2944 oes = &vsi->eth_stats_offsets;
2947 /* Gather up the stats that the hw collects */
2948 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2949 vsi->stat_offsets_loaded,
2950 &oes->tx_errors, &es->tx_errors);
2951 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2952 vsi->stat_offsets_loaded,
2953 &oes->rx_discards, &es->rx_discards);
2955 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2956 I40E_GLV_GORCL(stat_idx),
2957 vsi->stat_offsets_loaded,
2958 &oes->rx_bytes, &es->rx_bytes);
2959 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2960 I40E_GLV_UPRCL(stat_idx),
2961 vsi->stat_offsets_loaded,
2962 &oes->rx_unicast, &es->rx_unicast);
2963 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2964 I40E_GLV_MPRCL(stat_idx),
2965 vsi->stat_offsets_loaded,
2966 &oes->rx_multicast, &es->rx_multicast);
2967 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2968 I40E_GLV_BPRCL(stat_idx),
2969 vsi->stat_offsets_loaded,
2970 &oes->rx_broadcast, &es->rx_broadcast);
2972 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2973 I40E_GLV_GOTCL(stat_idx),
2974 vsi->stat_offsets_loaded,
2975 &oes->tx_bytes, &es->tx_bytes);
2976 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2977 I40E_GLV_UPTCL(stat_idx),
2978 vsi->stat_offsets_loaded,
2979 &oes->tx_unicast, &es->tx_unicast);
2980 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2981 I40E_GLV_MPTCL(stat_idx),
2982 vsi->stat_offsets_loaded,
2983 &oes->tx_multicast, &es->tx_multicast);
2984 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2985 I40E_GLV_BPTCL(stat_idx),
2986 vsi->stat_offsets_loaded,
2987 &oes->tx_broadcast, &es->tx_broadcast);
2988 vsi->stat_offsets_loaded = true;
/*
 * Refresh the VSI's ethernet stats from HW and publish them into the
 * ifnet counters via the IXL_SET_* macros.  'nsd' presumably points at
 * pf->stats (assignment elided in this extract) -- TODO confirm.
 */
2992 ixl_update_vsi_stats(struct ixl_vsi *vsi)
2996 struct i40e_eth_stats *es;
2999 struct i40e_hw_port_stats *nsd;
3003 es = &vsi->eth_stats;
3006 ixl_update_eth_stats(vsi);
/* Output drops combine VSI-level discards with link-down TX drops. */
3008 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
3010 /* Update ifnet stats */
3011 IXL_SET_IPACKETS(vsi, es->rx_unicast +
3014 IXL_SET_OPACKETS(vsi, es->tx_unicast +
3017 IXL_SET_IBYTES(vsi, es->rx_bytes);
3018 IXL_SET_OBYTES(vsi, es->tx_bytes);
3019 IXL_SET_IMCASTS(vsi, es->rx_multicast);
3020 IXL_SET_OMCASTS(vsi, es->tx_multicast);
/* Input errors aggregate the port-level CRC/size/framing error counters. */
3022 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
3023 nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
3025 IXL_SET_OERRORS(vsi, es->tx_errors);
3026 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
3027 IXL_SET_OQDROPS(vsi, tx_discards);
3028 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
3029 IXL_SET_COLLISIONS(vsi, 0);
3033 * Reset all of the stats for the given pf
/* Zero both the live counters and their baselines, forcing the next
 * stats harvest to re-seed the offsets from hardware. */
3036 ixl_pf_reset_stats(struct ixl_pf *pf)
3038 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
3039 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
3040 pf->stat_offsets_loaded = false;
3044 * Resets all stats of the given vsi
/* Per-VSI analogue of ixl_pf_reset_stats(): clear counters and offsets. */
3047 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
3049 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
3050 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
3051 vsi->stat_offsets_loaded = false;
3055 * Read and update a 48 bit stat from the hw
3057 * Since the device stats are not reset at PFReset, they likely will not
3058 * be zeroed when the driver starts. We'll save the first values read
3059 * and use them as offsets to be subtracted from the raw values in order
3060 * to report stats that count from zero.
3063 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
3064 bool offset_loaded, u64 *offset, u64 *stat)
/* On 64-bit FreeBSD >= 10 a single 64-bit bus read is safe. */
3068 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
3069 new_data = rd64(hw, loreg);
3072 * Use two rd32's instead of one rd64; FreeBSD versions before
3073 * 10 don't support 64-bit bus reads/writes.
3075 new_data = rd32(hw, loreg);
3076 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
3081 if (new_data >= *offset)
3082 *stat = new_data - *offset;
/* Counter wrapped past 2^48: add the modulus before subtracting. */
3084 *stat = (new_data + ((u64)1 << 48)) - *offset;
/* Keep only the 48 valid counter bits. */
3085 *stat &= 0xFFFFFFFFFFFFULL;
3089 * Read and update a 32 bit stat from the hw
/* 32-bit analogue of ixl_stat_update48(): subtract the saved offset,
 * compensating for a single 2^32 wrap. */
3092 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
3093 bool offset_loaded, u64 *offset, u64 *stat)
3097 new_data = rd32(hw, reg);
3100 if (new_data >= *offset)
3101 *stat = (u32)(new_data - *offset);
/* Counter wrapped: add 2^32 before subtracting the baseline. */
3103 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
/*
 * Register all device sysctl nodes: user-facing tunables (flow control,
 * advertised speed, ITR), optional FEC controls for 25G parts, and a
 * hidden (CTLFLAG_SKIP) "debug" subtree of diagnostic/reset knobs.
 * All handlers receive the ixl_pf pointer as arg1.
 */
3107 ixl_add_device_sysctls(struct ixl_pf *pf)
3109 device_t dev = pf->dev;
3110 struct i40e_hw *hw = &pf->hw;
3112 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3113 struct sysctl_oid_list *ctx_list =
3114 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3116 struct sysctl_oid *debug_node;
3117 struct sysctl_oid_list *debug_list;
3119 struct sysctl_oid *fec_node;
3120 struct sysctl_oid_list *fec_list;
3122 /* Set up sysctls */
3123 SYSCTL_ADD_PROC(ctx, ctx_list,
3124 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
3125 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
3127 SYSCTL_ADD_PROC(ctx, ctx_list,
3128 OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
3129 pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
3131 SYSCTL_ADD_PROC(ctx, ctx_list,
3132 OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD,
3133 pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
3135 SYSCTL_ADD_PROC(ctx, ctx_list,
3136 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
3137 pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed");
3139 SYSCTL_ADD_PROC(ctx, ctx_list,
3140 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
3141 pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
3143 SYSCTL_ADD_PROC(ctx, ctx_list,
3144 OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
3145 pf, 0, ixl_sysctl_unallocated_queues, "I",
3146 "Queues not allocated to a PF or VF");
3148 SYSCTL_ADD_PROC(ctx, ctx_list,
3149 OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
3150 pf, 0, ixl_sysctl_pf_tx_itr, "I",
3151 "Immediately set TX ITR value for all queues");
3153 SYSCTL_ADD_PROC(ctx, ctx_list,
3154 OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
3155 pf, 0, ixl_sysctl_pf_rx_itr, "I",
3156 "Immediately set RX ITR value for all queues");
3158 SYSCTL_ADD_INT(ctx, ctx_list,
3159 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
3160 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
3162 SYSCTL_ADD_INT(ctx, ctx_list,
3163 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
3164 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
3166 /* Add FEC sysctls for 25G adapters */
3167 if (i40e_is_25G_device(hw->device_id)) {
3168 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3169 OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
3170 fec_list = SYSCTL_CHILDREN(fec_node);
3172 SYSCTL_ADD_PROC(ctx, fec_list,
3173 OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW,
3174 pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
3176 SYSCTL_ADD_PROC(ctx, fec_list,
3177 OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW,
3178 pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
3180 SYSCTL_ADD_PROC(ctx, fec_list,
3181 OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW,
3182 pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
3184 SYSCTL_ADD_PROC(ctx, fec_list,
3185 OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW,
3186 pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
3188 SYSCTL_ADD_PROC(ctx, fec_list,
3189 OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW,
3190 pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
3193 SYSCTL_ADD_PROC(ctx, ctx_list,
3194 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW,
3195 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
3197 /* Add sysctls meant to print debug information, but don't list them
3198 * in "sysctl -a" output. */
3199 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3200 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls")
;
3201 debug_list = SYSCTL_CHILDREN(debug_node);
3203 SYSCTL_ADD_UINT(ctx, debug_list,
3204 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
3205 &pf->hw.debug_mask, 0, "Shared code debug message level");
3207 SYSCTL_ADD_UINT(ctx, debug_list,
3208 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
3209 &pf->dbg_mask, 0, "Non-shared code debug message level");
3211 SYSCTL_ADD_PROC(ctx, debug_list,
3212 OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
3213 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
3215 SYSCTL_ADD_PROC(ctx, debug_list,
3216 OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
3217 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
3219 SYSCTL_ADD_PROC(ctx, debug_list,
3220 OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
3221 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
3223 SYSCTL_ADD_PROC(ctx, debug_list,
3224 OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
3225 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
3227 SYSCTL_ADD_PROC(ctx, debug_list,
3228 OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
3229 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
3231 SYSCTL_ADD_PROC(ctx, debug_list,
3232 OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
3233 pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
3235 SYSCTL_ADD_PROC(ctx, debug_list,
3236 OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
3237 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
3239 SYSCTL_ADD_PROC(ctx, debug_list,
3240 OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
3241 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
3243 SYSCTL_ADD_PROC(ctx, debug_list,
3244 OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
3245 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
3247 SYSCTL_ADD_PROC(ctx, debug_list,
3248 OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD,
3249 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
3251 SYSCTL_ADD_PROC(ctx, debug_list,
3252 OID_AUTO, "do_pf_reset", CTLTYPE_INT | CTLFLAG_WR,
3253 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
3255 SYSCTL_ADD_PROC(ctx, debug_list,
3256 OID_AUTO, "do_core_reset", CTLTYPE_INT | CTLFLAG_WR,
3257 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
3259 SYSCTL_ADD_PROC(ctx, debug_list,
3260 OID_AUTO, "do_global_reset", CTLTYPE_INT | CTLFLAG_WR,
3261 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
3263 SYSCTL_ADD_PROC(ctx, debug_list,
3264 OID_AUTO, "do_emp_reset", CTLTYPE_INT | CTLFLAG_WR,
3265 pf, 0, ixl_sysctl_do_emp_reset, "I",
3266 "(This doesn't work) Tell HW to initiate a EMP (entire firmware) reset");
3268 SYSCTL_ADD_PROC(ctx, debug_list,
3269 OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
3270 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
/* I2C knobs -- presumably gated by an elided #ifdef; verify in full source. */
3273 SYSCTL_ADD_PROC(ctx, debug_list,
3274 OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3275 pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
3277 SYSCTL_ADD_PROC(ctx, debug_list,
3278 OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3279 pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
3281 SYSCTL_ADD_PROC(ctx, debug_list,
3282 OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD,
3283 pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
3288 * Primarily for finding out how many queues can be assigned to VFs,
/* Sysctl handler: report the number of HW queues still free in the PF's
 * queue manager (read-only integer). */
3292 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
3294 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3297 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
3299 return sysctl_handle_int(oidp, NULL, queues, req);
3303 ** Set flow control using sysctl:
/*
 * Sysctl handler for "fc": accepts 0-3 (none / RX / TX / full), pushes the
 * requested mode to firmware via i40e_set_fc(), and caches it in pf->fc.
 * NOTE(review): the error-return lines after validation/AQ failure appear
 * elided in this extract.
 */
3310 ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
3312 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3313 struct i40e_hw *hw = &pf->hw;
3314 device_t dev = pf->dev;
3315 int requested_fc, error = 0;
3316 enum i40e_status_code aq_error = 0;
/* Seed with the current setting; a read-only access returns it unchanged. */
3320 requested_fc = pf->fc;
3321 error = sysctl_handle_int(oidp, &requested_fc, 0, req);
3322 if ((error) || (req->newptr == NULL))
3324 if (requested_fc < 0 || requested_fc > 3) {
3326 "Invalid fc mode; valid modes are 0 through 3\n");
3330 /* Set fc ability for port */
3331 hw->fc.requested_mode = requested_fc;
3332 aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
3335 "%s: Error setting new fc mode %d; fc_err %#x\n",
3336 __func__, aq_error, fc_aq_err);
3339 pf->fc = requested_fc;
/*
 * Map an Admin Queue link-speed enum to a human-readable string from the
 * (elided) 'speeds' table; 'index' selection lines per case are also
 * elided in this extract.
 */
3345 ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
3359 switch (link_speed) {
3360 case I40E_LINK_SPEED_100MB:
3363 case I40E_LINK_SPEED_1GB:
3366 case I40E_LINK_SPEED_10GB:
3369 case I40E_LINK_SPEED_40GB:
3372 case I40E_LINK_SPEED_20GB:
3375 case I40E_LINK_SPEED_25GB:
3378 case I40E_LINK_SPEED_UNKNOWN:
3384 return speeds[index];
/*
 * Sysctl handler for "current_speed": refresh link status, then return the
 * link speed as a string.
 */
3388 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
3390 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3391 struct i40e_hw *hw = &pf->hw;
3394 ixl_update_link_status(pf);
3396 error = sysctl_handle_string(oidp,
3397 ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
3403 * Converts 8-bit speeds value to and from sysctl flags and
3404 * Admin Queue flags.
/*
 * Each speedmap entry packs the AQ flag in the low byte and the sysctl
 * flag in the high byte, so one table serves both directions ('to_aq'
 * selects which half is matched and which is emitted).
 */
3407 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
3409 static u16 speedmap[6] = {
3410 (I40E_LINK_SPEED_100MB | (0x1 << 8)),
3411 (I40E_LINK_SPEED_1GB | (0x2 << 8)),
3412 (I40E_LINK_SPEED_10GB | (0x4 << 8)),
3413 (I40E_LINK_SPEED_20GB | (0x8 << 8)),
3414 (I40E_LINK_SPEED_25GB | (0x10 << 8)),
3415 (I40E_LINK_SPEED_40GB | (0x20 << 8))
3419 for (int i = 0; i < 6; i++) {
3421 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
3423 retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
/*
 * Program the PHY's advertised link speeds.  Fetches current PHY
 * capabilities, builds an i40e_aq_set_phy_config that preserves
 * abilities/EEE/FEC settings but substitutes the new link_speed, and
 * issues the set-phy-config AQ command.  'from_aq' selects whether
 * 'speeds' is already in AQ format or in sysctl-flag format.
 * NOTE(review): error-check/return lines appear elided in this extract.
 */
3430 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
3432 struct i40e_hw *hw = &pf->hw;
3433 device_t dev = pf->dev;
3434 struct i40e_aq_get_phy_abilities_resp abilities;
3435 struct i40e_aq_set_phy_config config;
3436 enum i40e_status_code aq_error = 0;
3438 /* Get current capability information */
3439 aq_error = i40e_aq_get_phy_capabilities(hw,
3440 FALSE, FALSE, &abilities, NULL);
3443 "%s: Error getting phy capabilities %d,"
3444 " aq error: %d\n", __func__, aq_error,
3445 hw->aq.asq_last_status);
3449 /* Prepare new config */
3450 bzero(&config, sizeof(config));
3452 config.link_speed = speeds;
3454 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
3455 config.phy_type = abilities.phy_type;
3456 config.phy_type_ext = abilities.phy_type_ext;
3457 config.abilities = abilities.abilities
3458 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
3459 config.eee_capability = abilities.eee_capability;
3460 config.eeer = abilities.eeer_val;
3461 config.low_power_ctrl = abilities.d3_lpan;
/* Carry forward current FEC configuration bits (mask 0x1e). */
3462 config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e);
3464 /* Do aq command & restart link */
3465 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3468 "%s: Error setting new phy config %d,"
3469 " aq error: %d\n", __func__, aq_error,
3470 hw->aq.asq_last_status);
3478 ** Supported link speeds:
/* Sysctl handler: expose the adapter's supported speeds as sysctl-format
 * flag bits (read-only integer). */
3488 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
3490 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3491 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
3493 return sysctl_handle_int(oidp, NULL, supported, req);
3497 ** Control link advertise speed:
3499 ** 0x1 - advertise 100 Mb
3500 ** 0x2 - advertise 1G
3501 ** 0x4 - advertise 10G
3502 ** 0x8 - advertise 20G
3503 ** 0x10 - advertise 25G
3504 ** 0x20 - advertise 40G
3506 ** Set to 0 to disable link
/*
 * Sysctl handler for "advertise_speed": validate the requested flag set
 * against the adapter's supported speeds, push the change to the PHY, and
 * refresh link status.  NOTE(review): early-return lines after the error
 * branches appear elided in this extract.
 */
3509 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
3511 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3512 device_t dev = pf->dev;
3513 u8 converted_speeds;
3514 int requested_ls = 0;
3517 /* Read in new mode */
3518 requested_ls = pf->advertised_speed;
3519 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
3520 if ((error) || (req->newptr == NULL))
3523 /* Error out if bits outside of possible flag range are set */
3524 if ((requested_ls & ~((u8)0x3F)) != 0) {
3525 device_printf(dev, "Input advertised speed out of range; "
3526 "valid flags are: 0x%02x\n",
3527 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3531 /* Check if adapter supports input value */
3532 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
3533 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
3534 device_printf(dev, "Invalid advertised speed; "
3535 "valid flags are: 0x%02x\n",
3536 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3540 error = ixl_set_advertised_speeds(pf, requested_ls, false);
/* Only cache the new value once the PHY accepted it. */
3544 pf->advertised_speed = requested_ls;
3545 ixl_update_link_status(pf);
3550 ** Get the width and transaction speed of
3551 ** the bus this adapter is plugged into.
/*
 * Reads the PCIe Link Status register, records speed/width in the hw
 * struct, prints them, and warns when the slot's bandwidth may be
 * insufficient for the port count times the maximum link speed.
 */
3554 ixl_get_bus_info(struct ixl_pf *pf)
3556 struct i40e_hw *hw = &pf->hw;
3557 device_t dev = pf->dev;
3559 u32 offset, num_ports;
3562 /* Some devices don't use PCIE */
3563 if (hw->mac.type == I40E_MAC_X722)
3566 /* Read PCI Express Capabilities Link Status Register */
3567 pci_find_cap(dev, PCIY_EXPRESS, &offset);
3568 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3570 /* Fill out hw struct with PCIE info */
3571 i40e_set_pci_config_data(hw, link);
3573 /* Use info to print out bandwidth messages */
3574 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3575 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
3576 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
3577 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3578 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
3579 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
3580 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
3581 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
3585 * If adapter is in slot with maximum supported speed,
3586 * no warning message needs to be printed out.
3588 if (hw->bus.speed >= i40e_bus_speed_8000
3589 && hw->bus.width >= i40e_bus_width_pcie_x8)
3592 num_ports = bitcount32(hw->func_caps.valid_functions);
3593 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
/* Rough heuristic: total port bandwidth vs. (speed enum * lane count). */
3595 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
3596 device_printf(dev, "PCI-Express bandwidth available"
3597 " for this device may be insufficient for"
3598 " optimal performance.\n");
3599 device_printf(dev, "Please move the device to a different"
3600 " PCI-e link with more lanes and/or higher"
3601 " transfer rate.\n");
/*
 * Sysctl handler: report the NVM/firmware version string via an sbuf.
 * (Fragment is elided; the sbuf finish/delete and return are not visible.)
 */
3606 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
3608 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3609 	struct i40e_hw *hw = &pf->hw;
3612 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3613 	ixl_nvm_version_str(hw, sbuf);
/*
 * Debug helper: pretty-print an NVM update AQ request.  The specific
 * READ of 1 byte at offset 0 with ptr/flags both 0xF is treated specially
 * (its printf is commented out) to avoid log spam from the NVM update
 * tool's frequent "get driver status" polling.
 */
3621 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
3623 	if ((nvma->command == I40E_NVM_READ) &&
3624 	    ((nvma->config & 0xFF) == 0xF) &&
3625 	    (((nvma->config & 0xF00) >> 8) == 0xF) &&
3626 	    (nvma->offset == 0) &&
3627 	    (nvma->data_size == 1)) {
3628 		// device_printf(dev, "- Get Driver Status Command\n");
3630 	else if (nvma->command == I40E_NVM_READ) {
3634 	switch (nvma->command) {
3636 		device_printf(dev, "- command: I40E_NVM_READ\n");
3639 		device_printf(dev, "- command: I40E_NVM_WRITE\n");
3642 		device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
/* config packs the module pointer in bits 0-7 and flags in bits 8-11. */
3646 	device_printf(dev, "- config (ptr)  : 0x%02x\n", nvma->config & 0xFF);
3647 	device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
3648 	device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
3649 	device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
/*
 * ioctl backend for the NVM update tool: copy an i40e_nvm_access request
 * in from userspace, validate it, hand it to i40e_nvmupd_command(), and
 * copy the result back out.
 *
 * Waits up to ~10s (100 x 100ms) for an in-progress adapter reset to
 * finish before touching the NVM.  data_size is bounded to [1, 4096].
 */
3654 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3656 	struct i40e_hw *hw = &pf->hw;
3657 	struct i40e_nvm_access *nvma;
3658 	device_t dev = pf->dev;
3659 	enum i40e_status_code status = 0;
3660 	size_t nvma_size, ifd_len, exp_len;
3663 	DEBUGFUNC("ixl_handle_nvmupd_cmd");
3666 	nvma_size = sizeof(struct i40e_nvm_access);
3667 	ifd_len = ifd->ifd_len;
/* Reject requests too short to even contain the fixed-size header. */
3669 	if (ifd_len < nvma_size ||
3670 	    ifd->ifd_data == NULL) {
3671 		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3673 		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3674 		    __func__, ifd_len, nvma_size);
3675 		device_printf(dev, "%s: data pointer: %p\n", __func__,
3680 	nvma = malloc(ifd_len, M_DEVBUF, M_WAITOK);
3681 	err = copyin(ifd->ifd_data, nvma, ifd_len);
3683 		device_printf(dev, "%s: Cannot get request from user space\n",
3685 		free(nvma, M_DEVBUF);
3689 	if (pf->dbg_mask & IXL_DBG_NVMUPD)
3690 		ixl_print_nvm_cmd(dev, nvma);
/* If the adapter is mid-reset, poll until it finishes (bounded wait). */
3692 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3694 		while (count++ < 100) {
3695 			i40e_msec_delay(100);
3696 			if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING))
3701 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3702 		free(nvma, M_DEVBUF);
3706 	if (nvma->data_size < 1 || nvma->data_size > 4096) {
3707 		device_printf(dev, "%s: invalid request, data size not in supported range\n",
3709 		free(nvma, M_DEVBUF);
3714 	 * Older versions of the NVM update tool don't set ifd_len to the size
3715 	 * of the entire buffer passed to the ioctl. Check the data_size field
3716 	 * in the contained i40e_nvm_access struct and ensure everything is
3717 	 * copied in from userspace.
3719 	exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
/*
 * NOTE(review): the realloc below passes ifd_len, not exp_len, and then
 * copies only ifd_len bytes — verify against the elided lines that the
 * buffer really ends up large enough for data_size bytes of payload.
 * Also, realloc(nvma, ...) overwriting nvma directly is safe only with
 * M_WAITOK (cannot fail); keep that flag if this is ever changed.
 */
3721 	if (ifd_len < exp_len) {
3723 		nvma = realloc(nvma, ifd_len, M_DEVBUF, M_WAITOK);
3724 		err = copyin(ifd->ifd_data, nvma, ifd_len);
3726 			device_printf(dev, "%s: Cannot get request from user space\n",
3728 			free(nvma, M_DEVBUF);
3733 	// TODO: Might need a different lock here
3735 	status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3736 	// IXL_PF_UNLOCK(pf);
3738 	err = copyout(nvma, ifd->ifd_data, ifd_len);
3739 	free(nvma, M_DEVBUF);
3741 		device_printf(dev, "%s: Cannot return data to user space\n",
3746 	/* Let the nvmupdate report errors, show them only when debug is enabled */
3747 	if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3748 		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3749 		    i40e_stat_str(hw, status), perrno);
3752 	 * -EPERM is actually ERESTART, which the kernel interprets as it needing
3753 	 * to run this ioctl again. So use -EACCES for -EPERM instead.
3755 	if (perrno == -EPERM)
/*
 * Scan the four GLGEN_MDIO_I2C_SEL registers looking for the one that has
 * I2C enabled and whose PHY port number matches this function's port; the
 * matching index identifies the I2C interface to use.  (The return of the
 * matched index / not-found value is in elided lines.)
 */
3762 ixl_find_i2c_interface(struct ixl_pf *pf)
3764 	struct i40e_hw *hw = &pf->hw;
3765 	bool i2c_en, port_matched;
3768 	for (int i = 0; i < 4; i++) {
3769 		reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3770 		i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3771 		port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3772 		    >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3774 		if (i2c_en && port_matched)
/*
 * Map a PHY-type bit position to a human-readable name.  With ext set,
 * bit_pos indexes the 8-entry extended table; otherwise the 32-entry base
 * table.  Out-of-range positions return a sentinel string rather than
 * reading past the arrays.  (Most table initializers are elided here.)
 */
3782 ixl_phy_type_string(u32 bit_pos, bool ext)
3784 	static char * phy_types_str[32] = {
3814 		"1000BASE-T Optical",
3818 	static char * ext_phy_types_str[8] = {
3829 	if (ext && bit_pos > 7) return "Invalid_Ext";
3830 	if (bit_pos > 31) return "Invalid";
3832 	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3835 /* TODO: ERJ: I don't this is necessary anymore. */
/*
 * Issue a Get Link Status AQ command with LSE (link status event)
 * enabled, then copy the response parameters into the caller's struct.
 */
3837 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3839 	device_t dev = pf->dev;
3840 	struct i40e_hw *hw = &pf->hw;
3841 	struct i40e_aq_desc desc;
3842 	enum i40e_status_code status;
3844 	struct i40e_aqc_get_link_status *aq_link_status =
3845 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
3847 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
/*
 * NOTE(review): this writes command_flags into the CALLER's buffer, not
 * into aq_link_status (the descriptor actually sent) — confirm whether
 * the LSE flag is meant to go into desc.params; as written the sent
 * descriptor carries only what i40e_fill_default_direct_cmd_desc set.
 */
3848 	link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3849 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3852 		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3853 		    __func__, i40e_stat_str(hw, status),
3854 		    i40e_aq_str(hw, hw->aq.asq_last_status));
/* Response parameters live in the descriptor; copy them back out. */
3858 	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
/*
 * Translate the phy_type byte from a link-status response into a name.
 * Values at/above 0x1F map into the extended table (offset by 0x1F);
 * the selecting condition itself is in an elided line.
 */
3863 ixl_phy_type_string_ls(u8 val)
3866 		return ixl_phy_type_string(val - 0x1F, true);
3868 		return ixl_phy_type_string(val, false);
/*
 * Sysctl handler: fetch the current link status via the AQ and format
 * the raw response fields (phy type, speed, link/AN/ext info, loopback,
 * max frame size, power) into the sysctl output buffer.
 */
3872 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3874 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3875 	device_t dev = pf->dev;
3879 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3881 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3885 	struct i40e_aqc_get_link_status link_status;
3886 	error = ixl_aq_get_link_status(pf, &link_status);
3892 	sbuf_printf(buf, "\n"
3893 	    "PHY Type : 0x%02x<%s>\n"
3895 	    "Link info: 0x%02x\n"
3896 	    "AN info  : 0x%02x\n"
3897 	    "Ext info : 0x%02x\n"
3898 	    "Loopback : 0x%02x\n"
3902 	    link_status.phy_type,
3903 	    ixl_phy_type_string_ls(link_status.phy_type),
3904 	    link_status.link_speed,
3905 	    link_status.link_info,
3906 	    link_status.an_info,
3907 	    link_status.ext_info,
3908 	    link_status.loopback,
3909 	    link_status.max_frame_size,
3911 	    link_status.power_desc);
3913 	error = sbuf_finish(buf);
3915 		device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler: query PHY capabilities from firmware and dump them —
 * the phy_type bitmaps are decoded bit-by-bit into names, followed by
 * the remaining ability/EEE/FEC fields printed raw.
 */
3922 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3924 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3925 	struct i40e_hw *hw = &pf->hw;
3926 	device_t dev = pf->dev;
3927 	enum i40e_status_code status;
3928 	struct i40e_aq_get_phy_abilities_resp abilities;
3932 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3934 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3938 	status = i40e_aq_get_phy_capabilities(hw,
3939 	    FALSE, FALSE, &abilities, NULL);
3942 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3943 		    __func__, i40e_stat_str(hw, status),
3944 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3949 	sbuf_printf(buf, "\n"
3951 	    abilities.phy_type);
/* Decode each set bit of the 32-bit base phy_type mask into a name. */
3953 	if (abilities.phy_type != 0) {
3954 		sbuf_printf(buf, "<");
3955 		for (int i = 0; i < 32; i++)
3956 			if ((1 << i) & abilities.phy_type)
3957 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3958 		sbuf_printf(buf, ">\n");
3961 	sbuf_printf(buf, "PHY Ext  : %02x",
3962 	    abilities.phy_type_ext);
/* Only the low 4 bits of the extended mask are decoded here. */
3964 	if (abilities.phy_type_ext != 0) {
3965 		sbuf_printf(buf, "<");
3966 		for (int i = 0; i < 4; i++)
3967 			if ((1 << i) & abilities.phy_type_ext)
3968 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
3969 		sbuf_printf(buf, ">");
3971 	sbuf_printf(buf, "\n");
3979 	    "ID       : %02x %02x %02x %02x\n"
3980 	    "ModType  : %02x %02x %02x\n"
3984 	    abilities.link_speed,
3985 	    abilities.abilities, abilities.eee_capability,
3986 	    abilities.eeer_val, abilities.d3_lpan,
3987 	    abilities.phy_id[0], abilities.phy_id[1],
3988 	    abilities.phy_id[2], abilities.phy_id[3],
3989 	    abilities.module_type[0], abilities.module_type[1],
/* fec_cfg_curr_mod_ext_info: bits 7-5 = ModType ext, bits 4-0 = FEC cfg. */
3990 	    abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3991 	    abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3992 	    abilities.ext_comp_code);
3994 	error = sbuf_finish(buf);
3996 		device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler: list the software MAC/VLAN filter table for the PF,
 * and (when SR-IOV is active) a per-VF section for each enabled VF.
 */
4003 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4005 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4006 	struct ixl_vsi *vsi = &pf->vsi;
4007 	struct ixl_mac_filter *f;
4008 	device_t dev = pf->dev;
4009 	int error = 0, ftl_len = 0, ftl_counter = 0;
4013 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4015 		device_printf(dev, "Could not allocate sbuf for output.\n");
4019 	sbuf_printf(buf, "\n");
4021 	/* Print MAC filters */
4022 	sbuf_printf(buf, "PF Filters:\n");
/* First pass counts entries so the last line can omit its newline. */
4023 	SLIST_FOREACH(f, &vsi->ftl, next)
4027 		sbuf_printf(buf, "(none)\n");
4029 	SLIST_FOREACH(f, &vsi->ftl, next) {
4031 		    MAC_FORMAT ", vlan %4d, flags %#06x",
4032 		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4033 		/* don't print '\n' for last entry */
4034 		if (++ftl_counter != ftl_len)
4035 			sbuf_printf(buf, "\n");
4040 	/* TODO: Give each VF its own filter list sysctl */
4042 	if (pf->num_vfs > 0) {
4043 		sbuf_printf(buf, "\n\n");
4044 		for (int i = 0; i < pf->num_vfs; i++) {
4046 			if (!(vf->vf_flags & VF_FLAG_ENABLED))
4050 			ftl_len = 0, ftl_counter = 0;
4051 			sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
/*
 * NOTE(review): both VF loops below walk &vsi->ftl, i.e. the PF's filter
 * list, not the VF's own (vf->vsi.ftl?) — every VF section would print
 * the same PF filters.  Verify against the full source.
 */
4052 			SLIST_FOREACH(f, &vsi->ftl, next)
4056 				sbuf_printf(buf, "(none)\n");
4058 			SLIST_FOREACH(f, &vsi->ftl, next) {
4060 				    MAC_FORMAT ", vlan %4d, flags %#06x\n",
4061 				    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4068 	error = sbuf_finish(buf);
4070 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4076 #define IXL_SW_RES_SIZE 0x14
/*
 * qsort(3) comparator: order switch resource-allocation entries by
 * ascending resource_type (u8 values, so the int subtraction cannot
 * overflow).
 */
4078 ixl_res_alloc_cmp(const void *a, const void *b)
4080 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4081 	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4082 	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4084 	return ((int)one->resource_type - (int)two->resource_type);
4088  * Longest string length: 25
/*
 * Map a switch resource type code (< 0x14) to a display name; anything
 * out of range falls through to "(Reserved)".  (Most table initializers
 * are elided in this fragment.)
 */
4091 ixl_switch_res_type_string(u8 type)
4093 	// TODO: This should be changed to static const
4094 	char * ixl_switch_res_type_strings[0x14] = {
4097 		"Perfect Match MAC address",
4100 		"Multicast hash entry",
4101 		"Unicast hash entry",
4105 		"VLAN Statistic Pool",
4108 		"Inner VLAN Forward filter",
4118 		return ixl_switch_res_type_strings[type];
4120 	return "(Reserved)";
/*
 * Sysctl handler: query the switch resource allocation table from
 * firmware, sort it by resource type, and print one row per entry
 * (guaranteed/total/used/unallocated counts).
 */
4124 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4126 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4127 	struct i40e_hw *hw = &pf->hw;
4128 	device_t dev = pf->dev;
4130 	enum i40e_status_code status;
4134 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4136 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4138 		device_printf(dev, "Could not allocate sbuf for output.\n");
4142 	bzero(resp, sizeof(resp));
4143 	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4149 		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
4150 		    __func__, i40e_stat_str(hw, status),
4151 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4156 	/* Sort entries by type for display */
4157 	qsort(resp, num_entries,
4158 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
4159 	    &ixl_res_alloc_cmp);
4161 	sbuf_cat(buf, "\n");
4162 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
4164 	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
4165 	    "                          | (this)     | (all) | (this) | (all)       \n");
4166 	for (int i = 0; i < num_entries; i++) {
4168 		    "%25s | %10d   %5d   %6d   %12d",
4169 		    ixl_switch_res_type_string(resp[i].resource_type),
4173 		    resp[i].total_unalloced);
/* Suppress the trailing newline on the final row. */
4174 		if (i < num_entries - 1)
4175 			sbuf_cat(buf, "\n");
4178 	error = sbuf_finish(buf);
4180 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4187 ** Caller must init and delete sbuf; this function will clear and
4188 ** finish it for caller.
/*
 * Format one switch-config element as "TYPE info" into the caller's
 * sbuf and return the resulting string (sbuf_data of s).  Element types
 * without an info field print only a label (those cases are partly
 * elided in this fragment).
 */
4191 ixl_switch_element_string(struct sbuf *s,
4192     struct i40e_aqc_switch_config_element_resp *element)
4196 	switch (element->element_type) {
4197 	case I40E_AQ_SW_ELEM_TYPE_MAC:
4198 		sbuf_printf(s, "MAC %3d", element->element_info);
4200 	case I40E_AQ_SW_ELEM_TYPE_PF:
4201 		sbuf_printf(s, "PF  %3d", element->element_info);
4203 	case I40E_AQ_SW_ELEM_TYPE_VF:
4204 		sbuf_printf(s, "VF  %3d", element->element_info);
4206 	case I40E_AQ_SW_ELEM_TYPE_EMP:
4209 	case I40E_AQ_SW_ELEM_TYPE_BMC:
4212 	case I40E_AQ_SW_ELEM_TYPE_PV:
4215 	case I40E_AQ_SW_ELEM_TYPE_VEB:
4218 	case I40E_AQ_SW_ELEM_TYPE_PA:
4221 	case I40E_AQ_SW_ELEM_TYPE_VSI:
4222 		sbuf_printf(s, "VSI %3d", element->element_info);
4230 	return sbuf_data(s);
/*
 * Sysctl handler: fetch the device's switch configuration via the AQ
 * and print a table of elements (SEID, name, uplink/downlink SEIDs,
 * connection type).  A second auto-growing sbuf (nmbuf) is used as
 * scratch space for formatting each element's name.
 */
4234 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
4236 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4237 	struct i40e_hw *hw = &pf->hw;
4238 	device_t dev = pf->dev;
4241 	enum i40e_status_code status;
4244 	u8 aq_buf[I40E_AQ_LARGE_BUF];
4246 	struct i40e_aqc_get_switch_config_resp *sw_config;
4247 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
4249 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4251 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
4255 	status = i40e_aq_get_switch_config(hw, sw_config,
4256 	    sizeof(aq_buf), &next, NULL);
4259 		    "%s: aq_get_switch_config() error %s, aq error %s\n",
4260 		    __func__, i40e_stat_str(hw, status),
4261 		    i40e_aq_str(hw, hw->aq.asq_last_status));
/* A nonzero 'next' SEID means more config exists than one AQ call returns. */
4266 		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
4269 	nmbuf = sbuf_new_auto();
4271 		device_printf(dev, "Could not allocate sbuf for name output.\n");
4276 	sbuf_cat(buf, "\n");
4277 	/* Assuming <= 255 elements in switch */
4278 	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
4279 	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
4281 	** Revision -- all elements are revision 1 for now
4284 	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
4285 	    "                |          |          |   (uplink)\n");
4286 	for (int i = 0; i < sw_config->header.num_reported; i++) {
4287 		// "%4d (%8s) | %8s   %8s   %#8x",
4288 		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
4290 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4291 		    &sw_config->element[i]));
4292 		sbuf_cat(buf, " | ");
4293 		sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
4295 		sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
4297 		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
/* No newline after the final element row. */
4298 		if (i < sw_config->header.num_reported - 1)
4299 			sbuf_cat(buf, "\n");
4303 	error = sbuf_finish(buf);
4305 		device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler: dump the RSS hash key.  X722 parts must read it via
 * the AQ (i40e_aq_get_rss_key); other MACs read the PFQF_HKEY registers
 * directly, 4 bytes per register, into the same key_data layout.
 */
4313 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
4315 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4316 	struct i40e_hw *hw = &pf->hw;
4317 	device_t dev = pf->dev;
4320 	enum i40e_status_code status;
4323 	struct i40e_aqc_get_set_rss_key_data key_data;
4325 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4327 		device_printf(dev, "Could not allocate sbuf for output.\n");
4331 	bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
4333 	sbuf_cat(buf, "\n");
4334 	if (hw->mac.type == I40E_MAC_X722) {
4335 		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
4337 			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
4338 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4340 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4341 			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
/* i << 2: each 32-bit register fills 4 consecutive bytes of key_data. */
4342 			bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
/* true => also print an ASCII column next to the hex dump. */
4346 	ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
4348 	error = sbuf_finish(buf);
4350 		device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Hex-dump 'length' bytes of 'buf' into sb, 16 bytes per row, each row
 * prefixed with its offset (biased by label_offset).  Short final rows
 * are space-padded; when 'text' is true an ASCII column follows, with
 * non-printable bytes (outside 32..126) shown as '.'.
 *
 * NOTE(review): with lines = length/16, a non-multiple-of-16 length
 * appears to make the LAST counted row take width=rem — confirm the
 * elided lines (e.g. a "lines++ if rem" adjustment) before assuming all
 * bytes are printed.
 */
4357 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
4362 	if (length < 1 || buf == NULL) return;
4364 	int byte_stride = 16;
4365 	int lines = length / byte_stride;
4366 	int rem = length % byte_stride;
4370 	for (i = 0; i < lines; i++) {
4371 		width = (rem > 0 && i == lines - 1)
4372 		    ? rem : byte_stride;
4374 		sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
4376 		for (j = 0; j < width; j++)
4377 			sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
/* Pad a short row so the ASCII column (if any) stays aligned. */
4379 		if (width < byte_stride) {
4380 			for (k = 0; k < (byte_stride - width); k++)
4381 				sbuf_printf(sb, "   ");
4385 			sbuf_printf(sb, "\n");
4389 		for (j = 0; j < width; j++) {
4390 			c = (char)buf[i * byte_stride + j];
4391 			if (c < 32 || c > 126)
4392 				sbuf_printf(sb, ".");
4394 				sbuf_printf(sb, "%c", c);
4397 		sbuf_printf(sb, "\n");
/*
 * Sysctl handler: dump the RSS lookup table (LUT).  X722 reads it via
 * the AQ; other MACs read PFQF_HLUT registers, 4 LUT bytes per 32-bit
 * register.  Output is a plain hex dump (no ASCII column).
 */
4403 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
4405 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4406 	struct i40e_hw *hw = &pf->hw;
4407 	device_t dev = pf->dev;
4410 	enum i40e_status_code status;
4414 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4416 		device_printf(dev, "Could not allocate sbuf for output.\n");
4420 	bzero(hlut, sizeof(hlut));
4421 	sbuf_cat(buf, "\n");
4422 	if (hw->mac.type == I40E_MAC_X722) {
4423 		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
4425 			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
4426 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
/* rss_table_size >> 2: four LUT entries are packed in each register. */
4428 		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
4429 			reg = rd32(hw, I40E_PFQF_HLUT(i));
4430 			bcopy(&reg, &hlut[i << 2], 4);
4433 	ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
4435 	error = sbuf_finish(buf);
4437 		device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler: report the 64-bit RSS hash-enable (HENA) mask,
 * assembled from the two 32-bit PFQF_HENA registers (reg 1 supplies
 * the upper word).  Read-only: the value is handed straight to
 * sysctl_handle_long.
 */
4444 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
4446 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4447 	struct i40e_hw *hw = &pf->hw;
4450 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
4451 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
4453 	return sysctl_handle_long(oidp, NULL, hena, req);
4457  * Sysctl to disable firmware's link management
4459  * 1 - Disable link management on this port
4460  * 0 - Re-enable link management
4462  * On normal NVMs, firmware manages link by default.
/*
 * Implemented via the Set PHY Debug AQ command; the requested mode is
 * shifted into bit 4 of the debug flags.
 */
4465 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
4467 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4468 	struct i40e_hw *hw = &pf->hw;
4469 	device_t dev = pf->dev;
4470 	int requested_mode = -1;
4471 	enum i40e_status_code status = 0;
4474 	/* Read in new mode */
4475 	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
/* Read-only access (no new value) returns here without touching hardware. */
4476 	if ((error) || (req->newptr == NULL))
4478 	/* Check for sane value */
4479 	if (requested_mode < 0 || requested_mode > 1) {
4480 		device_printf(dev, "Valid modes are 0 or 1\n");
4485 	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4488 		    "%s: Error setting new phy debug mode %s,"
4489 		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4490 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4498  * Read some diagnostic data from an SFP module
4499  * Bytes 96-99, 102-105 from device address 0xA2
/*
 * Checks the module identifier byte (expects 0x3 = SFP/SFP+/SFP28) and
 * the diagnostic-monitoring bits of byte 92 at address 0xA0 before
 * reading the A2 diagnostic page.  Intermediate read_i2c_byte errors on
 * the A2 reads are not checked — best-effort dump.
 */
4502 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4504 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4505 	device_t dev = pf->dev;
4510 	error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4512 		device_printf(dev, "Error reading from i2c\n");
4515 	if (output != 0x3) {
4516 		device_printf(dev, "Module is not SFP/SFP+/SFP28 (%02X)\n", output);
/* Byte 92 bits 5-6 advertise diagnostic monitoring support. */
4520 	pf->read_i2c_byte(pf, 92, 0xA0, &output);
4521 	if (!(output & 0x60)) {
4522 		device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4526 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4528 	for (u8 offset = 96; offset < 100; offset++) {
4529 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4530 		sbuf_printf(sbuf, "%02X ", output);
4532 	for (u8 offset = 102; offset < 106; offset++) {
4533 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4534 		sbuf_printf(sbuf, "%02X ", output);
4544  * Sysctl to read a byte from I2C bus.
4546  * Input: 32-bit value:
4547  *	bits 0-7:   device address (0xA0 or 0xA2)
4548  *	bits 8-15:  offset (0-255)
4549  *	bits 16-31: unused
4550  * Output: 8-bit value read
/*
 * The read result is emitted via device_printf (kernel log), not via
 * the sysctl return value.
 */
4553 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4555 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4556 	device_t dev = pf->dev;
4557 	int input = -1, error = 0;
4558 	u8 dev_addr, offset, output;
4560 	/* Read in I2C read parameters */
4561 	error = sysctl_handle_int(oidp, &input, 0, req);
4562 	if ((error) || (req->newptr == NULL))
4564 	/* Validate device address */
4565 	dev_addr = input & 0xFF;
/* Only the SFP EEPROM (0xA0) and diagnostics (0xA2) addresses are allowed. */
4566 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4569 	offset = (input >> 8) & 0xFF;
4571 	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4575 	device_printf(dev, "%02X\n", output);
4580  * Sysctl to write a byte to the I2C bus.
4582  * Input: 32-bit value:
4583  *	bits 0-7:   device address (0xA0 or 0xA2)
4584  *	bits 8-15:  offset (0-255)
4585  *	bits 16-23: value to write
4586  *	bits 24-31: unused
4587  * Output: 8-bit value written
/*
 * Mirror of ixl_sysctl_read_i2c_byte for writes; confirmation goes to
 * the kernel log.
 */
4590 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4592 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4593 	device_t dev = pf->dev;
4594 	int input = -1, error = 0;
4595 	u8 dev_addr, offset, value;
4597 	/* Read in I2C write parameters */
4598 	error = sysctl_handle_int(oidp, &input, 0, req);
4599 	if ((error) || (req->newptr == NULL))
4601 	/* Validate device address */
4602 	dev_addr = input & 0xFF;
4603 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4606 	offset = (input >> 8) & 0xFF;
4607 	value = (input >> 16) & 0xFF;
4609 	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4613 	device_printf(dev, "%02X written\n", value);
/*
 * Fetch current PHY abilities and report (via *is_set) whether the FEC
 * bit 'bit_pos' is set in fec_cfg_curr_mod_ext_info.  The full abilities
 * response is returned through *abilities for reuse by a subsequent
 * ixl_set_fec_config call.
 */
4618 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4619     u8 bit_pos, int *is_set)
4621 	device_t dev = pf->dev;
4622 	struct i40e_hw *hw = &pf->hw;
4623 	enum i40e_status_code status;
4625 	status = i40e_aq_get_phy_capabilities(hw,
4626 	    FALSE, FALSE, abilities, NULL);
4629 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4630 		    __func__, i40e_stat_str(hw, status),
4631 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4635 	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
/*
 * Set or clear one FEC configuration bit via Set PHY Config.  The new
 * fec_config is derived from the current abilities; if it is unchanged
 * no AQ command is issued.  All other PHY settings are copied across
 * from 'abilities' so only the FEC bit differs.
 */
4640 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4641     u8 bit_pos, int set)
4643 	device_t dev = pf->dev;
4644 	struct i40e_hw *hw = &pf->hw;
4645 	struct i40e_aq_set_phy_config config;
4646 	enum i40e_status_code status;
4648 	/* Set new PHY config */
4649 	memset(&config, 0, sizeof(config));
/* Clear the target bit, then re-set it only when 'set' is requested. */
4650 	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4652 		config.fec_config |= bit_pos;
4653 	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
/* Keep the link up atomically while the config is applied. */
4654 		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4655 		config.phy_type = abilities->phy_type;
4656 		config.phy_type_ext = abilities->phy_type_ext;
4657 		config.link_speed = abilities->link_speed;
4658 		config.eee_capability = abilities->eee_capability;
4659 		config.eeer = abilities->eeer_val;
4660 		config.low_power_ctrl = abilities->d3_lpan;
4661 		status = i40e_aq_set_phy_config(hw, &config, NULL);
4665 			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4666 			    __func__, i40e_stat_str(hw, status),
4667 			    i40e_aq_str(hw, hw->aq.asq_last_status));
/*
 * Sysctl handler: get/set the KR-FEC (firecode) *ability* bit.
 * Read path reports I40E_AQ_ENABLE_FEC_KR from current abilities;
 * write path applies I40E_AQ_SET_FEC_ABILITY_KR via Set PHY Config.
 */
4676 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4678 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4679 	int mode, error = 0;
4681 	struct i40e_aq_get_phy_abilities_resp abilities;
4682 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4685 	/* Read in new mode */
4686 	error = sysctl_handle_int(oidp, &mode, 0, req);
4687 	if ((error) || (req->newptr == NULL))
4690 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
/*
 * Sysctl handler: get/set the RS-FEC (Reed-Solomon) *ability* bit;
 * same pattern as ixl_sysctl_fec_fc_ability.
 */
4694 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4696 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4697 	int mode, error = 0;
4699 	struct i40e_aq_get_phy_abilities_resp abilities;
4700 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4703 	/* Read in new mode */
4704 	error = sysctl_handle_int(oidp, &mode, 0, req);
4705 	if ((error) || (req->newptr == NULL))
4708 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
/*
 * Sysctl handler: get/set the KR-FEC *request* bit (what we ask the
 * link partner for, as opposed to the ability bit).
 */
4712 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4714 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4715 	int mode, error = 0;
4717 	struct i40e_aq_get_phy_abilities_resp abilities;
4718 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4721 	/* Read in new mode */
4722 	error = sysctl_handle_int(oidp, &mode, 0, req);
4723 	if ((error) || (req->newptr == NULL))
4726 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
/*
 * Sysctl handler: get/set the RS-FEC *request* bit; same pattern as
 * ixl_sysctl_fec_fc_request.
 */
4730 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4732 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4733 	int mode, error = 0;
4735 	struct i40e_aq_get_phy_abilities_resp abilities;
4736 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4739 	/* Read in new mode */
4740 	error = sysctl_handle_int(oidp, &mode, 0, req);
4741 	if ((error) || (req->newptr == NULL))
4744 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
/*
 * Sysctl handler: get/set the automatic-FEC-negotiation bit; same
 * get/modify pattern as the other FEC sysctls.
 */
4748 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4750 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4751 	int mode, error = 0;
4753 	struct i40e_aq_get_phy_abilities_resp abilities;
4754 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4757 	/* Read in new mode */
4758 	error = sysctl_handle_int(oidp, &mode, 0, req);
4759 	if ((error) || (req->newptr == NULL))
4762 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
/*
 * Sysctl handler: pull a firmware debug-dump cluster via repeated
 * i40e_aq_debug_dump calls (4KB per call) into a 1.25MB staging buffer,
 * emitting each completed table as "<table>: <hex>" lines.  Firmware
 * signals the table boundary by changing ret_next_table, end of cluster
 * with ret_next_table == 0xFF / ret_next_index == 0xFFFFFFFF.
 */
4766 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4768 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4769 	struct i40e_hw *hw = &pf->hw;
4770 	device_t dev = pf->dev;
4773 	enum i40e_status_code status;
4775 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4777 		device_printf(dev, "Could not allocate sbuf for output.\n");
4782 	/* This amount is only necessary if reading the entire cluster into memory */
4783 #define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
4784 	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK);
/* NOTE(review): M_WAITOK malloc cannot return NULL; this check is dead. */
4785 	if (final_buff == NULL) {
4786 		device_printf(dev, "Could not allocate memory for output.\n");
4789 	int final_buff_len = 0;
4795 	u16 curr_buff_size = 4096;
4796 	u8 curr_next_table = 0;
4797 	u32 curr_next_index = 0;
4803 	sbuf_cat(buf, "\n");
4806 		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4807 		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4809 			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4810 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4814 		/* copy info out of temp buffer */
4815 		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4816 		final_buff_len += ret_buff_size;
4818 		if (ret_next_table != curr_next_table) {
4819 			/* We're done with the current table; we can dump out read data. */
4820 			sbuf_printf(buf, "%d:", curr_next_table);
4821 			int bytes_printed = 0;
/*
 * NOTE(review): '<=' lets the final iteration start AT final_buff_len,
 * printing 16 bytes from past the accumulated data — likely should be
 * '<'; verify intent (%16D prints 16 bytes regardless).
 */
4822 			while (bytes_printed <= final_buff_len) {
4823 				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4824 				bytes_printed += 16;
4826 			sbuf_cat(buf, "\n");
4828 			/* The entire cluster has been read; we're finished */
4829 			if (ret_next_table == 0xFF)
4832 			/* Otherwise clear the output buffer and continue reading */
4833 			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4837 			if (ret_next_index == 0xFFFFFFFF)
4840 		bzero(dump_buf, sizeof(dump_buf));
4841 		curr_next_table = ret_next_table;
4842 		curr_next_index = ret_next_index;
4846 	free(final_buff, M_DEVBUF);
4848 	error = sbuf_finish(buf);
4850 		device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler: enable (1) or disable (0) the firmware LLDP agent.
 * Disabling is rejected on X722 and NPAR configurations, and on FW API
 * versions older than 1.7.  The IXL_PF_STATE_FW_LLDP_DISABLED state bit
 * tracks the current setting (note: set == agent DISABLED, so the
 * sysctl value is the inverse of the bit).
 */
4857 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4859 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4860 	struct i40e_hw *hw = &pf->hw;
4861 	device_t dev = pf->dev;
4863 	int state, new_state;
4864 	enum i40e_status_code status;
4865 	state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4867 	/* Read in new mode */
4868 	error = sysctl_handle_int(oidp, &new_state, 0, req);
4869 	if ((error) || (req->newptr == NULL))
4872 	/* Already in requested state */
4873 	if (new_state == state)
4876 	if (new_state == 0) {
4877 		if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) {
4878 			device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n");
4882 		if (pf->hw.aq.api_maj_ver < 1 ||
4883 		    (pf->hw.aq.api_maj_ver == 1 &&
4884 		    pf->hw.aq.api_min_ver < 7)) {
4885 			device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
/* Stop the agent, then let the driver take over DCB parameter handling. */
4889 		i40e_aq_stop_lldp(&pf->hw, true, NULL);
4890 		i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
4891 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4893 		status = i40e_aq_start_lldp(&pf->hw, NULL);
4894 		if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST)
4895 			device_printf(dev, "FW LLDP agent is already running\n");
4896 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4903  * Get FW LLDP Agent status
/*
 * Read the LLDP configuration from NVM and derive this port's
 * AdminStatus (4 bits per port in lldp_cfg.adminstatus); update the
 * IXL_PF_STATE_FW_LLDP_DISABLED state bit to match.
 */
4906 ixl_get_fw_lldp_status(struct ixl_pf *pf)
4908 	enum i40e_status_code ret = I40E_SUCCESS;
4909 	struct i40e_lldp_variables lldp_cfg;
4910 	struct i40e_hw *hw = &pf->hw;
4913 	ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
4917 	/* Get the LLDP AdminStatus for the current port */
4918 	adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
4921 	/* Check if LLDP agent is disabled */
4923 		device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n");
4924 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4926 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
/*
 * Attach-time link bring-up: on old firmware (< 4.33) a link restart
 * (with a 75ms settle delay) works around a FW quirk; then query the
 * current link state into pf->link_up.
 */
4932 ixl_attach_get_link_status(struct ixl_pf *pf)
4934 	struct i40e_hw *hw = &pf->hw;
4935 	device_t dev = pf->dev;
4938 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4939 	    (hw->aq.fw_maj_ver < 4)) {
4940 		i40e_msec_delay(75);
4941 		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4943 			device_printf(dev, "link restart failed, aq_err=%d\n",
4944 			    pf->hw.aq.asq_last_status);
4949 	/* Determine link state */
/* Force a fresh read of link info rather than using a cached value. */
4950 	hw->phy.get_link_info = TRUE;
4951 	i40e_get_link_status(hw, &pf->link_up);
/*
 * Sysctl handler: request a PF reset.  The reset itself runs later in
 * the admin task; here we only set the request bit in pf->state.
 */
4956 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4958 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4959 	int requested = 0, error = 0;
4961 	/* Read in new mode */
4962 	error = sysctl_handle_int(oidp, &requested, 0, req);
4963 	if ((error) || (req->newptr == NULL))
4966 	/* Initiate the PF reset later in the admin task */
4967 	atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
/*
 * Sysctl handler: trigger an immediate CORE reset by writing the CORER
 * bit in GLGEN_RTRIG.  Any written value triggers the reset.
 */
4973 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4975 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4976 	struct i40e_hw *hw = &pf->hw;
4977 	int requested = 0, error = 0;
4979 	/* Read in new mode */
4980 	error = sysctl_handle_int(oidp, &requested, 0, req);
4981 	if ((error) || (req->newptr == NULL))
4984 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
/*
 * Sysctl handler: trigger an immediate GLOBAL reset via the GLOBR bit
 * in GLGEN_RTRIG; same pattern as the core-reset sysctl.
 */
4990 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4992 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4993 	struct i40e_hw *hw = &pf->hw;
4994 	int requested = 0, error = 0;
4996 	/* Read in new mode */
4997 	error = sysctl_handle_int(oidp, &requested, 0, req);
4998 	if ((error) || (req->newptr == NULL))
5001 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
/*
 * Sysctl handler: trigger an EMP (firmware) reset via EMPFWR in
 * GLGEN_RTRIG, but only if the (undocumented) register at 0x000B818C
 * indicates software is allowed to initiate EMPR.
 */
5007 ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS)
5009 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5010 	struct i40e_hw *hw = &pf->hw;
5011 	int requested = 0, error = 0;
5013 	/* Read in new mode */
5014 	error = sysctl_handle_int(oidp, &requested, 0, req);
5015 	if ((error) || (req->newptr == NULL))
5018 	/* TODO: Find out how to bypass this */
5019 	if (!(rd32(hw, 0x000B818C) & 0x1)) {
5020 		device_printf(pf->dev, "SW not allowed to initiate EMPR\n");
5023 		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_EMPFWR_MASK);
5029 * Print out mapping of TX queue indexes and Rx queue indexes
5033 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
5035 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5036 struct ixl_vsi *vsi = &pf->vsi;
5037 device_t dev = pf->dev;
5041 struct ixl_rx_queue *rx_que = vsi->rx_queues;
5042 struct ixl_tx_queue *tx_que = vsi->tx_queues;
5044 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5046 device_printf(dev, "Could not allocate sbuf for output.\n");
5050 sbuf_cat(buf, "\n");
5051 for (int i = 0; i < vsi->num_rx_queues; i++) {
5052 rx_que = &vsi->rx_queues[i];
5053 sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
5055 for (int i = 0; i < vsi->num_tx_queues; i++) {
5056 tx_que = &vsi->tx_queues[i];
5057 sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
5060 error = sbuf_finish(buf);
5062 device_printf(dev, "Error finishing sbuf: %d\n", error);