1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
39 #include "ixl_pf_iov.h"
44 #include "ixl_iw_int.h"
47 static u8 ixl_convert_sysctl_aq_link_speed(u8, bool);
48 static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
51 static int ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
52 static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
53 static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
54 static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
55 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
56 static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
57 static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
58 static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
61 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
62 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
63 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
64 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
65 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
66 static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
67 static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
68 static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
69 static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
70 static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
71 static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
72 static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
73 static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
74 static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
75 static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
76 static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
77 static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
78 static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
79 static int ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
80 static int ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
81 static int ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
82 static int ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS);
83 static int ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
84 static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
86 static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
87 static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
91 extern int ixl_enable_iwarp;
92 extern int ixl_limit_iwarp_msix;
95 const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
96 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
98 const char * const ixl_fc_string[6] = {
107 static char *ixl_fec_string[3] = {
109 "CL74 FC-FEC/BASE-R",
113 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
116 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
/*
 * Format firmware, AQ API, NVM, EEtrack-ID and OEM version information
 * from the shared hw struct into the caller-supplied sbuf.
 * NOTE(review): several lines are elided in this excerpt (the sbuf_printf()
 * call that consumes the format string below is not visible).
 */
119 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
/* OEM version word is packed: version in the top byte, build in bits
 * 23:8 (masked to 16 bits), patch in the low byte. */
121 u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
122 u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
123 u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
126 "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
127 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
128 hw->aq.api_maj_ver, hw->aq.api_min_ver,
/* NVM version is split into hi/lo fields via mask+shift constants */
129 (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
130 IXL_NVM_VERSION_HI_SHIFT,
131 (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
132 IXL_NVM_VERSION_LO_SHIFT,
134 oem_ver, oem_build, oem_patch);
/*
 * Build the NVM/FW version string into an auto-sized sbuf and print it
 * via the device.  NOTE(review): sbuf declaration/finish/delete lines are
 * elided in this excerpt — confirm sbuf_finish() precedes sbuf_data().
 */
138 ixl_print_nvm_version(struct ixl_pf *pf)
140 struct i40e_hw *hw = &pf->hw;
141 device_t dev = pf->dev;
144 sbuf = sbuf_new_auto();
145 ixl_nvm_version_str(hw, sbuf);
147 device_printf(dev, "%s\n", sbuf_data(sbuf));
/*
 * Program the PF's TX interrupt throttle rate (ITR) into every TX queue's
 * ITRN register and mirror the setting into each software tx_ring.
 */
152 ixl_configure_tx_itr(struct ixl_pf *pf)
154 struct i40e_hw *hw = &pf->hw;
155 struct ixl_vsi *vsi = &pf->vsi;
156 struct ixl_tx_queue *que = vsi->tx_queues;
/* Snapshot the PF-level setting into the VSI before programming HW */
158 vsi->tx_itr_setting = pf->tx_itr;
160 for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
161 struct tx_ring *txr = &que->txr;
163 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
164 vsi->tx_itr_setting);
/* Keep the ring's cached ITR/latency in sync with what HW was given */
165 txr->itr = vsi->tx_itr_setting;
166 txr->latency = IXL_AVE_LATENCY;
/*
 * Program the PF's RX interrupt throttle rate (ITR) into every RX queue's
 * ITRN register and mirror the setting into each software rx_ring.
 * Mirror image of ixl_configure_tx_itr().
 */
171 ixl_configure_rx_itr(struct ixl_pf *pf)
173 struct i40e_hw *hw = &pf->hw;
174 struct ixl_vsi *vsi = &pf->vsi;
175 struct ixl_rx_queue *que = vsi->rx_queues;
/* Snapshot the PF-level setting into the VSI before programming HW */
177 vsi->rx_itr_setting = pf->rx_itr;
179 for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
180 struct rx_ring *rxr = &que->rxr;
182 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
183 vsi->rx_itr_setting);
/* Keep the ring's cached ITR/latency in sync with what HW was given */
184 rxr->itr = vsi->rx_itr_setting;
185 rxr->latency = IXL_AVE_LATENCY;
190 * Write PF ITR values to queue ITR registers.
/* Write both TX and RX PF ITR values to the per-queue ITR registers. */
193 ixl_configure_itr(struct ixl_pf *pf)
195 ixl_configure_tx_itr(pf);
196 ixl_configure_rx_itr(pf);
199 /*********************************************************************
201 * Get the hardware capabilities
203 **********************************************************************/
/*
 * Query function-level capabilities from firmware (populating hw->func_caps),
 * probe for an I2C interface, select the driver's I2C access method, and
 * print a capability summary.  NOTE(review): error-return paths, the retry
 * branch body, and the switch-case labels are elided in this excerpt.
 */
206 ixl_get_hw_capabilities(struct ixl_pf *pf)
208 struct i40e_aqc_list_capabilities_element_resp *buf;
209 struct i40e_hw *hw = &pf->hw;
210 device_t dev = pf->dev;
211 enum i40e_status_code status;
212 int len, i2c_intfc_num;
/* Initial guess: room for 40 capability elements */
216 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
218 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
219 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
220 device_printf(dev, "Unable to allocate cap memory\n");
224 /* This populates the hw struct */
225 status = i40e_aq_discover_capabilities(hw, buf, len,
226 &needed, i40e_aqc_opc_list_func_capabilities, NULL);
/* FW reports ENOMEM (with the required size in 'needed') when the
 * buffer was too small */
228 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
230 /* retry once with a larger buffer */
234 } else if (status != I40E_SUCCESS) {
235 device_printf(dev, "capability discovery failed; status %s, error %s\n",
236 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
241 * Some devices have both MDIO and I2C; since this isn't reported
242 * by the FW, check registers to see if an I2C interface exists.
244 i2c_intfc_num = ixl_find_i2c_interface(pf);
245 if (i2c_intfc_num != -1)
248 /* Determine functions to use for driver I2C accesses */
249 switch (pf->i2c_access_method) {
/* AQ-based I2C access requires XL710 with AQ API >= 1.7; otherwise
 * fall back to direct register access */
251 if (hw->mac.type == I40E_MAC_XL710 &&
252 hw->aq.api_maj_ver == 1 &&
253 hw->aq.api_min_ver >= 7) {
254 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
255 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
257 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
258 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
263 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
264 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
267 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
268 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
/* Bit-bang fallback */
271 pf->read_i2c_byte = ixl_read_i2c_byte_bb;
272 pf->write_i2c_byte = ixl_write_i2c_byte_bb;
275 /* Should not happen */
276 device_printf(dev, "Error setting I2C access functions\n");
280 /* Print a subset of the capability information. */
282 "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
283 hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
284 hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
285 (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
286 (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
287 (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
293 /* For the set_advertise sysctl */
/*
 * On load, advertise the full set of supported speeds to the device and
 * cache the sysctl-format speed bitmap in pf->advertised_speed (for the
 * set_advertise sysctl).  A failure here is reported but non-fatal.
 */
295 ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
297 device_t dev = pf->dev;
300 /* Make sure to initialize the device to the complete list of
301 * supported speeds on driver load, to ensure unloading and
302 * reloading the driver will restore this value.
304 err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
306 /* Non-fatal error */
307 device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
/* Convert the AQ link-speed bitmap to the sysctl representation */
312 pf->advertised_speed =
313 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
/*
 * Tear down HW-side structures in reverse order of setup: shut down the
 * LAN HMC (if initialized), disable interrupt vector 0, shut down the
 * admin queue, and release the queue-manager allocation.
 */
317 ixl_teardown_hw_structs(struct ixl_pf *pf)
319 enum i40e_status_code status = 0;
320 struct i40e_hw *hw = &pf->hw;
321 device_t dev = pf->dev;
323 /* Shutdown LAN HMC */
324 if (hw->hmc.hmc_obj) {
325 status = i40e_shutdown_lan_hmc(hw);
328 "init: LAN HMC shutdown failure; status %s\n",
329 i40e_stat_str(hw, status));
334 /* Shutdown admin queue */
/* Mask intr0 first so no AQ interrupt fires during shutdown */
335 ixl_disable_intr0(hw);
336 status = i40e_shutdown_adminq(hw);
339 "init: Admin Queue shutdown failure; status %s\n",
340 i40e_stat_str(hw, status));
342 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
/*
 * Perform a PF reset and re-initialize HW structures: reset, re-init the
 * admin queue, rediscover capabilities, re-init/configure the LAN HMC,
 * re-read switch config, set PHY interrupt mask and flow control, then
 * restore intr0 and clear reset/critical-error state flags.
 * NOTE(review): most error-return lines are elided in this excerpt.
 */
348 ixl_reset(struct ixl_pf *pf)
350 struct i40e_hw *hw = &pf->hw;
351 device_t dev = pf->dev;
355 // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
357 error = i40e_pf_reset(hw);
359 device_printf(dev, "init: PF reset failure\n");
364 error = i40e_init_adminq(hw);
366 device_printf(dev, "init: Admin queue init failure;"
367 " status code %d\n", error);
372 i40e_clear_pxe_mode(hw);
375 error = ixl_get_hw_capabilities(pf);
377 device_printf(dev, "init: Error retrieving HW capabilities;"
378 " status code %d\n", error);
382 error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
383 hw->func_caps.num_rx_qp, 0, 0);
385 device_printf(dev, "init: LAN HMC init failed; status code %d\n",
391 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
393 device_printf(dev, "init: LAN HMC config failed; status code %d\n",
399 // XXX: possible fix for panic, but our failure recovery is still broken
400 error = ixl_switch_config(pf);
402 device_printf(dev, "init: ixl_switch_config() failed: %d\n",
407 error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
410 device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
411 " aq_err %d\n", error, hw->aq.asq_last_status);
416 error = i40e_set_fc(hw, &set_fc_err_mask, true);
418 device_printf(dev, "init: setting link flow control failed; retcode %d,"
419 " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
423 // XXX: (Rebuild VSIs?)
425 /* Firmware delay workaround */
/* Old firmware (< 4.33) needs an explicit link autoneg restart */
426 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
427 (hw->aq.fw_maj_ver < 4)) {
429 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
431 device_printf(dev, "init: link restart failed, aq_err %d\n",
432 hw->aq.asq_last_status);
438 /* Re-enable admin queue interrupt */
440 ixl_configure_intr0_msix(pf);
441 ixl_enable_intr0(hw);
447 ixl_rebuild_hw_structs_after_reset(pf);
449 /* The PF reset should have cleared any critical errors */
450 atomic_clear_32(&pf->state, IXL_PF_STATE_PF_CRIT_ERR);
451 atomic_clear_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
/* Re-arm the critical-error causes in the ICR0 enable register */
453 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
454 reg |= IXL_ICR0_CRIT_ERR_MASK;
455 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
462 * TODO: Make sure this properly handles admin queue / single rx queue intr
/*
 * Legacy/MSI (single-vector) interrupt service routine — presumably
 * ixl_intr(); the signature line is elided in this excerpt.  Clears the
 * PBA, reads ICR0, defers VFLR and admin-queue work to iflib tasks,
 * re-enables intr0, and schedules the queue thread if queue 0 fired.
 */
467 struct ixl_pf *pf = arg;
468 struct i40e_hw *hw = &pf->hw;
469 struct ixl_vsi *vsi = &pf->vsi;
470 struct ixl_rx_queue *que = vsi->rx_queues;
476 // TODO: Check against proper field
478 /* Clear PBA at start of ISR if using legacy interrupts */
480 wr32(hw, I40E_PFINT_DYN_CTL0,
481 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
482 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
485 icr0 = rd32(hw, I40E_PFINT_ICR0);
/* VF-level reset events are handled in the iflib IOV task */
489 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
490 iflib_iov_intr_deferred(vsi->ctx);
493 // TODO!: Do the stuff that's done in ixl_msix_adminq here, too!
494 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
495 iflib_admin_intr_deferred(vsi->ctx);
497 // TODO: Is intr0 enabled somewhere else?
498 ixl_enable_intr0(hw);
/* Only wake the queue service thread when queue 0's cause is set */
500 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
501 return (FILTER_SCHEDULE_THREAD);
503 return (FILTER_HANDLED);
507 /*********************************************************************
509 * MSI-X VSI Interrupt Service routine
511 **********************************************************************/
/*
 * MSI-X per-VSI queue interrupt filter: update the queue's dynamic RX ITR
 * and hand off to the iflib service thread.
 */
513 ixl_msix_que(void *arg)
515 struct ixl_rx_queue *rx_que = arg;
519 ixl_set_queue_rx_itr(rx_que);
520 // ixl_set_queue_tx_itr(que);
522 return (FILTER_SCHEDULE_THREAD);
526 /*********************************************************************
528 * MSI-X Admin Queue Interrupt Service routine
530 **********************************************************************/
/*
 * MSI-X admin-queue / "other" cause interrupt filter.  Reads ICR0, masks
 * off each detected cause so it cannot re-fire before it is serviced,
 * records state flags for the admin task, then re-enables intr0.
 * Returns FILTER_SCHEDULE_THREAD when the admin task needs to run.
 * NOTE(review): case bodies of the reset-type switch and several closing
 * braces are elided in this excerpt.
 */
532 ixl_msix_adminq(void *arg)
534 struct ixl_pf *pf = arg;
535 struct i40e_hw *hw = &pf->hw;
536 device_t dev = pf->dev;
537 u32 reg, mask, rstat_reg;
538 bool do_task = FALSE;
540 DDPRINTF(dev, "begin");
544 reg = rd32(hw, I40E_PFINT_ICR0);
546 * For masking off interrupt causes that need to be handled before
547 * they can be re-enabled
549 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
551 /* Check on the cause */
552 if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
553 mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
/* Malicious-driver-detect: flag for the admin task */
557 if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
558 mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
559 atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
/* Global reset requested by firmware/another function */
563 if (reg & I40E_PFINT_ICR0_GRST_MASK) {
564 mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
565 device_printf(dev, "Reset Requested!\n");
566 rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
567 rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
568 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
569 device_printf(dev, "Reset type: ");
571 /* These others might be handled similarly to an EMPR reset */
572 case I40E_RESET_CORER:
575 case I40E_RESET_GLOBR:
578 case I40E_RESET_EMPR:
585 /* overload admin queue task to check reset progress */
586 atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
591 * PE / PCI / ECC exceptions are all handled in the same way:
592 * mask out these three causes, then request a PF reset
594 * TODO: I think at least ECC error requires a GLOBR, not PFR
596 if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
597 device_printf(dev, "ECC Error detected!\n");
598 if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
599 device_printf(dev, "PCI Exception detected!\n");
600 if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
601 device_printf(dev, "Critical Protocol Engine Error detected!\n");
602 /* Checks against the conditions above */
603 if (reg & IXL_ICR0_CRIT_ERR_MASK) {
604 mask &= ~IXL_ICR0_CRIT_ERR_MASK;
/* Request a PF reset; the admin task performs it */
605 atomic_set_32(&pf->state,
606 IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR);
610 // TODO: Linux driver never re-enables this interrupt once it has been detected
611 // Then what is supposed to happen? A PF reset? Should it never happen?
612 // TODO: Parse out this error into something human readable
613 if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
614 reg = rd32(hw, I40E_PFHMC_ERRORINFO);
615 if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
616 device_printf(dev, "HMC Error detected!\n");
617 device_printf(dev, "INFO 0x%08x\n", reg);
618 reg = rd32(hw, I40E_PFHMC_ERRORDATA);
619 device_printf(dev, "DATA 0x%08x\n", reg);
/* Write 0 to acknowledge/clear the HMC error info register */
620 wr32(hw, I40E_PFHMC_ERRORINFO, 0);
625 if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
626 mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
627 iflib_iov_intr_deferred(pf->vsi.ctx);
/* Restore the (possibly reduced) cause-enable mask, then intr0 */
631 wr32(hw, I40E_PFINT_ICR0_ENA, mask);
632 ixl_enable_intr0(hw);
635 return (FILTER_SCHEDULE_THREAD);
637 return (FILTER_HANDLED);
/*
 * if_foreach_llmaddr() callback: add one link-level multicast address
 * from the ifnet to the VSI's filter list.
 */
641 ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
643 struct ixl_vsi *vsi = arg;
645 ixl_add_mc_filter(vsi, (u8*)LLADDR(sdl));
650 /*********************************************************************
653 * Routines for multicast and vlan filter management.
655 *********************************************************************/
/*
 * Sync the ifnet's multicast list into hardware.  If there are too many
 * addresses for individual filters, drop the existing MC filters and fall
 * back to multicast-promiscuous mode; otherwise walk the list and program
 * one HW filter per address.
 */
657 ixl_add_multi(struct ixl_vsi *vsi)
659 struct ifnet *ifp = vsi->ifp;
660 struct i40e_hw *hw = vsi->hw;
663 IOCTL_DEBUGOUT("ixl_add_multi: begin");
666 ** First just get a count, to decide if we
667 ** we simply use multicast promiscuous.
669 mcnt = if_llmaddr_count(ifp);
670 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
671 /* delete existing MC filters */
672 ixl_del_hw_filters(vsi, mcnt);
673 i40e_aq_set_vsi_multicast_promiscuous(hw,
674 vsi->seid, TRUE, NULL);
/* Queue up one software filter per ifnet multicast address */
678 mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, vsi);
680 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
681 ixl_add_hw_filters(vsi, flags, mcnt);
684 IOCTL_DEBUGOUT("ixl_add_multi: end");
/*
 * if_foreach_llmaddr() callback: report whether the filter passed in arg
 * matches this link-level address (used to find filters no longer present
 * on the ifnet).
 */
688 ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
690 struct ixl_mac_filter *f = arg;
692 if (cmp_etheraddr(f->macaddr, (u8 *)LLADDR(sdl)))
/*
 * Remove HW multicast filters for addresses that are no longer on the
 * ifnet's multicast list: mark each stale in-use MC filter for deletion,
 * then push the deletions to hardware.
 */
699 ixl_del_multi(struct ixl_vsi *vsi)
701 struct ifnet *ifp = vsi->ifp;
702 struct ixl_mac_filter *f;
705 IOCTL_DEBUGOUT("ixl_del_multi: begin");
/* A filter is stale when no ifnet lladdr matches it (count == 0) */
707 SLIST_FOREACH(f, &vsi->ftl, next)
708 if ((f->flags & IXL_FILTER_USED) &&
709 (f->flags & IXL_FILTER_MC) &&
710 (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0)) {
711 f->flags |= IXL_FILTER_DEL;
716 ixl_del_hw_filters(vsi, mcnt);
/*
 * Log a NOTICE-level link-up message including speed, requested and
 * negotiated FEC modes, autoneg completion, and flow-control state, all
 * derived from hw->phy.link_info.
 */
722 ixl_link_up_msg(struct ixl_pf *pf)
724 struct i40e_hw *hw = &pf->hw;
725 struct ifnet *ifp = pf->vsi.ifp;
726 char *req_fec_string, *neg_fec_string;
729 fec_abilities = hw->phy.link_info.req_fec_info;
730 /* If both RS and KR are requested, only show RS */
731 if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
732 req_fec_string = ixl_fec_string[0];
733 else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
734 req_fec_string = ixl_fec_string[1];
736 req_fec_string = ixl_fec_string[2];
/* Negotiated FEC, same RS-over-KR preference */
738 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
739 neg_fec_string = ixl_fec_string[0];
740 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
741 neg_fec_string = ixl_fec_string[1];
743 neg_fec_string = ixl_fec_string[2];
745 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
747 ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
748 req_fec_string, neg_fec_string,
749 (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
/* Flow control string: full (tx+rx), tx-only, rx-only, or none */
750 (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
751 hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
752 ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
753 ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
754 ixl_fc_string[1] : ixl_fc_string[0]);
758 * Configure admin queue/misc interrupt cause registers in hardware.
/*
 * Configure admin queue/misc interrupt cause registers for MSI-X vector 0:
 * clear and re-arm the ICR0 enable causes, terminate vector 0's queue
 * linked list (no queues on this vector), set its ITR, and enable it.
 */
761 ixl_configure_intr0_msix(struct ixl_pf *pf)
763 struct i40e_hw *hw = &pf->hw;
766 /* First set up the adminq - vector 0 */
767 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
768 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
770 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
771 I40E_PFINT_ICR0_ENA_GRST_MASK |
772 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
773 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
774 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
775 I40E_PFINT_ICR0_ENA_VFLR_MASK |
776 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
777 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
778 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
781 * 0x7FF is the end of the queue list.
782 * This means we won't use MSI-X vector 0 for a queue interrupt
785 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
786 /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
787 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
/* Enable vector 0 without touching a real ITR index */
789 wr32(hw, I40E_PFINT_DYN_CTL0,
790 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
791 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
793 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
797 * Configure queue interrupt cause registers in hardware.
799 * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
/*
 * Configure queue interrupt cause registers for MSI-X operation.  For each
 * queue pair i (serviced by vector i+1) build the HW linked list
 * LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL.
 */
802 ixl_configure_queue_intr_msix(struct ixl_pf *pf)
804 struct i40e_hw *hw = &pf->hw;
805 struct ixl_vsi *vsi = &pf->vsi;
809 // TODO: See if max is really necessary
810 for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
811 /* Make sure interrupt is disabled */
812 wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
813 /* Set linked list head to point to corresponding RX queue
814 * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */
815 reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
816 & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
817 ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
818 & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
819 wr32(hw, I40E_PFINT_LNKLSTN(i), reg);
/* RX cause: enable, bind to this vector, next entry is TX queue i */
821 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
822 (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
823 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
824 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
825 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
826 wr32(hw, I40E_QINT_RQCTL(i), reg);
/* TX cause: enable, bind to this vector, terminate the list (EOL) */
828 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
829 (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
830 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
831 (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
832 (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
833 wr32(hw, I40E_QINT_TQCTL(i), reg);
838 * Configure for single interrupt vector operation
/*
 * Configure the device for single-vector (legacy/MSI) interrupt operation:
 * program ITR0 for TX and RX, arm the "other" causes, and link queue pair
 * 0's RX and TX causes to vector 0.
 */
841 ixl_configure_legacy(struct ixl_pf *pf)
843 struct i40e_hw *hw = &pf->hw;
844 struct ixl_vsi *vsi = &pf->vsi;
850 vsi->tx_itr_setting = pf->tx_itr;
851 wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
852 vsi->tx_itr_setting);
853 txr->itr = vsi->tx_itr_setting;
855 vsi->rx_itr_setting = pf->rx_itr;
856 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
857 vsi->rx_itr_setting);
858 rxr->itr = vsi->rx_itr_setting;
859 /* XXX: Assuming only 1 queue in single interrupt mode */
861 vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;
863 /* Setup "other" causes */
864 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
865 | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
866 | I40E_PFINT_ICR0_ENA_GRST_MASK
867 | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
868 | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
869 | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
870 | I40E_PFINT_ICR0_ENA_VFLR_MASK
871 | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
873 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
875 /* No ITR for non-queue interrupts */
876 wr32(hw, I40E_PFINT_STAT_CTL0,
877 IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
879 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
880 wr32(hw, I40E_PFINT_LNKLST0, 0);
882 /* Associate the queue pair to the vector and enable the q int */
883 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
884 | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
885 | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
886 wr32(hw, I40E_QINT_RQCTL(0), reg);
/* TX cause terminates the linked list (EOL) */
888 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
889 | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
890 | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
891 wr32(hw, I40E_QINT_TQCTL(0), reg);
/*
 * Release PCI/interrupt resources: free the VSI's admin IRQ, each RX
 * queue's per-queue IRQ, and the BAR memory resource.  Safe to call
 * before stations are fully set up.
 */
895 ixl_free_pci_resources(struct ixl_pf *pf)
897 struct ixl_vsi *vsi = &pf->vsi;
898 device_t dev = iflib_get_dev(vsi->ctx);
899 struct ixl_rx_queue *rx_que = vsi->rx_queues;
901 /* We may get here before stations are set up */
906 ** Release all MSI-X VSI resources:
908 iflib_irq_free(vsi->ctx, &vsi->irq);
910 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
911 iflib_irq_free(vsi->ctx, &rx_que->que_irq);
913 if (pf->pci_mem != NULL)
914 bus_release_resource(dev, SYS_RES_MEMORY,
915 rman_get_rid(pf->pci_mem), pf->pci_mem);
/*
 * Translate the device's PHY-type capability bitmap into ifmedia entries,
 * registering one IFM_* media type per supported PHY type (100M through
 * 40G, copper/fiber/backplane/direct-attach variants).
 */
919 ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
921 /* Display supported media types */
922 if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
923 ifmedia_add(vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
925 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
926 ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
927 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
928 ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
929 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
930 ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
/* Several 10G attachment types all present as TWINAX media */
932 if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
933 phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
934 phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
935 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
937 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
938 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
939 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
940 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
941 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
942 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
/* Several 40G attachment types all present as CR4 media */
944 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
945 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
946 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
947 phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
948 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
949 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
950 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
951 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
952 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
953 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
955 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
956 ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
958 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
959 || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
960 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
961 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
962 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
963 if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
964 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
965 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
966 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
967 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
968 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
970 if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
971 ifmedia_add(vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
973 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
974 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
975 if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
976 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
978 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
979 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
980 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
981 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
982 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
983 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
984 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
985 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
986 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
987 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
988 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
989 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
992 /*********************************************************************
994 * Setup networking device structure and register an interface.
996 **********************************************************************/
/*
 * Set up the network interface: compute max frame size from the MTU,
 * query PHY capabilities (retrying once after a delay for slow fiber
 * detection), record supported speeds, set the baudrate, and register
 * the supported ifmedia types with autoselect as the default.
 */
998 ixl_setup_interface(device_t dev, struct ixl_pf *pf)
1000 struct ixl_vsi *vsi = &pf->vsi;
1001 if_ctx_t ctx = vsi->ctx;
1002 struct i40e_hw *hw = &pf->hw;
1003 struct ifnet *ifp = iflib_get_ifp(ctx);
1004 struct i40e_aq_get_phy_abilities_resp abilities;
1005 enum i40e_status_code aq_error = 0;
1007 INIT_DBG_DEV(dev, "begin");
/* Max frame = MTU + Ethernet header + CRC + VLAN tag */
1009 vsi->shared->isc_max_frame_size =
1010 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1011 + ETHER_VLAN_ENCAP_LEN;
1013 aq_error = i40e_aq_get_phy_capabilities(hw,
1014 FALSE, TRUE, &abilities, NULL);
1015 /* May need delay to detect fiber correctly */
1016 if (aq_error == I40E_ERR_UNKNOWN_PHY) {
1017 /* TODO: Maybe just retry this in a task... */
1018 i40e_msec_delay(200);
1019 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
1020 TRUE, &abilities, NULL);
1023 if (aq_error == I40E_ERR_UNKNOWN_PHY)
1024 device_printf(dev, "Unknown PHY type detected!\n");
1027 "Error getting supported media types, err %d,"
1028 " AQ error %d\n", aq_error, hw->aq.asq_last_status);
1030 pf->supported_speeds = abilities.link_speed;
1031 #if __FreeBSD_version >= 1100000
1032 if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
1034 if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
1037 ixl_add_ifmedia(vsi, hw->phy.phy_types);
1040 /* Use autoselect media by default */
1041 ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1042 ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
1048 * Input: bitmap of enum i40e_aq_link_speed
/*
 * Convert a bitmap of enum i40e_aq_link_speed values to the baudrate of
 * the fastest speed present.  NOTE(review): the return statements for the
 * 40G-1G branches are elided in this excerpt; only the 100 Mb returns are
 * visible.  Falls back to the minimum supported speed (100 Mb).
 */
1051 ixl_max_aq_speed_to_value(u8 link_speeds)
1053 if (link_speeds & I40E_LINK_SPEED_40GB)
1055 if (link_speeds & I40E_LINK_SPEED_25GB)
1057 if (link_speeds & I40E_LINK_SPEED_20GB)
1059 if (link_speeds & I40E_LINK_SPEED_10GB)
1061 if (link_speeds & I40E_LINK_SPEED_1GB)
1063 if (link_speeds & I40E_LINK_SPEED_100MB)
1064 return IF_Mbps(100);
1066 /* Minimum supported link speed */
1067 return IF_Mbps(100);
1071 ** Run when the Admin Queue gets a link state change interrupt.
/*
 * Admin-queue link state change handler: refresh link status from the
 * adapter into pf->link_up and warn when link failed because an
 * unqualified module was detected.  OS-visible link info is updated
 * elsewhere.
 */
1074 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1076 struct i40e_hw *hw = &pf->hw;
1077 device_t dev = iflib_get_dev(pf->vsi.ctx);
1078 struct i40e_aqc_get_link_status *status =
1079 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
1081 /* Request link status from adapter */
1082 hw->phy.get_link_info = TRUE;
1083 i40e_get_link_status(hw, &pf->link_up);
1085 /* Print out message if an unqualified module is found */
1086 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
1087 (pf->advertised_speed) &&
1088 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
1089 (!(status->link_info & I40E_AQ_LINK_UP)))
1090 device_printf(dev, "Link failed because "
1091 "an unqualified module was detected!\n");
1093 /* OS link info is updated elsewhere */
1096 /*********************************************************************
1098 * Get Firmware Switch configuration
1099 * - this will need to be more robust when more complex
1100 * switch configurations are enabled.
1102 **********************************************************************/
/*
 * Fetch firmware switch configuration via the admin queue, optionally dump
 * it when IXL_DBG_SWITCH_INFO is set, and record the first element's
 * seid/uplink/downlink in the VSI (single-VSI simplification).
 */
1104 ixl_switch_config(struct ixl_pf *pf)
1106 struct i40e_hw *hw = &pf->hw;
1107 struct ixl_vsi *vsi = &pf->vsi;
1108 device_t dev = iflib_get_dev(vsi->ctx);
1109 struct i40e_aqc_get_switch_config_resp *sw_config;
1110 u8 aq_buf[I40E_AQ_LARGE_BUF];
1114 memset(&aq_buf, 0, sizeof(aq_buf));
1115 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
1116 ret = i40e_aq_get_switch_config(hw, sw_config,
1117 sizeof(aq_buf), &next, NULL);
1119 device_printf(dev, "aq_get_switch_config() failed, error %d,"
1120 " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
1123 if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
1125 "Switch config: header reported: %d in structure, %d total\n",
1126 sw_config->header.num_reported, sw_config->header.num_total);
1127 for (int i = 0; i < sw_config->header.num_reported; i++) {
1129 "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
1130 sw_config->element[i].element_type,
1131 sw_config->element[i].seid,
1132 sw_config->element[i].uplink_seid,
1133 sw_config->element[i].downlink_seid);
1136 /* Simplified due to a single VSI */
1137 vsi->uplink_seid = sw_config->element[0].uplink_seid;
1138 vsi->downlink_seid = sw_config->element[0].downlink_seid;
1139 vsi->seid = sw_config->element[0].seid;
1143 /*********************************************************************
1145 * Initialize the VSI: this handles contexts, which means things
1146 * like the number of descriptors, buffer size,
1147 * plus we init the rings thru this function.
1149 **********************************************************************/
1151 ixl_initialize_vsi(struct ixl_vsi *vsi)
1153 struct ixl_pf *pf = vsi->back;
1154 if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx);
1155 struct ixl_tx_queue *tx_que = vsi->tx_queues;
1156 struct ixl_rx_queue *rx_que = vsi->rx_queues;
1157 device_t dev = iflib_get_dev(vsi->ctx);
1158 struct i40e_hw *hw = vsi->hw;
1159 struct i40e_vsi_context ctxt;
/* Pull the current VSI parameters from firmware before modifying them */
1163 memset(&ctxt, 0, sizeof(ctxt));
1164 ctxt.seid = vsi->seid;
1165 if (pf->veb_seid != 0)
1166 ctxt.uplink_seid = pf->veb_seid;
1167 ctxt.pf_num = hw->pf_id;
1168 err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
1170 device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
1171 " aq_error %d\n", err, hw->aq.asq_last_status);
1174 ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
1175 "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
1176 "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
1177 "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
1178 ctxt.uplink_seid, ctxt.vsi_number,
1179 ctxt.vsis_allocated, ctxt.vsis_unallocated,
1180 ctxt.flags, ctxt.pf_num, ctxt.vf_num,
1181 ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
1183 ** Set the queue and traffic class bits
1184 ** - when multiple traffic classes are supported
1185 ** this will need to be more robust.
1187 ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1188 ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
1189 /* In contig mode, que_mapping[0] is first queue index used by this VSI */
1190 ctxt.info.queue_mapping[0] = 0;
1192 * This VSI will only use traffic class 0; start traffic class 0's
1193 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
1194 * the driver may not use all of them).
1196 tc_queues = fls(pf->qtag.num_allocated) - 1;
1197 ctxt.info.tc_mapping[0] = ((pf->qtag.first_qidx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
1198 & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
1199 ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
1200 & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
1202 /* Set VLAN receive stripping mode */
1203 ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
1204 ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
1205 if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
1206 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1208 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1211 /* Set TCP Enable for iWARP capable VSI */
1212 if (ixl_enable_iwarp && pf->iw_enabled) {
1213 ctxt.info.valid_sections |=
1214 htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
1215 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
1218 /* Save VSI number and info for use later */
1219 vsi->vsi_num = ctxt.vsi_number;
1220 bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1222 /* Reset VSI statistics */
1223 ixl_vsi_reset_stats(vsi);
1224 vsi->hw_filters_add = 0;
1225 vsi->hw_filters_del = 0;
/* Push the updated context back to firmware */
1227 ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
1229 err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
1231 device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
1232 " aq_error %d\n", err, hw->aq.asq_last_status);
/* Program a per-queue HMC TX context and (re)init each TX ring */
1236 for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
1237 struct tx_ring *txr = &tx_que->txr;
1238 struct i40e_hmc_obj_txq tctx;
1241 /* Setup the HMC TX Context */
1242 bzero(&tctx, sizeof(tctx));
1243 tctx.new_context = 1;
1244 tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
1245 tctx.qlen = scctx->isc_ntxd[0];
1246 tctx.fc_ena = 0; /* Disable FCoE */
1248 * This value needs to pulled from the VSI that this queue
1249 * is assigned to. Index into array is traffic class.
1251 tctx.rdylist = vsi->info.qs_handle[0];
1253 * Set these to enable Head Writeback
1254 * - Address is last entry in TX ring (reserved for HWB index)
1255 * Leave these as 0 for Descriptor Writeback
1257 if (vsi->enable_head_writeback) {
1258 tctx.head_wb_ena = 1;
1259 tctx.head_wb_addr = txr->tx_paddr +
1260 (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
1262 tctx.head_wb_ena = 0;
1263 tctx.head_wb_addr = 0;
1265 tctx.rdylist_act = 0;
1266 err = i40e_clear_lan_tx_queue_context(hw, i);
1268 device_printf(dev, "Unable to clear TX context\n");
1271 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
1273 device_printf(dev, "Unable to set TX context\n");
1276 /* Associate the ring with this PF */
1277 txctl = I40E_QTX_CTL_PF_QUEUE;
1278 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
1279 I40E_QTX_CTL_PF_INDX_MASK);
1280 wr32(hw, I40E_QTX_CTL(i), txctl);
1283 /* Do ring (re)init */
1284 ixl_init_tx_ring(vsi, tx_que);
/* Program a per-queue HMC RX context and reset each RX tail register */
1286 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
1287 struct rx_ring *rxr = &rx_que->rxr;
1288 struct i40e_hmc_obj_rxq rctx;
1290 /* Next setup the HMC RX Context */
1291 rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
1293 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
1295 /* Set up an RX context for the HMC */
1296 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
1297 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
1298 /* ignore header split for now */
1299 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
1300 rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
1301 scctx->isc_max_frame_size : max_rxmax;
1303 rctx.dsize = 1; /* do 32byte descriptors */
1304 rctx.hsplit_0 = 0; /* no header split */
1305 rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
1306 rctx.qlen = scctx->isc_nrxd[0];
1307 rctx.tphrdesc_ena = 1;
1308 rctx.tphwdesc_ena = 1;
1309 rctx.tphdata_ena = 0; /* Header Split related */
1310 rctx.tphhead_ena = 0; /* Header Split related */
1311 rctx.lrxqthresh = 1; /* Interrupt at <64 desc avail */
1314 rctx.showiv = 1; /* Strip inner VLAN header */
1315 rctx.fc_ena = 0; /* Disable FCoE */
1316 rctx.prefena = 1; /* Prefetch descriptors */
1318 err = i40e_clear_lan_rx_queue_context(hw, i);
1321 "Unable to clear RX context %d\n", i);
1324 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
1326 device_printf(dev, "Unable to set RX context %d\n", i);
1329 wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
/*
 * ixl_free_mac_filters
 *
 * Drain the VSI's software MAC filter list, popping entries off the
 * head until the list is empty.
 */
1335 ixl_free_mac_filters(struct ixl_vsi *vsi)
1337 struct ixl_mac_filter *f;
1339 while (!SLIST_EMPTY(&vsi->ftl)) {
1340 f = SLIST_FIRST(&vsi->ftl);
1341 SLIST_REMOVE_HEAD(&vsi->ftl, next);
1347 ** Provide an update to the queue RX
1348 ** interrupt moderation value.
1351 ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
1353 struct ixl_vsi *vsi = que->vsi;
1354 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1355 struct i40e_hw *hw = vsi->hw;
1356 struct rx_ring *rxr = &que->rxr;
1361 /* Idle, do nothing */
1362 if (rxr->bytes == 0)
1365 if (pf->dynamic_rx_itr) {
/* Bytes normalized by the current ITR; the heuristic thresholds below pick a latency class */
1366 rx_bytes = rxr->bytes/rxr->itr;
1369 /* Adjust latency range */
1370 switch (rxr->latency) {
1371 case IXL_LOW_LATENCY:
1372 if (rx_bytes > 10) {
1373 rx_latency = IXL_AVE_LATENCY;
1374 rx_itr = IXL_ITR_20K;
1377 case IXL_AVE_LATENCY:
1378 if (rx_bytes > 20) {
1379 rx_latency = IXL_BULK_LATENCY;
1380 rx_itr = IXL_ITR_8K;
1381 } else if (rx_bytes <= 10) {
1382 rx_latency = IXL_LOW_LATENCY;
1383 rx_itr = IXL_ITR_100K;
1386 case IXL_BULK_LATENCY:
1387 if (rx_bytes <= 20) {
1388 rx_latency = IXL_AVE_LATENCY;
1389 rx_itr = IXL_ITR_20K;
1394 rxr->latency = rx_latency;
1396 if (rx_itr != rxr->itr) {
1397 /* do an exponential smoothing */
1398 rx_itr = (10 * rx_itr * rxr->itr) /
1399 ((9 * rx_itr) + rxr->itr);
/* Clamp and write the new interval to the per-queue RX ITR register */
1400 rxr->itr = min(rx_itr, IXL_MAX_ITR);
1401 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
1402 rxr->me), rxr->itr);
1404 } else { /* We may have toggled to non-dynamic */
1405 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
1406 vsi->rx_itr_setting = pf->rx_itr;
1407 /* Update the hardware if needed */
1408 if (rxr->itr != vsi->rx_itr_setting) {
1409 rxr->itr = vsi->rx_itr_setting;
1410 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
1411 rxr->me), rxr->itr);
1420 ** Provide an update to the queue TX
1421 ** interrupt moderation value.
1424 ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
1426 struct ixl_vsi *vsi = que->vsi;
1427 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1428 struct i40e_hw *hw = vsi->hw;
1429 struct tx_ring *txr = &que->txr;
1435 /* Idle, do nothing */
1436 if (txr->bytes == 0)
1439 if (pf->dynamic_tx_itr) {
/* Bytes normalized by the current ITR; the heuristic thresholds below pick a latency class */
1440 tx_bytes = txr->bytes/txr->itr;
1443 switch (txr->latency) {
1444 case IXL_LOW_LATENCY:
1445 if (tx_bytes > 10) {
1446 tx_latency = IXL_AVE_LATENCY;
1447 tx_itr = IXL_ITR_20K;
1450 case IXL_AVE_LATENCY:
1451 if (tx_bytes > 20) {
1452 tx_latency = IXL_BULK_LATENCY;
1453 tx_itr = IXL_ITR_8K;
1454 } else if (tx_bytes <= 10) {
1455 tx_latency = IXL_LOW_LATENCY;
1456 tx_itr = IXL_ITR_100K;
1459 case IXL_BULK_LATENCY:
1460 if (tx_bytes <= 20) {
1461 tx_latency = IXL_AVE_LATENCY;
1462 tx_itr = IXL_ITR_20K;
1467 txr->latency = tx_latency;
1469 if (tx_itr != txr->itr) {
1470 /* do an exponential smoothing */
1471 tx_itr = (10 * tx_itr * txr->itr) /
1472 ((9 * tx_itr) + txr->itr);
/* Clamp and write the new interval to the per-queue TX ITR register */
1473 txr->itr = min(tx_itr, IXL_MAX_ITR);
1474 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1475 txr->me), txr->itr);
1478 } else { /* We may have toggled to non-dynamic */
1479 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
1480 vsi->tx_itr_setting = pf->tx_itr;
1481 /* Update the hardware if needed */
1482 if (txr->itr != vsi->tx_itr_setting) {
1483 txr->itr = vsi->tx_itr_setting;
1484 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1485 txr->me), txr->itr);
1495 * ixl_sysctl_qtx_tail_handler
1496 * Retrieves I40E_QTX_TAIL value from hardware
1500 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
1502 struct ixl_tx_queue *tx_que;
1506 tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
1507 if (!tx_que) return 0;
/* Read the live TX tail value straight from hardware and report it */
1509 val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
1510 error = sysctl_handle_int(oidp, &val, 0, req);
1511 if (error || !req->newptr)
1517 * ixl_sysctl_qrx_tail_handler
1518 * Retrieves I40E_QRX_TAIL value from hardware
1522 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
1524 struct ixl_rx_queue *rx_que;
1528 rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
1529 if (!rx_que) return 0;
/* Read the live RX tail value straight from hardware and report it */
1531 val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
1532 error = sysctl_handle_int(oidp, &val, 0, req);
1533 if (error || !req->newptr)
1540 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
1541 * Writes to the ITR registers immediately.
1544 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
1546 struct ixl_pf *pf = (struct ixl_pf *)arg1;
1547 device_t dev = pf->dev;
1549 int requested_tx_itr;
1551 requested_tx_itr = pf->tx_itr;
1552 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
1553 if ((error) || (req->newptr == NULL))
/* Static writes are rejected while dynamic ITR owns the registers */
1555 if (pf->dynamic_tx_itr) {
1557 "Cannot set TX itr value while dynamic TX itr is enabled\n");
/* Bounds-check the requested interval */
1560 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
1562 "Invalid TX itr value; value must be between 0 and %d\n",
/* Accept and apply to all TX queues immediately */
1567 pf->tx_itr = requested_tx_itr;
1568 ixl_configure_tx_itr(pf);
1574 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
1575 * Writes to the ITR registers immediately.
1578 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
1580 struct ixl_pf *pf = (struct ixl_pf *)arg1;
1581 device_t dev = pf->dev;
1583 int requested_rx_itr;
1585 requested_rx_itr = pf->rx_itr;
1586 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
1587 if ((error) || (req->newptr == NULL))
/* Static writes are rejected while dynamic ITR owns the registers */
1589 if (pf->dynamic_rx_itr) {
1591 "Cannot set RX itr value while dynamic RX itr is enabled\n");
/* Bounds-check the requested interval */
1594 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
1596 "Invalid RX itr value; value must be between 0 and %d\n",
/* Accept and apply to all RX queues immediately */
1601 pf->rx_itr = requested_rx_itr;
1602 ixl_configure_rx_itr(pf);
/*
 * ixl_add_hw_stats
 *
 * Attach driver-, VSI-, queue-, and MAC-level statistics sysctls
 * under this device's sysctl tree.
 */
1608 ixl_add_hw_stats(struct ixl_pf *pf)
1610 struct ixl_vsi *vsi = &pf->vsi;
1611 device_t dev = iflib_get_dev(vsi->ctx);
1612 struct i40e_hw_port_stats *pf_stats = &pf->stats;
1614 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1615 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1616 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1618 /* Driver statistics */
1619 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
1620 CTLFLAG_RD, &pf->admin_irq,
1621 "Admin Queue IRQs received");
/* Per-VSI, per-queue, and port-level MAC counters */
1623 ixl_add_vsi_sysctls(dev, vsi, ctx, "pf");
1625 ixl_add_queues_sysctls(dev, vsi);
1627 ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
/*
 * ixl_add_sysctls_mac_stats
 *
 * Create a "mac" sysctl node and populate it with the ethernet stats
 * plus a table-driven set of port counters (error counts, RX/TX size
 * histograms, and flow-control XON/XOFF counts).
 */
1631 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
1632 struct sysctl_oid_list *child,
1633 struct i40e_hw_port_stats *stats)
1635 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1636 CTLFLAG_RD, NULL, "Mac Statistics");
1637 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
1639 struct i40e_eth_stats *eth_stats = &stats->eth;
1640 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
1642 struct ixl_sysctl_info ctls[] =
1644 {&stats->crc_errors, "crc_errors", "CRC Errors"},
1645 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
1646 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
1647 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
1648 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
1649 /* Packet Reception Stats */
1650 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
1651 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
1652 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
1653 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
1654 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
1655 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
1656 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
1657 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
1658 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
1659 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
1660 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
1661 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
1662 /* Packet Transmission Stats */
1663 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
1664 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
1665 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
1666 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
1667 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
1668 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
1669 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
1671 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
1672 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
1673 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
1674 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
/* Walk the table until its terminating sentinel entry (stat == NULL) */
1679 struct ixl_sysctl_info *entry = ctls;
1680 while (entry->stat != 0)
1682 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
1683 CTLFLAG_RD, entry->stat,
1684 entry->description);
/*
 * ixl_set_rss_key
 *
 * Program the RSS hash key.  X722 MACs take the key through an admin
 * queue command; other MACs get direct HKEY register writes.
 */
1690 ixl_set_rss_key(struct ixl_pf *pf)
1692 struct i40e_hw *hw = &pf->hw;
1693 struct ixl_vsi *vsi = &pf->vsi;
1694 device_t dev = pf->dev;
1695 u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
1696 enum i40e_status_code status;
/* NOTE(review): kernel-RSS key vs. driver default are presumably #ifdef RSS alternatives */
1699 /* Fetch the configured RSS key */
1700 rss_getkey((uint8_t *) &rss_seed);
1702 ixl_get_default_rss_key(rss_seed);
1704 /* Fill out hash function seed */
1705 if (hw->mac.type == I40E_MAC_X722) {
1706 struct i40e_aqc_get_set_rss_key_data key_data;
/* 52 bytes: standard + extended hash key -- TODO confirm against i40e_adminq_cmd.h */
1707 bcopy(rss_seed, &key_data, 52);
1708 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
1711 "i40e_aq_set_rss_key status %s, error %s\n",
1712 i40e_stat_str(hw, status),
1713 i40e_aq_str(hw, hw->aq.asq_last_status));
1715 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1716 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
1721 * Configure enabled PCTYPES for RSS.
1724 ixl_set_rss_pctypes(struct ixl_pf *pf)
1726 struct i40e_hw *hw = &pf->hw;
1727 u64 set_hena = 0, hena;
1730 u32 rss_hash_config;
/* Translate the kernel RSS hash configuration into HENA PCTYPE bits */
1732 rss_hash_config = rss_gethashconfig();
1733 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1734 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1735 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1736 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1737 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1738 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1739 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1740 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1741 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1742 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1743 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1744 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1745 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1746 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
/* NOTE(review): the per-MAC default HENA below presumably sits in an #ifdef RSS else-branch */
1748 if (hw->mac.type == I40E_MAC_X722)
1749 set_hena = IXL_DEFAULT_RSS_HENA_X722;
1751 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
/* Merge with the currently enabled hash types and write both HENA halves back */
1753 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1754 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1756 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1757 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
/*
 * ixl_set_rss_hlut
 *
 * Fill the RSS lookup table, spreading entries across the VSI's RX
 * queues round-robin.  X722 programs the LUT via an admin queue
 * command; other MACs write the HLUT registers directly.
 */
1762 ixl_set_rss_hlut(struct ixl_pf *pf)
1764 struct i40e_hw *hw = &pf->hw;
1765 struct ixl_vsi *vsi = &pf->vsi;
1766 device_t dev = iflib_get_dev(vsi->ctx);
1768 int lut_entry_width;
1770 enum i40e_status_code status;
1772 lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
1774 /* Populate the LUT with max no. of queues in round robin fashion */
1776 for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
1779 * Fetch the RSS bucket id for the given indirection entry.
1780 * Cap it at the number of configured buckets (which is
1783 que_id = rss_get_indirection_to_bucket(i);
1784 que_id = que_id % vsi->num_rx_queues;
1786 que_id = i % vsi->num_rx_queues;
/* Mask the queue id down to the hardware's LUT entry width */
1788 lut = (que_id & ((0x1 << lut_entry_width) - 1));
1792 if (hw->mac.type == I40E_MAC_X722) {
1793 status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
1795 device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1796 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1798 for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
1799 wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
1805 ** Setup the PF's RSS parameters.
1808 ixl_config_rss(struct ixl_pf *pf)
/* Hash key, enabled packet types (HENA), then the queue lookup table */
1810 ixl_set_rss_key(pf);
1811 ixl_set_rss_pctypes(pf);
1812 ixl_set_rss_hlut(pf);
1816 ** This routine updates vlan filters; called by init,
1817 ** it scans the filter table and then updates the hw
1818 ** after a soft reset.
1821 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
1823 struct ixl_mac_filter *f;
/* Nothing to do when no VLANs are registered */
1826 if (vsi->num_vlans == 0)
1829 ** Scan the filter list for vlan entries,
1830 ** mark them for addition and then call
1831 ** for the AQ update.
1833 SLIST_FOREACH(f, &vsi->ftl, next) {
1834 if (f->flags & IXL_FILTER_VLAN) {
1842 printf("setup vlan: no filters found!\n");
/* Push all marked VLAN filters to hardware in one AQ call */
1845 flags = IXL_FILTER_VLAN;
1846 flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
1847 ixl_add_hw_filters(vsi, flags, cnt);
1851 * In some firmware versions there is default MAC/VLAN filter
1852 * configured which interferes with filters managed by driver.
1853 * Make sure it's removed.
1856 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1858 struct i40e_aqc_remove_macvlan_element_data e;
/* Remove the firmware default filter for the permanent MAC (presumably VLAN 0 -- field not visible here) */
1860 bzero(&e, sizeof(e));
1861 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1863 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1864 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
/* Also remove the VLAN-agnostic variant of the same MAC */
1866 bzero(&e, sizeof(e));
1867 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1869 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1870 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1871 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1875 ** Initialize filter list and add filters that the hardware
1876 ** needs to know about.
1878 ** Requires VSI's filter list & seid to be set before calling.
1881 ixl_init_filters(struct ixl_vsi *vsi)
1883 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1885 /* Initialize mac filter list for VSI */
1886 SLIST_INIT(&vsi->ftl);
1888 /* Receive broadcast Ethernet frames */
1889 i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1891 ixl_del_default_hw_filters(vsi);
/* Install a filter for the device's own MAC across any VLAN */
1893 ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1895 * Prevent Tx flow control frames from being sent out by
1896 * non-firmware transmitters.
1897 * This affects every VSI in the PF.
1899 if (pf->enable_tx_fc_filter)
1900 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1904 ** This routine adds multicast filters
1907 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
1909 struct ixl_mac_filter *f;
1911 /* Does one already exist */
1912 f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
/* Allocate a new SW filter entry and tag it as multicast */
1916 f = ixl_new_filter(vsi, macaddr, IXL_VLAN_ANY);
1918 f->flags |= IXL_FILTER_MC;
1920 printf("WARNING: no filter available!!\n");
/* Re-push every software filter marked in-use back to the hardware. */
1924 ixl_reconfigure_filters(struct ixl_vsi *vsi)
1926 ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
1930 * This routine adds a MAC/VLAN filter to the software filter
1931 * list, then adds that new filter to the HW if it doesn't already
1932 * exist in the SW filter list.
1935 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1937 struct ixl_mac_filter *f, *tmp;
1941 DEBUGOUT("ixl_add_filter: begin");
1946 /* Does one already exist */
1947 f = ixl_find_filter(vsi, macaddr, vlan);
1951 ** Is this the first vlan being registered, if so we
1952 ** need to remove the ANY filter that indicates we are
1953 ** not in a vlan, and replace that with a 0 filter.
1955 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1956 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1958 ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
1959 ixl_add_filter(vsi, macaddr, 0);
1963 f = ixl_new_filter(vsi, macaddr, vlan);
1965 device_printf(dev, "WARNING: no filter available!!\n");
1968 if (f->vlan != IXL_VLAN_ANY)
1969 f->flags |= IXL_FILTER_VLAN;
/* Mark in-use and push just this one filter to the hardware */
1973 f->flags |= IXL_FILTER_USED;
1974 ixl_add_hw_filters(vsi, f->flags, 1);
/*
 * ixl_del_filter
 *
 * Mark the matching software filter for deletion and push the removal
 * to hardware.  When the last VLAN filter goes away, fall back to a
 * VLAN-agnostic filter for the MAC.
 */
1978 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1980 struct ixl_mac_filter *f;
1982 f = ixl_find_filter(vsi, macaddr, vlan);
1986 f->flags |= IXL_FILTER_DEL;
1987 ixl_del_hw_filters(vsi, 1);
1988 if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
1991 /* Check if this is the last vlan removal */
1992 if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
1993 /* Switch back to a non-vlan filter */
1994 ixl_del_filter(vsi, macaddr, 0);
1995 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
2001 ** Find the filter with both matching mac addr and vlan id
2003 struct ixl_mac_filter *
2004 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
2006 struct ixl_mac_filter *f;
/* Linear scan; cmp_etheraddr() presumably returns nonzero on a MAC match -- TODO confirm */
2008 SLIST_FOREACH(f, &vsi->ftl, next) {
2009 if ((cmp_etheraddr(f->macaddr, macaddr) != 0)
2010 && (f->vlan == vlan)) {
2019 ** This routine takes additions to the vsi filter
2020 ** table and creates an Admin Queue call to create
2021 ** the filters in the hardware.
2024 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
2026 struct i40e_aqc_add_macvlan_element_data *a, *b;
2027 struct ixl_mac_filter *f;
2031 enum i40e_status_code status;
2039 ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
/* One AQ element per filter to add; allocated M_NOWAIT so failure must be tolerated */
2043 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
2044 M_DEVBUF, M_NOWAIT | M_ZERO);
2046 device_printf(dev, "add_hw_filters failed to get memory\n");
2051 ** Scan the filter list, each time we find one
2052 ** we add it to the admin queue array and turn off
2055 SLIST_FOREACH(f, &vsi->ftl, next) {
2056 if ((f->flags & flags) == flags) {
2057 b = &a[j]; // a pox on fvl long names :)
2058 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
/* IXL_VLAN_ANY entries match any VLAN; others are exact-tag matches */
2059 if (f->vlan == IXL_VLAN_ANY) {
2061 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2063 b->vlan_tag = f->vlan;
2066 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2067 f->flags &= ~IXL_FILTER_ADD;
2070 ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
2071 MAC_FORMAT_ARGS(f->macaddr));
/* Submit the accumulated elements in a single admin queue command */
2077 status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
2079 device_printf(dev, "i40e_aq_add_macvlan status %s, "
2080 "error %s\n", i40e_stat_str(hw, status),
2081 i40e_aq_str(hw, hw->aq.asq_last_status));
2083 vsi->hw_filters_add += j;
2090 ** This routine takes removals in the vsi filter
2091 ** table and creates an Admin Queue call to delete
2092 ** the filters in the hardware.
2095 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
2097 struct i40e_aqc_remove_macvlan_element_data *d, *e;
2101 struct ixl_mac_filter *f, *f_temp;
2102 enum i40e_status_code status;
/* One AQ element per filter to remove */
2109 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
2110 M_DEVBUF, M_NOWAIT | M_ZERO);
2112 device_printf(dev, "%s: failed to get memory\n", __func__);
/* SAFE variant: entries are unlinked from the SW list while iterating */
2116 SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
2117 if (f->flags & IXL_FILTER_DEL) {
2118 e = &d[j]; // a pox on fvl long names :)
2119 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
2120 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2121 if (f->vlan == IXL_VLAN_ANY) {
2123 e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2125 e->vlan_tag = f->vlan;
2128 ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
2129 MAC_FORMAT_ARGS(f->macaddr));
2131 /* delete entry from vsi list */
2132 SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
2140 status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
/* On AQ failure, count only the elements firmware actually removed */
2143 for (int i = 0; i < j; i++)
2144 sc += (!d[i].error_code);
2145 vsi->hw_filters_del += sc;
2147 "Failed to remove %d/%d filters, error %s\n",
2148 j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status));
2150 vsi->hw_filters_del += j;
/*
 * ixl_enable_tx_ring
 *
 * Enable one PF TX queue (translating the VSI-relative index to the
 * PF queue index) and poll briefly until hardware reports it enabled.
 */
2157 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2159 struct i40e_hw *hw = &pf->hw;
2164 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2166 ixl_dbg(pf, IXL_DBG_EN_DIS,
2167 "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
/* Pre-configure the queue for the enable transition */
2170 i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
2172 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2173 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
2174 I40E_QTX_ENA_QENA_STAT_MASK;
2175 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2176 /* Verify the enable took */
2177 for (int j = 0; j < 10; j++) {
2178 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2179 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2181 i40e_usec_delay(10);
2183 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
2184 device_printf(pf->dev, "TX queue %d still disabled!\n",
/*
 * ixl_enable_rx_ring
 *
 * Enable one PF RX queue (translating the VSI-relative index to the
 * PF queue index) and poll briefly until hardware reports it enabled.
 */
2193 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2195 struct i40e_hw *hw = &pf->hw;
2200 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2202 ixl_dbg(pf, IXL_DBG_EN_DIS,
2203 "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
2206 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2207 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
2208 I40E_QRX_ENA_QENA_STAT_MASK;
2209 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2210 /* Verify the enable took */
2211 for (int j = 0; j < 10; j++) {
2212 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2213 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2215 i40e_usec_delay(10);
2217 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
2218 device_printf(pf->dev, "RX queue %d still disabled!\n",
/* Enable the TX half then the RX half of one queue pair. */
2227 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2231 error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
2232 /* Called function already prints error message */
2235 error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
2239 /* For PF VSI only */
2241 ixl_enable_rings(struct ixl_vsi *vsi)
2243 struct ixl_pf *pf = vsi->back;
/* Enable every TX ring, then every RX ring, owned by the PF VSI */
2246 for (int i = 0; i < vsi->num_tx_queues; i++)
2247 error = ixl_enable_tx_ring(pf, &pf->qtag, i);
2249 for (int i = 0; i < vsi->num_rx_queues; i++)
2250 error = ixl_enable_rx_ring(pf, &pf->qtag, i);
2256 * Returns error on first ring that is detected hung.
/*
 * ixl_disable_tx_ring
 *
 * Disable one PF TX queue: pre-configure for disable, clear the enable
 * request bit, then poll until hardware reports the queue stopped.
 */
2259 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2261 struct i40e_hw *hw = &pf->hw;
2266 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
/* Settle time after the pre-disable step -- presumably a hardware requirement */
2268 i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
2269 i40e_usec_delay(500);
2271 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2272 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2273 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2274 /* Verify the disable took */
2275 for (int j = 0; j < 10; j++) {
2276 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2277 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2279 i40e_msec_delay(10);
2281 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
2282 device_printf(pf->dev, "TX queue %d still enabled!\n",
2291 * Returns error on first ring that is detected hung.
/*
 * ixl_disable_rx_ring
 *
 * Disable one PF RX queue by clearing the enable request bit, then
 * poll until hardware reports the queue stopped.
 */
2294 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2296 struct i40e_hw *hw = &pf->hw;
2301 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2303 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2304 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2305 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2306 /* Verify the disable took */
2307 for (int j = 0; j < 10; j++) {
2308 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2309 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2311 i40e_msec_delay(10);
2313 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
2314 device_printf(pf->dev, "RX queue %d still enabled!\n",
/* Disable the TX half then the RX half of one queue pair. */
2323 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2327 error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
2328 /* Called function already prints error message */
2331 error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
/* Disable all TX then RX rings of the given VSI via its queue tag. */
2336 ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag)
2340 for (int i = 0; i < vsi->num_tx_queues; i++)
2341 error = ixl_disable_tx_ring(pf, qtag, i);
2343 for (int i = 0; i < vsi->num_rx_queues; i++)
2344 error = ixl_disable_rx_ring(pf, qtag, i);
/*
 * ixl_handle_tx_mdd_event
 *
 * Decode a TX Malicious Driver Detection event: read the global MDET
 * register for event/queue/function information, clear the latched
 * status, determine whether the PF and/or a VF triggered it, and log
 * an appropriately detailed message.
 */
2350 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
2352 struct i40e_hw *hw = &pf->hw;
2353 device_t dev = pf->dev;
2355 bool mdd_detected = false;
2356 bool pf_mdd_detected = false;
2357 bool vf_mdd_detected = false;
2360 u8 pf_mdet_num, vp_mdet_num;
2363 /* find what triggered the MDD event */
2364 reg = rd32(hw, I40E_GL_MDET_TX);
2365 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
2366 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
2367 I40E_GL_MDET_TX_PF_NUM_SHIFT;
2368 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
2369 I40E_GL_MDET_TX_VF_NUM_SHIFT;
2370 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
2371 I40E_GL_MDET_TX_EVENT_SHIFT;
2372 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
2373 I40E_GL_MDET_TX_QUEUE_SHIFT;
/* Writing all-ones clears the latched event */
2374 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
2375 mdd_detected = true;
2381 reg = rd32(hw, I40E_PF_MDET_TX);
2382 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
2383 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
2384 pf_mdet_num = hw->pf_id;
2385 pf_mdd_detected = true;
2388 /* Check if MDD was caused by a VF */
2389 for (int i = 0; i < pf->num_vfs; i++) {
2391 reg = rd32(hw, I40E_VP_MDET_TX(i));
2392 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
2393 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
2395 vf->num_mdd_events++;
2396 vf_mdd_detected = true;
2400 /* Print out an error message */
2401 if (vf_mdd_detected && pf_mdd_detected)
2403 "Malicious Driver Detection event %d"
2404 " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
2405 event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
2406 else if (vf_mdd_detected && !pf_mdd_detected)
2408 "Malicious Driver Detection event %d"
2409 " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
2410 event, queue, pf_num, vf_num, vp_mdet_num);
2411 else if (!vf_mdd_detected && pf_mdd_detected)
2413 "Malicious Driver Detection event %d"
2414 " on TX queue %d, pf number %d (PF-%d)\n",
2415 event, queue, pf_num, pf_mdet_num);
2416 /* Theoretically shouldn't happen */
2419 "TX Malicious Driver Detection event (unknown)\n");
/*
 * ixl_handle_rx_mdd_event
 *
 * RX counterpart of ixl_handle_tx_mdd_event: decode the global RX MDET
 * register, clear the latched status, attribute the event to the PF
 * and/or a VF, and log a message.  Note GL_MDET_RX carries a function
 * number rather than a VF number (see comment below).
 */
2423 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
2425 struct i40e_hw *hw = &pf->hw;
2426 device_t dev = pf->dev;
2428 bool mdd_detected = false;
2429 bool pf_mdd_detected = false;
2430 bool vf_mdd_detected = false;
2433 u8 pf_mdet_num, vp_mdet_num;
2437 * GL_MDET_RX doesn't contain VF number information, unlike
2440 reg = rd32(hw, I40E_GL_MDET_RX);
2441 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
2442 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
2443 I40E_GL_MDET_RX_FUNCTION_SHIFT;
2444 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
2445 I40E_GL_MDET_RX_EVENT_SHIFT;
2446 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
2447 I40E_GL_MDET_RX_QUEUE_SHIFT;
/* Writing all-ones clears the latched event */
2448 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
2449 mdd_detected = true;
2455 reg = rd32(hw, I40E_PF_MDET_RX);
2456 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
2457 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
2458 pf_mdet_num = hw->pf_id;
2459 pf_mdd_detected = true;
2462 /* Check if MDD was caused by a VF */
2463 for (int i = 0; i < pf->num_vfs; i++) {
2465 reg = rd32(hw, I40E_VP_MDET_RX(i));
2466 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
2467 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
2469 vf->num_mdd_events++;
2470 vf_mdd_detected = true;
2474 /* Print out an error message */
2475 if (vf_mdd_detected && pf_mdd_detected)
2477 "Malicious Driver Detection event %d"
2478 " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
2479 event, queue, pf_num, pf_mdet_num, vp_mdet_num);
2480 else if (vf_mdd_detected && !pf_mdd_detected)
2482 "Malicious Driver Detection event %d"
2483 " on RX queue %d, pf number %d, (VF-%d)\n",
2484 event, queue, pf_num, vp_mdet_num);
2485 else if (!vf_mdd_detected && pf_mdd_detected)
2487 "Malicious Driver Detection event %d"
2488 " on RX queue %d, pf number %d (PF-%d)\n",
2489 event, queue, pf_num, pf_mdet_num);
2490 /* Theoretically shouldn't happen */
2493 "RX Malicious Driver Detection event (unknown)\n");
2497 * ixl_handle_mdd_event
2499 * Called from interrupt handler to identify possibly malicious vfs
2500 * (But also detects events from the PF, as well)
2503 ixl_handle_mdd_event(struct ixl_pf *pf)
2505 struct i40e_hw *hw = &pf->hw;
2509 * Handle both TX/RX because it's possible they could
2510 * both trigger in the same interrupt.
2512 ixl_handle_tx_mdd_event(pf);
2513 ixl_handle_rx_mdd_event(pf);
     /* Done servicing; drop the pending-MDD flag set by the ISR */
2515 atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
2517 /* re-enable mdd interrupt cause */
2518 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2519 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2520 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/*
 * Enable interrupts for the VSI: in MSI-X mode, enable each RX queue's
 * interrupt individually; the intr0 (admin/other-cause) enable below
 * covers the non-queue interrupt path.
 */
2525 ixl_enable_intr(struct ixl_vsi *vsi)
2527 struct i40e_hw *hw = vsi->hw;
2528 struct ixl_rx_queue *que = vsi->rx_queues;
2530 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2531 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2532 ixl_enable_queue(hw, que->rxr.me);
2534 ixl_enable_intr0(hw);
/* Disable the per-RX-queue interrupts for every queue on the VSI. */
2538 ixl_disable_rings_intr(struct ixl_vsi *vsi)
2540 struct i40e_hw *hw = vsi->hw;
2541 struct ixl_rx_queue *que = vsi->rx_queues;
2543 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2544 ixl_disable_queue(hw, que->rxr.me);
/*
 * Enable the "other cause" interrupt (admin queue, link, MDD, etc.)
 * via PFINT_DYN_CTL0.  CLEARPBA acknowledges any pending bit.
 */
2548 ixl_enable_intr0(struct i40e_hw *hw)
2552 /* Use IXL_ITR_NONE so ITR isn't updated here */
2553 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2554 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2555 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2556 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/* Disable the "other cause" interrupt; INTENA left clear, ITR untouched. */
2560 ixl_disable_intr0(struct i40e_hw *hw)
2564 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
2565 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/*
 * Enable the MSI-X interrupt for a single queue (hardware queue index
 * 'id') via PFINT_DYN_CTLN; CLEARPBA acknowledges any pending bit.
 */
2570 ixl_enable_queue(struct i40e_hw *hw, int id)
2574 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2575 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2576 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2577 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/* Disable the MSI-X interrupt for a single queue; ITR is not updated. */
2581 ixl_disable_queue(struct i40e_hw *hw, int id)
2585 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2586 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/*
 * Pull all port-level (GLPRT_*) statistics registers into pf->stats,
 * using pf->stats_offsets as the baseline captured on first read
 * (device counters are not cleared by PFR, so the first snapshot acts
 * as the zero point — see ixl_stat_update48/32).  Also refreshes the
 * PF VSI stats and, for each enabled VF, that VF's ethernet stats.
 */
2590 ixl_update_stats_counters(struct ixl_pf *pf)
2592 struct i40e_hw *hw = &pf->hw;
2593 struct ixl_vsi *vsi = &pf->vsi;
2596 struct i40e_hw_port_stats *nsd = &pf->stats;
2597 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
2599 /* Update hw stats */
2600 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
2601 pf->stat_offsets_loaded,
2602 &osd->crc_errors, &nsd->crc_errors);
2603 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
2604 pf->stat_offsets_loaded,
2605 &osd->illegal_bytes, &nsd->illegal_bytes);
2606 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
2607 I40E_GLPRT_GORCL(hw->port),
2608 pf->stat_offsets_loaded,
2609 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
2610 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
2611 I40E_GLPRT_GOTCL(hw->port),
2612 pf->stat_offsets_loaded,
2613 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
2614 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
2615 pf->stat_offsets_loaded,
2616 &osd->eth.rx_discards,
2617 &nsd->eth.rx_discards);
2618 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
2619 I40E_GLPRT_UPRCL(hw->port),
2620 pf->stat_offsets_loaded,
2621 &osd->eth.rx_unicast,
2622 &nsd->eth.rx_unicast);
2623 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
2624 I40E_GLPRT_UPTCL(hw->port),
2625 pf->stat_offsets_loaded,
2626 &osd->eth.tx_unicast,
2627 &nsd->eth.tx_unicast);
2628 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
2629 I40E_GLPRT_MPRCL(hw->port),
2630 pf->stat_offsets_loaded,
2631 &osd->eth.rx_multicast,
2632 &nsd->eth.rx_multicast);
2633 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
2634 I40E_GLPRT_MPTCL(hw->port),
2635 pf->stat_offsets_loaded,
2636 &osd->eth.tx_multicast,
2637 &nsd->eth.tx_multicast);
2638 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
2639 I40E_GLPRT_BPRCL(hw->port),
2640 pf->stat_offsets_loaded,
2641 &osd->eth.rx_broadcast,
2642 &nsd->eth.rx_broadcast);
2643 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
2644 I40E_GLPRT_BPTCL(hw->port),
2645 pf->stat_offsets_loaded,
2646 &osd->eth.tx_broadcast,
2647 &nsd->eth.tx_broadcast);
     /* Link-down drops and MAC fault counters */
2649 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2650 pf->stat_offsets_loaded,
2651 &osd->tx_dropped_link_down,
2652 &nsd->tx_dropped_link_down);
2653 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2654 pf->stat_offsets_loaded,
2655 &osd->mac_local_faults,
2656 &nsd->mac_local_faults);
2657 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2658 pf->stat_offsets_loaded,
2659 &osd->mac_remote_faults,
2660 &nsd->mac_remote_faults);
2661 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2662 pf->stat_offsets_loaded,
2663 &osd->rx_length_errors,
2664 &nsd->rx_length_errors);
2666 /* Flow control (LFC) stats */
2667 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2668 pf->stat_offsets_loaded,
2669 &osd->link_xon_rx, &nsd->link_xon_rx);
2670 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2671 pf->stat_offsets_loaded,
2672 &osd->link_xon_tx, &nsd->link_xon_tx);
2673 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2674 pf->stat_offsets_loaded,
2675 &osd->link_xoff_rx, &nsd->link_xoff_rx);
2676 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2677 pf->stat_offsets_loaded,
2678 &osd->link_xoff_tx, &nsd->link_xoff_tx);
2680 /* Packet size stats rx */
2681 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2682 I40E_GLPRT_PRC64L(hw->port),
2683 pf->stat_offsets_loaded,
2684 &osd->rx_size_64, &nsd->rx_size_64);
2685 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2686 I40E_GLPRT_PRC127L(hw->port),
2687 pf->stat_offsets_loaded,
2688 &osd->rx_size_127, &nsd->rx_size_127);
2689 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2690 I40E_GLPRT_PRC255L(hw->port),
2691 pf->stat_offsets_loaded,
2692 &osd->rx_size_255, &nsd->rx_size_255);
2693 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2694 I40E_GLPRT_PRC511L(hw->port),
2695 pf->stat_offsets_loaded,
2696 &osd->rx_size_511, &nsd->rx_size_511);
2697 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2698 I40E_GLPRT_PRC1023L(hw->port),
2699 pf->stat_offsets_loaded,
2700 &osd->rx_size_1023, &nsd->rx_size_1023);
2701 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2702 I40E_GLPRT_PRC1522L(hw->port),
2703 pf->stat_offsets_loaded,
2704 &osd->rx_size_1522, &nsd->rx_size_1522);
2705 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2706 I40E_GLPRT_PRC9522L(hw->port),
2707 pf->stat_offsets_loaded,
2708 &osd->rx_size_big, &nsd->rx_size_big);
2710 /* Packet size stats tx */
2711 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2712 I40E_GLPRT_PTC64L(hw->port),
2713 pf->stat_offsets_loaded,
2714 &osd->tx_size_64, &nsd->tx_size_64);
2715 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2716 I40E_GLPRT_PTC127L(hw->port),
2717 pf->stat_offsets_loaded,
2718 &osd->tx_size_127, &nsd->tx_size_127);
2719 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2720 I40E_GLPRT_PTC255L(hw->port),
2721 pf->stat_offsets_loaded,
2722 &osd->tx_size_255, &nsd->tx_size_255);
2723 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2724 I40E_GLPRT_PTC511L(hw->port),
2725 pf->stat_offsets_loaded,
2726 &osd->tx_size_511, &nsd->tx_size_511);
2727 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2728 I40E_GLPRT_PTC1023L(hw->port),
2729 pf->stat_offsets_loaded,
2730 &osd->tx_size_1023, &nsd->tx_size_1023);
2731 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2732 I40E_GLPRT_PTC1522L(hw->port),
2733 pf->stat_offsets_loaded,
2734 &osd->tx_size_1522, &nsd->tx_size_1522);
2735 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2736 I40E_GLPRT_PTC9522L(hw->port),
2737 pf->stat_offsets_loaded,
2738 &osd->tx_size_big, &nsd->tx_size_big);
     /* Undersize/fragment/oversize/jabber error counters */
2740 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2741 pf->stat_offsets_loaded,
2742 &osd->rx_undersize, &nsd->rx_undersize);
2743 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2744 pf->stat_offsets_loaded,
2745 &osd->rx_fragments, &nsd->rx_fragments);
2746 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2747 pf->stat_offsets_loaded,
2748 &osd->rx_oversize, &nsd->rx_oversize);
2749 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2750 pf->stat_offsets_loaded,
2751 &osd->rx_jabber, &nsd->rx_jabber);
     /* Offsets are valid from now on; later calls compute deltas */
2752 pf->stat_offsets_loaded = true;
2755 /* Update vsi stats */
2756 ixl_update_vsi_stats(vsi);
     /* Refresh per-VF ethernet stats for enabled VFs only */
2758 for (int i = 0; i < pf->num_vfs; i++) {
2760 if (vf->vf_flags & VF_FLAG_ENABLED)
2761 ixl_update_eth_stats(&pf->vfs[i].vsi);
/*
 * Quiesce the device before a reset: shut down the LAN HMC, mask the
 * other-cause interrupt, shut down the admin queue, and release the
 * PF's queue allocation from the queue manager.  Failures are logged
 * but do not abort the teardown sequence.
 */
2766 ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
2768 struct i40e_hw *hw = &pf->hw;
2769 device_t dev = pf->dev;
2772 error = i40e_shutdown_lan_hmc(hw);
2775 "Shutdown LAN HMC failed with code %d\n", error);
2777 ixl_disable_intr0(hw);
2779 error = i40e_shutdown_adminq(hw);
2782 "Shutdown Admin queue failed with code %d\n", error);
2784 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
/*
 * Rebuild driver/hardware state after an EMP or PF reset: PF reset,
 * admin queue re-init, capability re-read, LAN HMC setup, queue
 * reservation, switch reconfig, PHY interrupt mask, flow control and
 * link status — largely mirroring the attach path.  On any fatal step
 * failure it jumps to the error label and asks for a driver reload.
 */
2789 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
2791 struct i40e_hw *hw = &pf->hw;
2792 struct ixl_vsi *vsi = &pf->vsi;
2793 device_t dev = pf->dev;
2796 device_printf(dev, "Rebuilding driver state...\n");
2798 error = i40e_pf_reset(hw);
2800 device_printf(dev, "PF reset failure %s\n",
2801 i40e_stat_str(hw, error));
2802 goto ixl_rebuild_hw_structs_after_reset_err;
     /* Re-initialize the admin queue; an API-version mismatch is tolerated */
2806 error = i40e_init_adminq(hw);
2807 if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
2808 device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
2810 goto ixl_rebuild_hw_structs_after_reset_err;
2813 i40e_clear_pxe_mode(hw);
2815 error = ixl_get_hw_capabilities(pf);
2817 device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
2818 goto ixl_rebuild_hw_structs_after_reset_err;
2821 error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
2822 hw->func_caps.num_rx_qp, 0, 0);
2824 device_printf(dev, "init_lan_hmc failed: %d\n", error);
2825 goto ixl_rebuild_hw_structs_after_reset_err;
2828 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
2830 device_printf(dev, "configure_lan_hmc failed: %d\n", error);
2831 goto ixl_rebuild_hw_structs_after_reset_err;
2834 /* reserve a contiguous allocation for the PF's VSI */
2835 error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
2837 device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
2839 /* TODO: error handling */
2842 error = ixl_switch_config(pf);
2844 device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
2847 goto ixl_rebuild_hw_structs_after_reset_err;
2850 error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
2853 device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
2854 " aq_err %d\n", error, hw->aq.asq_last_status);
2856 goto ixl_rebuild_hw_structs_after_reset_err;
2860 error = i40e_set_fc(hw, &set_fc_err_mask, true);
2862 device_printf(dev, "init: setting link flow control failed; retcode %d,"
2863 " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
2865 goto ixl_rebuild_hw_structs_after_reset_err;
2868 /* Remove default filters reinstalled by FW on reset */
2869 ixl_del_default_hw_filters(vsi);
2871 /* Determine link state */
2872 if (ixl_attach_get_link_status(pf)) {
2874 /* TODO: error handling */
2877 i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
2878 ixl_get_fw_lldp_status(pf);
2880 /* Keep admin queue interrupts active while driver is loaded */
2881 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2882 ixl_configure_intr0_msix(pf);
2883 ixl_enable_intr0(hw);
2886 device_printf(dev, "Rebuilding driver state done.\n");
2889 ixl_rebuild_hw_structs_after_reset_err:
2890 device_printf(dev, "Reload the driver to recover\n");
/*
 * Handle an EMP (firmware) reset: tear down driver state, poll
 * GLGEN_RSTAT until the device reports it has left reset (bounded by
 * 100 iterations of 100 ms), then rebuild the driver state and clear
 * the adapter-resetting flag.
 */
2895 ixl_handle_empr_reset(struct ixl_pf *pf)
2897 struct ixl_vsi *vsi = &pf->vsi;
2898 struct i40e_hw *hw = &pf->hw;
2899 bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
2903 ixl_prepare_for_reset(pf, is_up);
2905 /* Typically finishes within 3-4 seconds */
2906 while (count++ < 100) {
2907 reg = rd32(hw, I40E_GLGEN_RSTAT)
2908 & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
2910 i40e_msec_delay(100);
2914 ixl_dbg(pf, IXL_DBG_INFO,
2915 "Reset wait count: %d\n", count);
2917 ixl_rebuild_hw_structs_after_reset(pf);
2919 atomic_clear_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
2923 * Update VSI-specific ethernet statistics counters.
2926 ixl_update_eth_stats(struct ixl_vsi *vsi)
2928 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2929 struct i40e_hw *hw = &pf->hw;
2930 struct i40e_eth_stats *es;
2931 struct i40e_eth_stats *oes;
2932 struct i40e_hw_port_stats *nsd;
     /* Hardware stat block index assigned to this VSI */
2933 u16 stat_idx = vsi->info.stat_counter_idx;
2935 es = &vsi->eth_stats;
2936 oes = &vsi->eth_stats_offsets;
2939 /* Gather up the stats that the hw collects */
2940 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2941 vsi->stat_offsets_loaded,
2942 &oes->tx_errors, &es->tx_errors);
2943 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2944 vsi->stat_offsets_loaded,
2945 &oes->rx_discards, &es->rx_discards);
     /* RX byte/packet counters (48-bit split across H/L registers) */
2947 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2948 I40E_GLV_GORCL(stat_idx),
2949 vsi->stat_offsets_loaded,
2950 &oes->rx_bytes, &es->rx_bytes);
2951 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2952 I40E_GLV_UPRCL(stat_idx),
2953 vsi->stat_offsets_loaded,
2954 &oes->rx_unicast, &es->rx_unicast);
2955 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2956 I40E_GLV_MPRCL(stat_idx),
2957 vsi->stat_offsets_loaded,
2958 &oes->rx_multicast, &es->rx_multicast);
2959 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2960 I40E_GLV_BPRCL(stat_idx),
2961 vsi->stat_offsets_loaded,
2962 &oes->rx_broadcast, &es->rx_broadcast);
     /* TX byte/packet counters */
2964 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2965 I40E_GLV_GOTCL(stat_idx),
2966 vsi->stat_offsets_loaded,
2967 &oes->tx_bytes, &es->tx_bytes);
2968 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2969 I40E_GLV_UPTCL(stat_idx),
2970 vsi->stat_offsets_loaded,
2971 &oes->tx_unicast, &es->tx_unicast);
2972 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2973 I40E_GLV_MPTCL(stat_idx),
2974 vsi->stat_offsets_loaded,
2975 &oes->tx_multicast, &es->tx_multicast);
2976 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2977 I40E_GLV_BPTCL(stat_idx),
2978 vsi->stat_offsets_loaded,
2979 &oes->tx_broadcast, &es->tx_broadcast);
     /* First pass captured offsets; subsequent passes compute deltas */
2980 vsi->stat_offsets_loaded = true;
/*
 * Refresh the VSI's ethernet stats from hardware, then fold them
 * (together with selected port-level counters) into the ifnet
 * statistics via the IXL_SET_* accessor macros.
 */
2984 ixl_update_vsi_stats(struct ixl_vsi *vsi)
2988 struct i40e_eth_stats *es;
2991 struct i40e_hw_port_stats *nsd;
2995 es = &vsi->eth_stats;
2998 ixl_update_eth_stats(vsi);
     /* Output drops = VSI TX discards + port drops while link was down */
3000 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
3002 /* Update ifnet stats */
3003 IXL_SET_IPACKETS(vsi, es->rx_unicast +
3006 IXL_SET_OPACKETS(vsi, es->tx_unicast +
3009 IXL_SET_IBYTES(vsi, es->rx_bytes);
3010 IXL_SET_OBYTES(vsi, es->tx_bytes);
3011 IXL_SET_IMCASTS(vsi, es->rx_multicast);
3012 IXL_SET_OMCASTS(vsi, es->tx_multicast);
     /* Input errors aggregate the port-level RX error counters */
3014 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
3015 nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
3017 IXL_SET_OERRORS(vsi, es->tx_errors);
3018 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
3019 IXL_SET_OQDROPS(vsi, tx_discards);
3020 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
3021 IXL_SET_COLLISIONS(vsi, 0);
3025 * Reset all of the stats for the given pf
3028 ixl_pf_reset_stats(struct ixl_pf *pf)
3030 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
3031 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
     /* Force the next stats read to re-capture baseline offsets */
3032 pf->stat_offsets_loaded = false;
3036 * Resets all stats of the given vsi
3039 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
3041 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
3042 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
     /* Force the next stats read to re-capture baseline offsets */
3043 vsi->stat_offsets_loaded = false;
3047 * Read and update a 48 bit stat from the hw
3049 * Since the device stats are not reset at PFReset, they likely will not
3050 * be zeroed when the driver starts. We'll save the first values read
3051 * and use them as offsets to be subtracted from the raw values in order
3052 * to report stats that count from zero.
3055 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
3056 bool offset_loaded, u64 *offset, u64 *stat)
     /* 64-bit MMIO reads need FreeBSD >= 10 on amd64; otherwise split */
3060 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
3061 new_data = rd64(hw, loreg);
3064 * Use two rd32's instead of one rd64; FreeBSD versions before
3065 * 10 don't support 64-bit bus reads/writes.
3067 new_data = rd32(hw, loreg);
3068 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
     /* Handle 48-bit counter wraparound relative to the saved offset */
3073 if (new_data >= *offset)
3074 *stat = new_data - *offset;
3076 *stat = (new_data + ((u64)1 << 48)) - *offset;
3077 *stat &= 0xFFFFFFFFFFFFULL;
3081 * Read and update a 32 bit stat from the hw
3084 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
3085 bool offset_loaded, u64 *offset, u64 *stat)
3089 new_data = rd32(hw, reg);
     /* Handle 32-bit counter wraparound relative to the saved offset */
3092 if (new_data >= *offset)
3093 *stat = (u32)(new_data - *offset);
3095 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
/*
 * Register all device-level sysctl nodes for this PF: flow control,
 * speed advertisement, ITR tuning, FEC controls (25G parts only),
 * FW LLDP control, and a hidden (CTLFLAG_SKIP) "debug" subtree of
 * diagnostic handlers.
 */
3099 ixl_add_device_sysctls(struct ixl_pf *pf)
3101 device_t dev = pf->dev;
3102 struct i40e_hw *hw = &pf->hw;
3104 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3105 struct sysctl_oid_list *ctx_list =
3106 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3108 struct sysctl_oid *debug_node;
3109 struct sysctl_oid_list *debug_list;
3111 struct sysctl_oid *fec_node;
3112 struct sysctl_oid_list *fec_list;
3114 /* Set up sysctls */
3115 SYSCTL_ADD_PROC(ctx, ctx_list,
3116 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
3117 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
3119 SYSCTL_ADD_PROC(ctx, ctx_list,
3120 OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
3121 pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
3123 SYSCTL_ADD_PROC(ctx, ctx_list,
3124 OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD,
3125 pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
3127 SYSCTL_ADD_PROC(ctx, ctx_list,
3128 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
3129 pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed");
3131 SYSCTL_ADD_PROC(ctx, ctx_list,
3132 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
3133 pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
3135 SYSCTL_ADD_PROC(ctx, ctx_list,
3136 OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
3137 pf, 0, ixl_sysctl_unallocated_queues, "I",
3138 "Queues not allocated to a PF or VF");
3140 SYSCTL_ADD_PROC(ctx, ctx_list,
3141 OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
3142 pf, 0, ixl_sysctl_pf_tx_itr, "I",
3143 "Immediately set TX ITR value for all queues");
3145 SYSCTL_ADD_PROC(ctx, ctx_list,
3146 OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
3147 pf, 0, ixl_sysctl_pf_rx_itr, "I",
3148 "Immediately set RX ITR value for all queues");
3150 SYSCTL_ADD_INT(ctx, ctx_list,
3151 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
3152 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
3154 SYSCTL_ADD_INT(ctx, ctx_list,
3155 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
3156 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
3158 /* Add FEC sysctls for 25G adapters */
3159 if (i40e_is_25G_device(hw->device_id)) {
3160 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3161 OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
3162 fec_list = SYSCTL_CHILDREN(fec_node);
3164 SYSCTL_ADD_PROC(ctx, fec_list,
3165 OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW,
3166 pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
3168 SYSCTL_ADD_PROC(ctx, fec_list,
3169 OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW,
3170 pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
3172 SYSCTL_ADD_PROC(ctx, fec_list,
3173 OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW,
3174 pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
3176 SYSCTL_ADD_PROC(ctx, fec_list,
3177 OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW,
3178 pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
3180 SYSCTL_ADD_PROC(ctx, fec_list,
3181 OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW,
3182 pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
3185 SYSCTL_ADD_PROC(ctx, ctx_list,
3186 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW,
3187 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
3189 /* Add sysctls meant to print debug information, but don't list them
3190 * in "sysctl -a" output. */
3191 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3192 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
3193 debug_list = SYSCTL_CHILDREN(debug_node);
3195 SYSCTL_ADD_UINT(ctx, debug_list,
3196 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
3197 &pf->hw.debug_mask, 0, "Shared code debug message level");
3199 SYSCTL_ADD_UINT(ctx, debug_list,
3200 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
3201 &pf->dbg_mask, 0, "Non-shared code debug message level");
3203 SYSCTL_ADD_PROC(ctx, debug_list,
3204 OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
3205 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
3207 SYSCTL_ADD_PROC(ctx, debug_list,
3208 OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
3209 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
3211 SYSCTL_ADD_PROC(ctx, debug_list,
3212 OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
3213 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
3215 SYSCTL_ADD_PROC(ctx, debug_list,
3216 OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
3217 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
3219 SYSCTL_ADD_PROC(ctx, debug_list,
3220 OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
3221 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
3223 SYSCTL_ADD_PROC(ctx, debug_list,
3224 OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
3225 pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
3227 SYSCTL_ADD_PROC(ctx, debug_list,
3228 OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
3229 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
3231 SYSCTL_ADD_PROC(ctx, debug_list,
3232 OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
3233 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
3235 SYSCTL_ADD_PROC(ctx, debug_list,
3236 OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
3237 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
3239 SYSCTL_ADD_PROC(ctx, debug_list,
3240 OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD,
3241 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
3243 SYSCTL_ADD_PROC(ctx, debug_list,
3244 OID_AUTO, "do_pf_reset", CTLTYPE_INT | CTLFLAG_WR,
3245 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
3247 SYSCTL_ADD_PROC(ctx, debug_list,
3248 OID_AUTO, "do_core_reset", CTLTYPE_INT | CTLFLAG_WR,
3249 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
3251 SYSCTL_ADD_PROC(ctx, debug_list,
3252 OID_AUTO, "do_global_reset", CTLTYPE_INT | CTLFLAG_WR,
3253 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
3255 SYSCTL_ADD_PROC(ctx, debug_list,
3256 OID_AUTO, "do_emp_reset", CTLTYPE_INT | CTLFLAG_WR,
3257 pf, 0, ixl_sysctl_do_emp_reset, "I",
3258 "(This doesn't work) Tell HW to initiate a EMP (entire firmware) reset");
3260 SYSCTL_ADD_PROC(ctx, debug_list,
3261 OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
3262 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
3265 SYSCTL_ADD_PROC(ctx, debug_list,
3266 OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3267 pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
3269 SYSCTL_ADD_PROC(ctx, debug_list,
3270 OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3271 pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
3273 SYSCTL_ADD_PROC(ctx, debug_list,
3274 OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD,
3275 pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
3280 * Primarily for finding out how many queues can be assigned to VFs,
3284 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
3286 struct ixl_pf *pf = (struct ixl_pf *)arg1;
     /* Read-only: report the number of free queues in the queue manager */
3289 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
3291 return sysctl_handle_int(oidp, NULL, queues, req);
3295 ** Set flow control using sysctl:
3302 ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
3304 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3305 struct i40e_hw *hw = &pf->hw;
3306 device_t dev = pf->dev;
3307 int requested_fc, error = 0;
3308 enum i40e_status_code aq_error = 0;
     /* Seed with the current setting; a read (no newptr) returns here */
3312 requested_fc = pf->fc;
3313 error = sysctl_handle_int(oidp, &requested_fc, 0, req);
3314 if ((error) || (req->newptr == NULL))
     /* Valid modes: 0 = none, through 3 (see IXL_SYSCTL_HELP_FC) */
3316 if (requested_fc < 0 || requested_fc > 3) {
3318 "Invalid fc mode; valid modes are 0 through 3\n");
3322 /* Set fc ability for port */
3323 hw->fc.requested_mode = requested_fc;
3324 aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
3327 "%s: Error setting new fc mode %d; fc_err %#x\n",
3328 __func__, aq_error, fc_aq_err);
     /* Only record the new mode once hardware accepted it */
3331 pf->fc = requested_fc;
/*
 * Map an Admin Queue link-speed enum value to a human-readable string
 * from a local speed-name table; unknown values fall through to the
 * default table entry.
 */
3337 ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
3351 switch (link_speed) {
3352 case I40E_LINK_SPEED_100MB:
3355 case I40E_LINK_SPEED_1GB:
3358 case I40E_LINK_SPEED_10GB:
3361 case I40E_LINK_SPEED_40GB:
3364 case I40E_LINK_SPEED_20GB:
3367 case I40E_LINK_SPEED_25GB:
3370 case I40E_LINK_SPEED_UNKNOWN:
3376 return speeds[index];
/*
 * Read-only sysctl: refresh link status, then report the current port
 * speed as a string.
 */
3380 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
3382 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3383 struct i40e_hw *hw = &pf->hw;
3386 ixl_update_link_status(pf);
3388 error = sysctl_handle_string(oidp,
3389 ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
3395 * Converts 8-bit speeds value to and from sysctl flags and
3396 * Admin Queue flags.
3399 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
     /* Each entry packs the AQ flag (low byte) with the sysctl flag (high byte) */
3401 static u16 speedmap[6] = {
3402 (I40E_LINK_SPEED_100MB | (0x1 << 8)),
3403 (I40E_LINK_SPEED_1GB | (0x2 << 8)),
3404 (I40E_LINK_SPEED_10GB | (0x4 << 8)),
3405 (I40E_LINK_SPEED_20GB | (0x8 << 8)),
3406 (I40E_LINK_SPEED_25GB | (0x10 << 8)),
3407 (I40E_LINK_SPEED_40GB | (0x20 << 8))
     /* Translate each set bit in the chosen direction */
3411 for (int i = 0; i < 6; i++) {
3413 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
3415 retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
/*
 * Program the PHY's advertised link speeds.  'speeds' is interpreted
 * as AQ-format flags when from_aq is true, otherwise as sysctl-format
 * flags (converted below).  Builds the new PHY config from the current
 * abilities so only the link speed changes.
 */
3422 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
3424 struct i40e_hw *hw = &pf->hw;
3425 device_t dev = pf->dev;
3426 struct i40e_aq_get_phy_abilities_resp abilities;
3427 struct i40e_aq_set_phy_config config;
3428 enum i40e_status_code aq_error = 0;
3430 /* Get current capability information */
3431 aq_error = i40e_aq_get_phy_capabilities(hw,
3432 FALSE, FALSE, &abilities, NULL);
3435 "%s: Error getting phy capabilities %d,"
3436 " aq error: %d\n", __func__, aq_error,
3437 hw->aq.asq_last_status);
3441 /* Prepare new config */
3442 bzero(&config, sizeof(config));
3444 config.link_speed = speeds;
3446 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
     /* Carry over every non-speed field from the current abilities */
3447 config.phy_type = abilities.phy_type;
3448 config.phy_type_ext = abilities.phy_type_ext;
3449 config.abilities = abilities.abilities
3450 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
3451 config.eee_capability = abilities.eee_capability;
3452 config.eeer = abilities.eeer_val;
3453 config.low_power_ctrl = abilities.d3_lpan;
3454 config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e);
3456 /* Do aq command & restart link */
3457 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3460 "%s: Error setting new phy config %d,"
3461 " aq error: %d\n", __func__, aq_error,
3462 hw->aq.asq_last_status);
3470 ** Supported link speeds:
3480 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
3482 struct ixl_pf *pf = (struct ixl_pf *)arg1;
     /* Report the supported speeds converted to sysctl flag format */
3483 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
3485 return sysctl_handle_int(oidp, NULL, supported, req);
3489 ** Control link advertise speed:
3491 ** 0x1 - advertise 100 Mb
3492 ** 0x2 - advertise 1G
3493 ** 0x4 - advertise 10G
3494 ** 0x8 - advertise 20G
3495 ** 0x10 - advertise 25G
3496 ** 0x20 - advertise 40G
3498 ** Set to 0 to disable link
3501 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
3503 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3504 device_t dev = pf->dev;
3505 u8 converted_speeds;
3506 int requested_ls = 0;
3509 /* Read in new mode */
3510 requested_ls = pf->advertised_speed;
3511 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
3512 if ((error) || (req->newptr == NULL))
3515 /* Error out if bits outside of possible flag range are set */
3516 if ((requested_ls & ~((u8)0x3F)) != 0) {
3517 device_printf(dev, "Input advertised speed out of range; "
3518 "valid flags are: 0x%02x\n",
3519 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3523 /* Check if adapter supports input value */
3524 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
3525 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
3526 device_printf(dev, "Invalid advertised speed; "
3527 "valid flags are: 0x%02x\n",
3528 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
     /* Apply the new advertisement (sysctl-format flags) to the PHY */
3532 error = ixl_set_advertised_speeds(pf, requested_ls, false);
     /* Record the accepted value and refresh link state */
3536 pf->advertised_speed = requested_ls;
3537 ixl_update_link_status(pf);
3542 ** Get the width and transaction speed of
3543 ** the bus this adapter is plugged into.
3546 ixl_get_bus_info(struct ixl_pf *pf)
3548 struct i40e_hw *hw = &pf->hw;
3549 device_t dev = pf->dev;
3551 u32 offset, num_ports;
3554 /* Some devices don't use PCIE */
3555 if (hw->mac.type == I40E_MAC_X722)
3558 /* Read PCI Express Capabilities Link Status Register */
3559 pci_find_cap(dev, PCIY_EXPRESS, &offset);
3560 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3562 /* Fill out hw struct with PCIE info */
3563 i40e_set_pci_config_data(hw, link);
3565 /* Use info to print out bandwidth messages */
3566 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3567 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
3568 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
3569 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3570 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
3571 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
3572 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
3573 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
3577 * If adapter is in slot with maximum supported speed,
3578 * no warning message needs to be printed out.
3580 if (hw->bus.speed >= i40e_bus_speed_8000
3581 && hw->bus.width >= i40e_bus_width_pcie_x8)
     /* Warn when aggregate port bandwidth may exceed the PCIe link */
3584 num_ports = bitcount32(hw->func_caps.valid_functions);
3585 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
3587 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
3588 device_printf(dev, "PCI-Express bandwidth available"
3589 " for this device may be insufficient for"
3590 " optimal performance.\n");
3591 device_printf(dev, "Please move the device to a different"
3592 " PCI-e link with more lanes and/or higher"
3593 " transfer rate.\n");
/* Sysctl handler: report the NVM/firmware version string for this PF. */
3598 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
3600 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3601 struct i40e_hw *hw = &pf->hw;
/* sbuf drains straight into the sysctl request buffer */
3604 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3605 ixl_nvm_version_str(hw, sbuf);
/* Debug helper: pretty-print one NVM-update request (command, config
 * pointer/flags, offset, data size) to the console. */
3613 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
/* This exact READ shape (ptr 0xF, flags 0xF, offset 0, 1 byte) is the
 * update tool's periodic driver-status poll; it is deliberately not
 * logged (see the commented-out printf) to avoid console spam. */
3615 if ((nvma->command == I40E_NVM_READ) &&
3616 ((nvma->config & 0xFF) == 0xF) &&
3617 (((nvma->config & 0xF00) >> 8) == 0xF) &&
3618 (nvma->offset == 0) &&
3619 (nvma->data_size == 1)) {
3620 // device_printf(dev, "- Get Driver Status Command\n");
3622 else if (nvma->command == I40E_NVM_READ) {
3626 switch (nvma->command) {
3628 device_printf(dev, "- command: I40E_NVM_READ\n");
3631 device_printf(dev, "- command: I40E_NVM_WRITE\n");
3634 device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
/* config low byte = module pointer, bits 8-11 = flags (matches the
 * masks used in the status-poll test above) */
3638 device_printf(dev, "- config (ptr) : 0x%02x\n", nvma->config & 0xFF);
3639 device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
3640 device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
3641 device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
/* Handle an NVM-update ioctl from userland (the Intel nvmupdate tool):
 * copy in the i40e_nvm_access request, validate it, forward it to the
 * shared-code i40e_nvmupd_command(), and copy the result back out. */
3646 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3648 struct i40e_hw *hw = &pf->hw;
3649 struct i40e_nvm_access *nvma;
3650 device_t dev = pf->dev;
3651 enum i40e_status_code status = 0;
3652 size_t nvma_size, ifd_len, exp_len;
3655 DEBUGFUNC("ixl_handle_nvmupd_cmd");
/* The ioctl payload must at least hold the fixed-size header struct */
3658 nvma_size = sizeof(struct i40e_nvm_access);
3659 ifd_len = ifd->ifd_len;
3661 if (ifd_len < nvma_size ||
3662 ifd->ifd_data == NULL) {
3663 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3665 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3666 __func__, ifd_len, nvma_size);
3667 device_printf(dev, "%s: data pointer: %p\n", __func__,
3672 nvma = malloc(ifd_len, M_DEVBUF, M_WAITOK);
3673 err = copyin(ifd->ifd_data, nvma, ifd_len);
3675 device_printf(dev, "%s: Cannot get request from user space\n",
3677 free(nvma, M_DEVBUF);
3681 if (pf->dbg_mask & IXL_DBG_NVMUPD)
3682 ixl_print_nvm_cmd(dev, nvma);
/* If a PF reset is in flight, wait up to ~10s (100 x 100ms) for it to
 * finish before touching the admin queue */
3684 if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3686 while (count++ < 100) {
3687 i40e_msec_delay(100);
3688 if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING))
3693 if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3694 free(nvma, M_DEVBUF);
/* Bound the request's data payload (1..4096 bytes) */
3698 if (nvma->data_size < 1 || nvma->data_size > 4096) {
3699 device_printf(dev, "%s: invalid request, data size not in supported range\n",
3701 free(nvma, M_DEVBUF);
3706 * Older versions of the NVM update tool don't set ifd_len to the size
3707 * of the entire buffer passed to the ioctl. Check the data_size field
3708 * in the contained i40e_nvm_access struct and ensure everything is
3709 * copied in from userspace.
3711 exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
3713 if (ifd_len < exp_len) {
/* NOTE(review): realloc result overwrites nvma directly; with
 * M_WAITOK this cannot return NULL, so no leak -- but confirm the
 * elided lines update ifd_len to exp_len before this copyin. */
3715 nvma = realloc(nvma, ifd_len, M_DEVBUF, M_WAITOK);
3716 err = copyin(ifd->ifd_data, nvma, ifd_len);
3718 device_printf(dev, "%s: Cannot get request from user space\n",
3720 free(nvma, M_DEVBUF);
3725 // TODO: Might need a different lock here
3727 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3728 // IXL_PF_UNLOCK(pf);
3730 err = copyout(nvma, ifd->ifd_data, ifd_len);
3731 free(nvma, M_DEVBUF);
3733 device_printf(dev, "%s: Cannot return data to user space\n",
3738 /* Let the nvmupdate report errors, show them only when debug is enabled */
3739 if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3740 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3741 i40e_stat_str(hw, status), perrno);
3744 * -EPERM is actually ERESTART, which the kernel interprets as it needing
3745 * to run this ioctl again. So use -EACCES for -EPERM instead.
3747 if (perrno == -EPERM)
/* Scan the four GLGEN_MDIO_I2C_SEL registers for the interface that has
 * I2C enabled and is mapped to this PF's PHY port; presumably returns
 * that index (return statement elided in this view) -- TODO confirm. */
3754 ixl_find_i2c_interface(struct ixl_pf *pf)
3756 struct i40e_hw *hw = &pf->hw;
3757 bool i2c_en, port_matched;
3760 for (int i = 0; i < 4; i++) {
3761 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3762 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3763 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3764 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3766 if (i2c_en && port_matched)
/* Map a PHY-type bit position to a human-readable name.
 * ext selects the 8-entry extended table; otherwise the 32-entry base
 * table is used. Out-of-range positions yield "Invalid"/"Invalid_Ext". */
3774 ixl_phy_type_string(u32 bit_pos, bool ext)
3776 static char * phy_types_str[32] = {
3806 "1000BASE-T Optical",
3810 static char * ext_phy_types_str[8] = {
3821 if (ext && bit_pos > 7) return "Invalid_Ext";
3822 if (bit_pos > 31) return "Invalid";
3824 return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3827 /* TODO: ERJ: I don't this is necessary anymore. */
/* Issue a get_link_status admin-queue command (with link-status-event
 * enable set) and copy the raw response into *link_status. */
3829 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3831 device_t dev = pf->dev;
3832 struct i40e_hw *hw = &pf->hw;
3833 struct i40e_aq_desc desc;
3834 enum i40e_status_code status;
/* The response lands in the descriptor's raw parameter area */
3836 struct i40e_aqc_get_link_status *aq_link_status =
3837 (struct i40e_aqc_get_link_status *)&desc.params.raw;
3839 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3840 link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3841 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3844 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3845 __func__, i40e_stat_str(hw, status),
3846 i40e_aq_str(hw, hw->aq.asq_last_status));
3850 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
/* Translate the phy_type byte from a get_link_status response into a
 * name: large values are rebased by 0x1F into the extended table,
 * otherwise the base table is used (the selecting condition is elided
 * here -- presumably val >= 0x1F; TODO confirm). */
3855 ixl_phy_type_string_ls(u8 val)
3858 return ixl_phy_type_string(val - 0x1F, true);
3860 return ixl_phy_type_string(val, false);
/* Sysctl handler: dump the raw get_link_status AQ response fields
 * (PHY type, speed, link/AN/ext info, loopback, max frame, power). */
3864 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3866 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3867 device_t dev = pf->dev;
3871 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3873 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3877 struct i40e_aqc_get_link_status link_status;
3878 error = ixl_aq_get_link_status(pf, &link_status);
3884 sbuf_printf(buf, "\n"
3885 "PHY Type : 0x%02x<%s>\n"
3887 "Link info: 0x%02x\n"
3888 "AN info : 0x%02x\n"
3889 "Ext info : 0x%02x\n"
3890 "Loopback : 0x%02x\n"
3894 link_status.phy_type,
3895 ixl_phy_type_string_ls(link_status.phy_type),
3896 link_status.link_speed,
3897 link_status.link_info,
3898 link_status.an_info,
3899 link_status.ext_info,
3900 link_status.loopback,
3901 link_status.max_frame_size,
3903 link_status.power_desc);
3905 error = sbuf_finish(buf);
3907 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Sysctl handler: query PHY capabilities via the admin queue and dump
 * them, decoding the phy_type / phy_type_ext bitmasks into names. */
3914 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3916 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3917 struct i40e_hw *hw = &pf->hw;
3918 device_t dev = pf->dev;
3919 enum i40e_status_code status;
3920 struct i40e_aq_get_phy_abilities_resp abilities;
3924 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3926 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3930 status = i40e_aq_get_phy_capabilities(hw,
3931 FALSE, FALSE, &abilities, NULL);
3934 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3935 __func__, i40e_stat_str(hw, status),
3936 i40e_aq_str(hw, hw->aq.asq_last_status));
3941 sbuf_printf(buf, "\n"
3943 abilities.phy_type);
/* Decode each set bit of the 32-bit base PHY-type mask */
3945 if (abilities.phy_type != 0) {
3946 sbuf_printf(buf, "<");
3947 for (int i = 0; i < 32; i++)
3948 if ((1 << i) & abilities.phy_type)
3949 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3950 sbuf_printf(buf, ">\n");
3953 sbuf_printf(buf, "PHY Ext : %02x",
3954 abilities.phy_type_ext);
/* Only the low 4 bits of the extended mask are decoded here */
3956 if (abilities.phy_type_ext != 0) {
3957 sbuf_printf(buf, "<");
3958 for (int i = 0; i < 4; i++)
3959 if ((1 << i) & abilities.phy_type_ext)
3960 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
3961 sbuf_printf(buf, ">");
3963 sbuf_printf(buf, "\n");
3971 "ID : %02x %02x %02x %02x\n"
3972 "ModType : %02x %02x %02x\n"
3976 abilities.link_speed,
3977 abilities.abilities, abilities.eee_capability,
3978 abilities.eeer_val, abilities.d3_lpan,
3979 abilities.phy_id[0], abilities.phy_id[1],
3980 abilities.phy_id[2], abilities.phy_id[3],
3981 abilities.module_type[0], abilities.module_type[1],
/* fec_cfg_curr_mod_ext_info: bits 7-5 and bits 4-0 reported separately */
3982 abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3983 abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3984 abilities.ext_comp_code);
3986 error = sbuf_finish(buf);
3988 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Sysctl handler: list the PF's MAC/VLAN filters, then (if SR-IOV VFs
 * exist) a per-VF filter section. */
3995 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
3997 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3998 struct ixl_vsi *vsi = &pf->vsi;
3999 struct ixl_mac_filter *f;
4000 device_t dev = pf->dev;
4001 int error = 0, ftl_len = 0, ftl_counter = 0;
4005 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4007 device_printf(dev, "Could not allocate sbuf for output.\n");
4011 sbuf_printf(buf, "\n");
4013 /* Print MAC filters */
4014 sbuf_printf(buf, "PF Filters:\n");
/* First pass counts entries so the print pass can suppress the final '\n' */
4015 SLIST_FOREACH(f, &vsi->ftl, next)
4019 sbuf_printf(buf, "(none)\n");
4021 SLIST_FOREACH(f, &vsi->ftl, next) {
4023 MAC_FORMAT ", vlan %4d, flags %#06x",
4024 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4025 /* don't print '\n' for last entry */
4026 if (++ftl_counter != ftl_len)
4027 sbuf_printf(buf, "\n");
4032 /* TODO: Give each VF its own filter list sysctl */
4034 if (pf->num_vfs > 0) {
4035 sbuf_printf(buf, "\n\n");
4036 for (int i = 0; i < pf->num_vfs; i++) {
4038 if (!(vf->vf_flags & VF_FLAG_ENABLED))
4042 ftl_len = 0, ftl_counter = 0;
4043 sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
/* NOTE(review): both VF loops walk &vsi->ftl, i.e. the PF VSI's
 * filter list, not the VF's own VSI -- every VF section would
 * repeat the PF filters. Presumably this should iterate the VF's
 * VSI filter list; confirm against the vf structure. */
4044 SLIST_FOREACH(f, &vsi->ftl, next)
4048 sbuf_printf(buf, "(none)\n");
4050 SLIST_FOREACH(f, &vsi->ftl, next) {
4052 MAC_FORMAT ", vlan %4d, flags %#06x\n",
4053 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4060 error = sbuf_finish(buf);
4062 device_printf(dev, "Error finishing sbuf: %d\n", error);
4068 #define IXL_SW_RES_SIZE 0x14
/* qsort(3) comparator: order switch-resource entries by resource_type
 * (ascending) for stable display in ixl_sysctl_hw_res_alloc(). */
4070 ixl_res_alloc_cmp(const void *a, const void *b)
4072 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4073 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4074 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4076 return ((int)one->resource_type - (int)two->resource_type);
4080 * Longest string length: 25
/* Map a switch resource_type code (< IXL_SW_RES_SIZE) to its display
 * name; anything out of range reads as "(Reserved)". */
4083 ixl_switch_res_type_string(u8 type)
4085 // TODO: This should be changed to static const
4086 char * ixl_switch_res_type_strings[0x14] = {
4089 "Perfect Match MAC address",
4092 "Multicast hash entry",
4093 "Unicast hash entry",
4097 "VLAN Statistic Pool",
4100 "Inner VLAN Forward filter",
4110 return ixl_switch_res_type_strings[type];
4112 return "(Reserved)";
/* Sysctl handler: query the switch resource-allocation table from
 * firmware and print it sorted by resource type. */
4116 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4118 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4119 struct i40e_hw *hw = &pf->hw;
4120 device_t dev = pf->dev;
4122 enum i40e_status_code status;
4126 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4128 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4130 device_printf(dev, "Could not allocate sbuf for output.\n");
4134 bzero(resp, sizeof(resp));
4135 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4141 "%s: get_switch_resource_alloc() error %s, aq error %s\n",
4142 __func__, i40e_stat_str(hw, status),
4143 i40e_aq_str(hw, hw->aq.asq_last_status));
4148 /* Sort entries by type for display */
4149 qsort(resp, num_entries,
4150 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
4151 &ixl_res_alloc_cmp);
4153 sbuf_cat(buf, "\n");
4154 sbuf_printf(buf, "# of entries: %d\n", num_entries);
4156 " Type | Guaranteed | Total | Used | Un-allocated\n"
4157 " | (this) | (all) | (this) | (all) \n");
4158 for (int i = 0; i < num_entries; i++) {
4160 "%25s | %10d %5d %6d %12d",
4161 ixl_switch_res_type_string(resp[i].resource_type),
4165 resp[i].total_unalloced);
/* suppress the trailing newline on the last row */
4166 if (i < num_entries - 1)
4167 sbuf_cat(buf, "\n");
4170 error = sbuf_finish(buf);
4172 device_printf(dev, "Error finishing sbuf: %d\n", error);
4179 ** Caller must init and delete sbuf; this function will clear and
4180 ** finish it for caller.
/* Format one switch-config element as "<TYPE> <info>" into the caller's
 * sbuf and return its backing string (valid until the sbuf is reused). */
4183 ixl_switch_element_string(struct sbuf *s,
4184 struct i40e_aqc_switch_config_element_resp *element)
4188 switch (element->element_type) {
4189 case I40E_AQ_SW_ELEM_TYPE_MAC:
4190 sbuf_printf(s, "MAC %3d", element->element_info);
4192 case I40E_AQ_SW_ELEM_TYPE_PF:
4193 sbuf_printf(s, "PF %3d", element->element_info);
4195 case I40E_AQ_SW_ELEM_TYPE_VF:
4196 sbuf_printf(s, "VF %3d", element->element_info);
4198 case I40E_AQ_SW_ELEM_TYPE_EMP:
4201 case I40E_AQ_SW_ELEM_TYPE_BMC:
4204 case I40E_AQ_SW_ELEM_TYPE_PV:
4207 case I40E_AQ_SW_ELEM_TYPE_VEB:
4210 case I40E_AQ_SW_ELEM_TYPE_PA:
4213 case I40E_AQ_SW_ELEM_TYPE_VSI:
4214 sbuf_printf(s, "VSI %3d", element->element_info);
4222 return sbuf_data(s);
/* Sysctl handler: fetch the internal switch configuration via the admin
 * queue and print one row per element (SEID, name, uplink, downlink,
 * connection type). */
4226 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
4228 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4229 struct i40e_hw *hw = &pf->hw;
4230 device_t dev = pf->dev;
4233 enum i40e_status_code status;
4236 u8 aq_buf[I40E_AQ_LARGE_BUF];
4238 struct i40e_aqc_get_switch_config_resp *sw_config;
4239 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
4241 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4243 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
4247 status = i40e_aq_get_switch_config(hw, sw_config,
4248 sizeof(aq_buf), &next, NULL);
4251 "%s: aq_get_switch_config() error %s, aq error %s\n",
4252 __func__, i40e_stat_str(hw, status),
4253 i40e_aq_str(hw, hw->aq.asq_last_status));
/* A nonzero 'next' SEID means more elements exist than fit in one
 * response; only the first page is printed today (see TODO). */
4258 device_printf(dev, "%s: TODO: get more config with SEID %d\n",
/* Scratch sbuf reused per-row by ixl_switch_element_string() */
4261 nmbuf = sbuf_new_auto();
4263 device_printf(dev, "Could not allocate sbuf for name output.\n");
4268 sbuf_cat(buf, "\n");
4269 /* Assuming <= 255 elements in switch */
4270 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
4271 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
4273 ** Revision -- all elements are revision 1 for now
4276 "SEID ( Name ) | Uplink | Downlink | Conn Type\n"
4277 " | | | (uplink)\n");
4278 for (int i = 0; i < sw_config->header.num_reported; i++) {
4279 // "%4d (%8s) | %8s %8s %#8x",
4280 sbuf_printf(buf, "%4d", sw_config->element[i].seid);
4282 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4283 &sw_config->element[i]));
4284 sbuf_cat(buf, " | ");
4285 sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
4287 sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
4289 sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
/* suppress the trailing newline on the last row */
4290 if (i < sw_config->header.num_reported - 1)
4291 sbuf_cat(buf, "\n");
4295 error = sbuf_finish(buf);
4297 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Sysctl handler: dump the RSS hash key. X722 reads it through the
 * admin queue; other MACs read the PFQF_HKEY registers directly. */
4305 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
4307 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4308 struct i40e_hw *hw = &pf->hw;
4309 device_t dev = pf->dev;
4312 enum i40e_status_code status;
4315 struct i40e_aqc_get_set_rss_key_data key_data;
4317 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4319 device_printf(dev, "Could not allocate sbuf for output.\n");
4323 bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
4325 sbuf_cat(buf, "\n");
4326 if (hw->mac.type == I40E_MAC_X722) {
4327 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
4329 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
4330 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
/* Register path: copy each 32-bit HKEY register into key_data
 * at offset i*4 */
4332 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4333 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
4334 bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
4338 ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
4340 error = sbuf_finish(buf);
4342 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Hex-dump 'length' bytes of 'buf' into 'sb', 16 bytes per line, each
 * line prefixed with its offset (label_offset-relative). When 'text' is
 * true, an ASCII column follows (non-printables shown as '.'). */
4349 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
4354 if (length < 1 || buf == NULL) return;
4356 int byte_stride = 16;
4357 int lines = length / byte_stride;
4358 int rem = length % byte_stride;
/* NOTE(review): with lines = length/stride (floor), a nonzero 'rem'
 * shortens the last *full* line and drops the tail bytes -- unless an
 * elided line here bumps 'lines' when rem > 0. Confirm. */
4362 for (i = 0; i < lines; i++) {
4363 width = (rem > 0 && i == lines - 1)
4364 ? rem : byte_stride;
4366 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
4368 for (j = 0; j < width; j++)
4369 sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
/* Pad a short final row so the ASCII column stays aligned */
4371 if (width < byte_stride) {
4372 for (k = 0; k < (byte_stride - width); k++)
4373 sbuf_printf(sb, " ");
4377 sbuf_printf(sb, "\n");
4381 for (j = 0; j < width; j++) {
4382 c = (char)buf[i * byte_stride + j];
4383 if (c < 32 || c > 126)
4384 sbuf_printf(sb, ".");
4386 sbuf_printf(sb, "%c", c);
4389 sbuf_printf(sb, "\n");
/* Sysctl handler: dump the RSS lookup table (LUT). X722 fetches it via
 * the admin queue; other MACs read the PFQF_HLUT registers. */
4395 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
4397 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4398 struct i40e_hw *hw = &pf->hw;
4399 device_t dev = pf->dev;
4402 enum i40e_status_code status;
4406 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4408 device_printf(dev, "Could not allocate sbuf for output.\n");
4412 bzero(hlut, sizeof(hlut));
4413 sbuf_cat(buf, "\n");
4414 if (hw->mac.type == I40E_MAC_X722) {
4415 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
4417 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
4418 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
/* Register path: each HLUT register holds 4 LUT bytes */
4420 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
4421 reg = rd32(hw, I40E_PFQF_HLUT(i));
4422 bcopy(&reg, &hlut[i << 2], 4);
/* NOTE(review): dump length is hard-coded to 512 rather than
 * sizeof(hlut)/rss_table_size -- confirm they agree. */
4425 ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
4427 error = sbuf_finish(buf);
4429 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Sysctl handler: report the 64-bit RSS hash-enable mask, assembled
 * from the two 32-bit PFQF_HENA registers (read-only sysctl). */
4436 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
4438 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4439 struct i40e_hw *hw = &pf->hw;
4442 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
4443 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
4445 return sysctl_handle_long(oidp, NULL, hena, req);
4449 * Sysctl to disable firmware's link management
4451 * 1 - Disable link management on this port
4452 * 0 - Re-enable link management
4454 * On normal NVMs, firmware manages link by default.
4457 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
4459 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4460 struct i40e_hw *hw = &pf->hw;
4461 device_t dev = pf->dev;
4462 int requested_mode = -1;
4463 enum i40e_status_code status = 0;
4466 /* Read in new mode */
4467 error = sysctl_handle_int(oidp, &requested_mode, 0, req);
/* Read-only access (no new value) just returns the current state */
4468 if ((error) || (req->newptr == NULL))
4470 /* Check for sane value */
4471 if (requested_mode < 0 || requested_mode > 1) {
4472 device_printf(dev, "Valid modes are 0 or 1\n");
/* Mode maps to bit 4 of the phy debug flags (!!mode << 4) */
4477 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4480 "%s: Error setting new phy debug mode %s,"
4481 " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4482 i40e_aq_str(hw, hw->aq.asq_last_status));
4490 * Read some diagnostic data from an SFP module
4491 * Bytes 96-99, 102-105 from device address 0xA2
4494 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4496 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4497 device_t dev = pf->dev;
/* Byte 0 at address 0xA0 is the module identifier; 0x3 denotes SFP
 * family per the message below */
4502 error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4504 device_printf(dev, "Error reading from i2c\n");
4507 if (output != 0x3) {
4508 device_printf(dev, "Module is not SFP/SFP+/SFP28 (%02X)\n", output);
/* Byte 92 at 0xA0 holds diagnostic-monitoring capability bits (0x60
 * mask); bail out if the module implements none of them.
 * NOTE(review): this read's return value is not checked. */
4512 pf->read_i2c_byte(pf, 92, 0xA0, &output);
4513 if (!(output & 0x60)) {
4514 device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4518 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4520 for (u8 offset = 96; offset < 100; offset++) {
4521 pf->read_i2c_byte(pf, offset, 0xA2, &output);
4522 sbuf_printf(sbuf, "%02X ", output);
4524 for (u8 offset = 102; offset < 106; offset++) {
4525 pf->read_i2c_byte(pf, offset, 0xA2, &output);
4526 sbuf_printf(sbuf, "%02X ", output);
4536 * Sysctl to read a byte from I2C bus.
4538 * Input: 32-bit value:
4539 * bits 0-7: device address (0xA0 or 0xA2)
4540 * bits 8-15: offset (0-255)
4541 * bits 16-31: unused
4542 * Output: 8-bit value read
4545 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4547 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4548 device_t dev = pf->dev;
4549 int input = -1, error = 0;
4550 u8 dev_addr, offset, output;
4552 /* Read in I2C read parameters */
4553 error = sysctl_handle_int(oidp, &input, 0, req);
4554 if ((error) || (req->newptr == NULL))
4556 /* Validate device address */
4557 dev_addr = input & 0xFF;
4558 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4561 offset = (input >> 8) & 0xFF;
4563 error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
/* Result is reported on the console, not through the sysctl value */
4567 device_printf(dev, "%02X\n", output);
4572 * Sysctl to write a byte to the I2C bus.
4574 * Input: 32-bit value:
4575 * bits 0-7: device address (0xA0 or 0xA2)
4576 * bits 8-15: offset (0-255)
4577 * bits 16-23: value to write
4578 * bits 24-31: unused
4579 * Output: 8-bit value written
4582 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4584 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4585 device_t dev = pf->dev;
4586 int input = -1, error = 0;
4587 u8 dev_addr, offset, value;
4589 /* Read in I2C write parameters */
4590 error = sysctl_handle_int(oidp, &input, 0, req);
4591 if ((error) || (req->newptr == NULL))
4593 /* Validate device address */
4594 dev_addr = input & 0xFF;
4595 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4598 offset = (input >> 8) & 0xFF;
4599 value = (input >> 16) & 0xFF;
4601 error = pf->write_i2c_byte(pf, offset, dev_addr, value);
/* Confirmation is reported on the console, not via the sysctl value */
4605 device_printf(dev, "%02X written\n", value);
/* Fetch current PHY abilities and report, via *is_set, whether the FEC
 * bit(s) in 'bit_pos' are set in fec_cfg_curr_mod_ext_info. The fetched
 * abilities are also returned for use by ixl_set_fec_config(). */
4610 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4611 u8 bit_pos, int *is_set)
4613 device_t dev = pf->dev;
4614 struct i40e_hw *hw = &pf->hw;
4615 enum i40e_status_code status;
4617 status = i40e_aq_get_phy_capabilities(hw,
4618 FALSE, FALSE, abilities, NULL);
4621 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4622 __func__, i40e_stat_str(hw, status),
4623 i40e_aq_str(hw, hw->aq.asq_last_status));
4627 *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
/* Set or clear the FEC bit(s) 'bit_pos' in the PHY config. Builds a
 * set_phy_config request from the caller-supplied abilities snapshot;
 * skips the AQ call entirely if the FEC field would be unchanged. */
4632 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4633 u8 bit_pos, int set)
4635 device_t dev = pf->dev;
4636 struct i40e_hw *hw = &pf->hw;
4637 struct i40e_aq_set_phy_config config;
4638 enum i40e_status_code status;
4640 /* Set new PHY config */
4641 memset(&config, 0, sizeof(config));
/* Start from current FEC state with the target bit(s) cleared... */
4642 config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
/* ...then re-set them when requested (set != 0) */
4644 config.fec_config |= bit_pos;
4645 if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
/* Carry over every other PHY setting unchanged so only FEC differs */
4646 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4647 config.phy_type = abilities->phy_type;
4648 config.phy_type_ext = abilities->phy_type_ext;
4649 config.link_speed = abilities->link_speed;
4650 config.eee_capability = abilities->eee_capability;
4651 config.eeer = abilities->eeer_val;
4652 config.low_power_ctrl = abilities->d3_lpan;
4653 status = i40e_aq_set_phy_config(hw, &config, NULL);
4657 "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4658 __func__, i40e_stat_str(hw, status),
4659 i40e_aq_str(hw, hw->aq.asq_last_status));
/* Sysctl: get/set the KR-FEC (FC/BASE-R) *ability* bit of the PHY's FEC
 * configuration. Read reports current state; write of 0/1 applies it. */
4668 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4670 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4671 int mode, error = 0;
4673 struct i40e_aq_get_phy_abilities_resp abilities;
4674 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4677 /* Read in new mode */
4678 error = sysctl_handle_int(oidp, &mode, 0, req);
4679 if ((error) || (req->newptr == NULL))
4682 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
/* Sysctl: get/set the RS-FEC *ability* bit of the PHY's FEC config.
 * Same pattern as ixl_sysctl_fec_fc_ability() with the RS flags. */
4686 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4688 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4689 int mode, error = 0;
4691 struct i40e_aq_get_phy_abilities_resp abilities;
4692 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4695 /* Read in new mode */
4696 error = sysctl_handle_int(oidp, &mode, 0, req);
4697 if ((error) || (req->newptr == NULL))
4700 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
/* Sysctl: get/set the KR-FEC *request* bit (ask link partner for
 * FC/BASE-R FEC). Same pattern as the ability sysctls above. */
4704 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4706 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4707 int mode, error = 0;
4709 struct i40e_aq_get_phy_abilities_resp abilities;
4710 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4713 /* Read in new mode */
4714 error = sysctl_handle_int(oidp, &mode, 0, req);
4715 if ((error) || (req->newptr == NULL))
4718 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
/* Sysctl: get/set the RS-FEC *request* bit (ask link partner for RS
 * FEC). Same pattern as the ability sysctls above. */
4722 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4724 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4725 int mode, error = 0;
4727 struct i40e_aq_get_phy_abilities_resp abilities;
4728 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4731 /* Read in new mode */
4732 error = sysctl_handle_int(oidp, &mode, 0, req);
4733 if ((error) || (req->newptr == NULL))
4736 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
/* Sysctl: get/set the automatic-FEC bit (let firmware pick the FEC
 * mode). Same pattern as the other FEC sysctls above. */
4740 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4742 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4743 int mode, error = 0;
4745 struct i40e_aq_get_phy_abilities_resp abilities;
4746 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4749 /* Read in new mode */
4750 error = sysctl_handle_int(oidp, &mode, 0, req);
4751 if ((error) || (req->newptr == NULL))
4754 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
/* Sysctl handler: pull firmware debug-dump clusters via repeated
 * i40e_aq_debug_dump() calls, accumulating each table into a 1.25MB
 * bounce buffer and hex-dumping it per table. */
4758 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4760 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4761 struct i40e_hw *hw = &pf->hw;
4762 device_t dev = pf->dev;
4765 enum i40e_status_code status;
4767 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4769 device_printf(dev, "Could not allocate sbuf for output.\n");
4774 /* This amount is only necessary if reading the entire cluster into memory */
4775 #define IXL_FINAL_BUFF_SIZE (1280 * 1024)
/* NOTE(review): M_WAITOK malloc cannot return NULL on FreeBSD; this
 * NULL check is dead (harmless). */
4776 final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK);
4777 if (final_buff == NULL) {
4778 device_printf(dev, "Could not allocate memory for output.\n");
4781 int final_buff_len = 0;
/* Cursor state for walking the dump: table id and index advance as
 * firmware reports "next" values */
4787 u16 curr_buff_size = 4096;
4788 u8 curr_next_table = 0;
4789 u32 curr_next_index = 0;
4795 sbuf_cat(buf, "\n");
4798 status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4799 dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4801 device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4802 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4806 /* copy info out of temp buffer */
/* NOTE(review): no bound check that final_buff_len + ret_buff_size
 * stays within IXL_FINAL_BUFF_SIZE -- confirm firmware cannot
 * exceed it, or clamp. */
4807 bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4808 final_buff_len += ret_buff_size;
4810 if (ret_next_table != curr_next_table) {
4811 /* We're done with the current table; we can dump out read data. */
4812 sbuf_printf(buf, "%d:", curr_next_table);
4813 int bytes_printed = 0;
/* NOTE(review): '<=' prints one extra 16-byte chunk past
 * final_buff_len (reads stale/uninitialized tail bytes);
 * '<' looks intended -- confirm. */
4814 while (bytes_printed <= final_buff_len) {
4815 sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4816 bytes_printed += 16;
4818 sbuf_cat(buf, "\n");
4820 /* The entire cluster has been read; we're finished */
4821 if (ret_next_table == 0xFF)
4824 /* Otherwise clear the output buffer and continue reading */
4825 bzero(final_buff, IXL_FINAL_BUFF_SIZE);
/* Sentinel index meaning firmware has no more data */
4829 if (ret_next_index == 0xFFFFFFFF)
4832 bzero(dump_buf, sizeof(dump_buf));
4833 curr_next_table = ret_next_table;
4834 curr_next_index = ret_next_index;
4838 free(final_buff, M_DEVBUF);
4840 error = sbuf_finish(buf);
4842 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Sysctl: enable (1) or disable (0) the firmware LLDP agent. Disabling
 * is refused on X722/NPAR and on FW AQ API versions older than 1.7. */
4849 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4851 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4852 struct i40e_hw *hw = &pf->hw;
4853 device_t dev = pf->dev;
4855 int state, new_state;
4856 enum i40e_status_code status;
/* Current state: 1 = agent running (DISABLED flag clear) */
4857 state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4859 /* Read in new mode */
4860 error = sysctl_handle_int(oidp, &new_state, 0, req);
4861 if ((error) || (req->newptr == NULL))
4864 /* Already in requested state */
4865 if (new_state == state)
4868 if (new_state == 0) {
4869 if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) {
4870 device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n");
4874 if (pf->hw.aq.api_maj_ver < 1 ||
4875 (pf->hw.aq.api_maj_ver == 1 &&
4876 pf->hw.aq.api_min_ver < 7)) {
4877 device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
/* Stop the agent and take over DCB parameter handling */
4881 i40e_aq_stop_lldp(&pf->hw, true, NULL);
4882 i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
4883 atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4885 status = i40e_aq_start_lldp(&pf->hw, NULL);
/* EEXIST just means it was already running; not fatal */
4886 if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST)
4887 device_printf(dev, "FW LLDP agent is already running\n");
4888 atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4895 * Get FW LLDP Agent status
/* Read the LLDP config from NVM and mirror the per-port AdminStatus
 * into the IXL_PF_STATE_FW_LLDP_DISABLED state flag. */
4898 ixl_get_fw_lldp_status(struct ixl_pf *pf)
4900 enum i40e_status_code ret = I40E_SUCCESS;
4901 struct i40e_lldp_variables lldp_cfg;
4902 struct i40e_hw *hw = &pf->hw;
4905 ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
4909 /* Get the LLDP AdminStatus for the current port */
/* adminstatus packs one 4-bit field per port */
4910 adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
4913 /* Check if LLDP agent is disabled */
4915 device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n");
4916 atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4918 atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
/* During attach: work around old firmware (< 4.33) by restarting
 * autonegotiation, then query the initial link state into pf->link_up. */
4924 ixl_attach_get_link_status(struct ixl_pf *pf)
4926 struct i40e_hw *hw = &pf->hw;
4927 device_t dev = pf->dev;
4930 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4931 (hw->aq.fw_maj_ver < 4)) {
/* brief settle delay before kicking AN on old FW */
4932 i40e_msec_delay(75);
4933 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4935 device_printf(dev, "link restart failed, aq_err=%d\n",
4936 pf->hw.aq.asq_last_status);
4941 /* Determine link state */
4942 hw->phy.get_link_info = TRUE;
4943 i40e_get_link_status(hw, &pf->link_up);
/* Sysctl: request a PF reset. The reset itself is deferred to the admin
 * task via the IXL_PF_STATE_PF_RESET_REQ state flag. */
4948 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4950 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4951 int requested = 0, error = 0;
4953 /* Read in new mode */
4954 error = sysctl_handle_int(oidp, &requested, 0, req);
4955 if ((error) || (req->newptr == NULL))
4958 /* Initiate the PF reset later in the admin task */
4959 atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
/* Sysctl: trigger an immediate core reset (CORER) by writing the
 * reset-trigger register directly. */
4965 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4967 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4968 struct i40e_hw *hw = &pf->hw;
4969 int requested = 0, error = 0;
4971 /* Read in new mode */
4972 error = sysctl_handle_int(oidp, &requested, 0, req);
4973 if ((error) || (req->newptr == NULL))
4976 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
/* Sysctl: trigger an immediate global reset (GLOBR) by writing the
 * reset-trigger register directly. */
4982 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4984 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4985 struct i40e_hw *hw = &pf->hw;
4986 int requested = 0, error = 0;
4988 /* Read in new mode */
4989 error = sysctl_handle_int(oidp, &requested, 0, req);
4990 if ((error) || (req->newptr == NULL))
4993 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
/* Sysctl: trigger an EMP firmware reset (EMPFWR), but only if the
 * (undocumented) register at 0x000B818C says software may initiate it. */
4999 ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS)
5001 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5002 struct i40e_hw *hw = &pf->hw;
5003 int requested = 0, error = 0;
5005 /* Read in new mode */
5006 error = sysctl_handle_int(oidp, &requested, 0, req);
5007 if ((error) || (req->newptr == NULL))
5010 /* TODO: Find out how to bypass this */
/* NOTE(review): magic register/bit -- meaning taken from the message
 * below ("SW not allowed to initiate EMPR"); no symbolic name known. */
5011 if (!(rd32(hw, 0x000B818C) & 0x1)) {
5012 device_printf(pf->dev, "SW not allowed to initiate EMPR\n");
5015 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_EMPFWR_MASK);
5021 * Print out mapping of TX queue indexes and Rx queue indexes
5025 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
5027 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5028 struct ixl_vsi *vsi = &pf->vsi;
5029 device_t dev = pf->dev;
5033 struct ixl_rx_queue *rx_que = vsi->rx_queues;
5034 struct ixl_tx_queue *tx_que = vsi->tx_queues;
5036 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5038 device_printf(dev, "Could not allocate sbuf for output.\n");
5042 sbuf_cat(buf, "\n");
5043 for (int i = 0; i < vsi->num_rx_queues; i++) {
5044 rx_que = &vsi->rx_queues[i];
5045 sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
5047 for (int i = 0; i < vsi->num_tx_queues; i++) {
5048 tx_que = &vsi->tx_queues[i];
5049 sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
5052 error = sbuf_finish(buf);
5054 device_printf(dev, "Error finishing sbuf: %d\n", error);