1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
39 #include "ixl_pf_iov.h"
44 #include "ixl_iw_int.h"
47 static u8 ixl_convert_sysctl_aq_link_speed(u8, bool);
48 static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
51 static int ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
52 static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
53 static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
54 static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
55 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
56 static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
57 static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
58 static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
61 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
62 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
63 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
64 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
65 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
66 static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
67 static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
68 static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
69 static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
70 static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
71 static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
72 static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
73 static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
74 static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
75 static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
76 static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
77 static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
78 static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
79 static int ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
80 static int ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
81 static int ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
82 static int ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS);
83 static int ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
84 static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
86 static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
87 static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
91 extern int ixl_enable_iwarp;
92 extern int ixl_limit_iwarp_msix;
/* Ethernet broadcast MAC, used when installing the broadcast filter. */
95 const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
96 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/* Human-readable flow-control mode names, indexed by negotiated FC state. */
98 const char * const ixl_fc_string[6] = {
/* FEC mode names; index 0 = RS-FEC, 1 = FC-FEC/BASE-R, 2 = none (see ixl_link_up_msg). */
107 static char *ixl_fec_string[3] = {
109 "CL74 FC-FEC/BASE-R",
/* malloc(9) type tag for this driver's allocations. */
113 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
116 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
/*
 * Format "fw A.B.C api A.B nvm X.Y etid Z oem A.B.C" into the caller-supplied
 * sbuf. The OEM version is packed into hw->nvm.oem_ver as
 * [31:24]=major, [23:16/8]=build, [7:0]=patch, unpacked below.
 */
119 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
121 u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
122 u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
123 u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
126 "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
127 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
128 hw->aq.api_maj_ver, hw->aq.api_min_ver,
129 (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
130 IXL_NVM_VERSION_HI_SHIFT,
131 (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
132 IXL_NVM_VERSION_LO_SHIFT,
134 oem_ver, oem_build, oem_patch)/* NOTE(review): etid argument elided in this view */;
/*
 * Print the NVM/FW version string to the console via device_printf().
 * NOTE(review): sbuf_new_auto() result is used without a NULL check, and
 * sbuf_finish()/sbuf_delete() are not visible here — presumably in elided
 * lines; verify against full source.
 */
138 ixl_print_nvm_version(struct ixl_pf *pf)
140 struct i40e_hw *hw = &pf->hw;
141 device_t dev = pf->dev;
144 sbuf = sbuf_new_auto();
145 ixl_nvm_version_str(hw, sbuf);
147 device_printf(dev, "%s\n", sbuf_data(sbuf));
/*
 * Program the PF's TX interrupt-throttle value into every TX queue's
 * ITR register and mirror it in the per-ring software state.
 */
152 ixl_configure_tx_itr(struct ixl_pf *pf)
154 struct i40e_hw *hw = &pf->hw;
155 struct ixl_vsi *vsi = &pf->vsi;
156 struct ixl_tx_queue *que = vsi->tx_queues;
158 vsi->tx_itr_setting = pf->tx_itr;
160 for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
161 struct tx_ring *txr = &que->txr;
163 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
164 vsi->tx_itr_setting);
165 txr->itr = vsi->tx_itr_setting;
166 txr->latency = IXL_AVE_LATENCY;
/*
 * RX counterpart of ixl_configure_tx_itr(): write the PF RX ITR setting
 * into each RX queue's ITR register and the ring's software copy.
 */
171 ixl_configure_rx_itr(struct ixl_pf *pf)
173 struct i40e_hw *hw = &pf->hw;
174 struct ixl_vsi *vsi = &pf->vsi;
175 struct ixl_rx_queue *que = vsi->rx_queues;
177 vsi->rx_itr_setting = pf->rx_itr;
179 for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
180 struct rx_ring *rxr = &que->rxr;
182 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
183 vsi->rx_itr_setting);
184 rxr->itr = vsi->rx_itr_setting;
185 rxr->latency = IXL_AVE_LATENCY;
190 * Write PF ITR values to queue ITR registers.
/* Convenience wrapper: apply both TX and RX ITR settings. */
193 ixl_configure_itr(struct ixl_pf *pf)
195 ixl_configure_tx_itr(pf);
196 ixl_configure_rx_itr(pf);
199 /*********************************************************************
201 * Get the hardware capabilities
203 **********************************************************************/
/*
 * Discover function-level capabilities via the admin queue, detect an
 * optional I2C interface, and select the driver's I2C accessor functions
 * based on the configured access method and MAC/API version.
 */
206 ixl_get_hw_capabilities(struct ixl_pf *pf)
208 struct i40e_aqc_list_capabilities_element_resp *buf;
209 struct i40e_hw *hw = &pf->hw;
210 device_t dev = pf->dev;
211 enum i40e_status_code status;
212 int len, i2c_intfc_num;
/* Initial guess: room for 40 capability elements; AQ reports "needed" if short. */
216 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
218 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
219 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
220 device_printf(dev, "Unable to allocate cap memory\n");
224 /* This populates the hw struct */
225 status = i40e_aq_discover_capabilities(hw, buf, len,
226 &needed, i40e_aqc_opc_list_func_capabilities, NULL);
228 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
230 /* retry once with a larger buffer */
234 } else if (status != I40E_SUCCESS) {
235 device_printf(dev, "capability discovery failed; status %s, error %s\n",
236 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
241 * Some devices have both MDIO and I2C; since this isn't reported
242 * by the FW, check registers to see if an I2C interface exists.
244 i2c_intfc_num = ixl_find_i2c_interface(pf);
245 if (i2c_intfc_num != -1)
248 /* Determine functions to use for driver I2C accesses */
249 switch (pf->i2c_access_method) {
/* AQ-based I2C only works on XL710 with FW API >= 1.7; else fall back to registers. */
251 if (hw->mac.type == I40E_MAC_XL710 &&
252 hw->aq.api_maj_ver == 1 &&
253 hw->aq.api_min_ver >= 7) {
254 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
255 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
257 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
258 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
263 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
264 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
267 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
268 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
/* Bit-banged I2C: slowest but always available. */
271 pf->read_i2c_byte = ixl_read_i2c_byte_bb;
272 pf->write_i2c_byte = ixl_write_i2c_byte_bb;
275 /* Should not happen */
276 device_printf(dev, "Error setting I2C access functions\n");
280 /* Print a subset of the capability information. */
282 "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
283 hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
284 hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
285 (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
286 (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
287 (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
293 /* For the set_advertise sysctl */
/*
 * Advertise the full set of supported speeds at attach time so a driver
 * unload/reload returns the link to its default advertisement, then cache
 * the sysctl-format value in pf->advertised_speed.
 */
295 ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
297 device_t dev = pf->dev;
300 /* Make sure to initialize the device to the complete list of
301 * supported speeds on driver load, to ensure unloading and
302 * reloading the driver will restore this value.
304 err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
306 /* Non-fatal error */
307 device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
312 pf->advertised_speed =
313 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
/*
 * Tear down HW-facing state in reverse order of setup: LAN HMC (if it was
 * initialized), then the admin queue (with intr0 masked first), then release
 * the PF's queue allocation from the queue manager.
 */
317 ixl_teardown_hw_structs(struct ixl_pf *pf)
319 enum i40e_status_code status = 0;
320 struct i40e_hw *hw = &pf->hw;
321 device_t dev = pf->dev;
323 /* Shutdown LAN HMC */
324 if (hw->hmc.hmc_obj) {
325 status = i40e_shutdown_lan_hmc(hw);
328 "init: LAN HMC shutdown failure; status %s\n",
329 i40e_stat_str(hw, status));
334 /* Shutdown admin queue */
335 ixl_disable_intr0(hw);
336 status = i40e_shutdown_adminq(hw);
339 "init: Admin Queue shutdown failure; status %s\n",
340 i40e_stat_str(hw, status));
342 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
/*
 * Perform a PF reset and re-initialize everything the reset clears:
 * admin queue, capabilities, LAN HMC, switch config, PHY interrupt mask,
 * flow control, and finally intr0. Error paths print and (in elided lines)
 * presumably bail out early — confirm against full source.
 */
348 ixl_reset(struct ixl_pf *pf)
350 struct i40e_hw *hw = &pf->hw;
351 device_t dev = pf->dev;
355 // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
357 error = i40e_pf_reset(hw);
359 device_printf(dev, "init: PF reset failure\n");
364 error = i40e_init_adminq(hw);
366 device_printf(dev, "init: Admin queue init failure;"
367 " status code %d\n", error);
372 i40e_clear_pxe_mode(hw);
375 error = ixl_get_hw_capabilities(pf);
377 device_printf(dev, "init: Error retrieving HW capabilities;"
378 " status code %d\n", error);
382 error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
383 hw->func_caps.num_rx_qp, 0, 0);
385 device_printf(dev, "init: LAN HMC init failed; status code %d\n",
391 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
393 device_printf(dev, "init: LAN HMC config failed; status code %d\n",
399 // XXX: possible fix for panic, but our failure recovery is still broken
400 error = ixl_switch_config(pf);
402 device_printf(dev, "init: ixl_switch_config() failed: %d\n",
407 error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
410 device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
411 " aq_err %d\n", error, hw->aq.asq_last_status);
416 error = i40e_set_fc(hw, &set_fc_err_mask, true);
418 device_printf(dev, "init: setting link flow control failed; retcode %d,"
419 " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
423 // XXX: (Rebuild VSIs?)
425 /* Firmware delay workaround */
/* Older FW (< 4.33) needs an explicit link restart after reset. */
426 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
427 (hw->aq.fw_maj_ver < 4)) {
429 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
431 device_printf(dev, "init: link restart failed, aq_err %d\n",
432 hw->aq.asq_last_status);
438 /* Re-enable admin queue interrupt */
440 ixl_configure_intr0_msix(pf);
441 ixl_enable_intr0(hw);
/* NOTE(review): the rebuild/error-clear tail below appears to belong to a
 * separate code path in elided lines — verify against full source. */
447 ixl_rebuild_hw_structs_after_reset(pf);
449 /* The PF reset should have cleared any critical errors */
450 atomic_clear_32(&pf->state, IXL_PF_STATE_PF_CRIT_ERR);
451 atomic_clear_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
453 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
454 reg |= IXL_ICR0_CRIT_ERR_MASK;
455 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
462 * TODO: Make sure this properly handles admin queue / single rx queue intr
/*
 * Legacy/MSI (single-vector) interrupt handler (signature elided in this
 * view — presumably ixl_intr). Clears the PBA, reads ICR0, defers IOV and
 * admin-queue work to iflib tasks, and schedules the RX thread only when
 * queue 0 actually interrupted.
 */
467 struct ixl_pf *pf = arg;
468 struct i40e_hw *hw = &pf->hw;
469 struct ixl_vsi *vsi = &pf->vsi;
470 struct ixl_rx_queue *que = vsi->rx_queues;
476 // TODO: Check against proper field
478 /* Clear PBA at start of ISR if using legacy interrupts */
480 wr32(hw, I40E_PFINT_DYN_CTL0,
481 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
482 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
485 icr0 = rd32(hw, I40E_PFINT_ICR0);
489 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
490 iflib_iov_intr_deferred(vsi->ctx);
493 // TODO!: Do the stuff that's done in ixl_msix_adminq here, too!
494 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
495 iflib_admin_intr_deferred(vsi->ctx);
497 // TODO: Is intr0 enabled somewhere else?
498 ixl_enable_intr0(hw);
500 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
501 return (FILTER_SCHEDULE_THREAD);
503 return (FILTER_HANDLED);
507 /*********************************************************************
509 * MSI-X VSI Interrupt Service routine
511 **********************************************************************/
/*
 * Per-RX-queue MSI-X filter: update the queue's dynamic ITR and hand
 * packet processing off to the iflib thread.
 */
513 ixl_msix_que(void *arg)
515 struct ixl_rx_queue *rx_que = arg;
519 ixl_set_queue_rx_itr(rx_que);
520 // ixl_set_queue_tx_itr(que);
522 return (FILTER_SCHEDULE_THREAD);
526 /*********************************************************************
528 * MSI-X Admin Queue Interrupt Service routine
530 **********************************************************************/
/*
 * Vector-0 (admin/"other") interrupt filter. Reads ICR0, masks off causes
 * that must be serviced before re-enabling (AQ, MDD, GRST, VFLR, critical
 * errors), records pending work in pf->state flags, then restores the ENA
 * mask and re-arms intr0. Returns FILTER_SCHEDULE_THREAD when the admin
 * task needs to run.
 */
532 ixl_msix_adminq(void *arg)
534 struct ixl_pf *pf = arg;
535 struct i40e_hw *hw = &pf->hw;
536 device_t dev = pf->dev;
537 u32 reg, mask, rstat_reg;
538 bool do_task = FALSE;
540 DDPRINTF(dev, "begin");
544 reg = rd32(hw, I40E_PFINT_ICR0);
546 * For masking off interrupt causes that need to be handled before
547 * they can be re-enabled
549 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
551 /* Check on the cause */
552 if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
553 mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
557 if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
558 mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
559 atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
563 if (reg & I40E_PFINT_ICR0_GRST_MASK) {
564 mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
565 device_printf(dev, "Reset Requested!\n");
566 rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
567 rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
568 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
569 device_printf(dev, "Reset type: ");
571 /* These others might be handled similarly to an EMPR reset */
572 case I40E_RESET_CORER:
575 case I40E_RESET_GLOBR:
578 case I40E_RESET_EMPR:
585 /* overload admin queue task to check reset progress */
586 atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
591 * PE / PCI / ECC exceptions are all handled in the same way:
592 * mask out these three causes, then request a PF reset
594 * TODO: I think at least ECC error requires a GLOBR, not PFR
596 if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
597 device_printf(dev, "ECC Error detected!\n");
598 if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
599 device_printf(dev, "PCI Exception detected!\n");
600 if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
601 device_printf(dev, "Critical Protocol Engine Error detected!\n");
602 /* Checks against the conditions above */
603 if (reg & IXL_ICR0_CRIT_ERR_MASK) {
604 mask &= ~IXL_ICR0_CRIT_ERR_MASK;
605 atomic_set_32(&pf->state,
606 IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR);
610 // TODO: Linux driver never re-enables this interrupt once it has been detected
611 // Then what is supposed to happen? A PF reset? Should it never happen?
612 // TODO: Parse out this error into something human readable
613 if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
614 reg = rd32(hw, I40E_PFHMC_ERRORINFO);
615 if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
616 device_printf(dev, "HMC Error detected!\n");
617 device_printf(dev, "INFO 0x%08x\n", reg);
618 reg = rd32(hw, I40E_PFHMC_ERRORDATA);
619 device_printf(dev, "DATA 0x%08x\n", reg);
/* Write-to-clear the HMC error so it can fire again. */
620 wr32(hw, I40E_PFHMC_ERRORINFO, 0);
625 if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
626 mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
627 iflib_iov_intr_deferred(pf->vsi.ctx);
631 wr32(hw, I40E_PFINT_ICR0_ENA, mask);
632 ixl_enable_intr0(hw);
635 return (FILTER_SCHEDULE_THREAD);
637 return (FILTER_HANDLED);
640 /*********************************************************************
643 * Routines for multicast and vlan filter management.
645 *********************************************************************/
/*
 * Sync the interface's multicast list into HW filters. First counts the
 * AF_LINK entries; if the count reaches MAX_MULTICAST_ADDR, drops the
 * individual MC filters and falls back to multicast-promiscuous mode.
 * Otherwise installs one SW filter per address and pushes them to HW.
 */
647 ixl_add_multi(struct ixl_vsi *vsi)
649 struct ifmultiaddr *ifma;
650 struct ifnet *ifp = vsi->ifp;
651 struct i40e_hw *hw = vsi->hw;
654 IOCTL_DEBUGOUT("ixl_add_multi: begin");
658 ** First just get a count, to decide if we
659 ** we simply use multicast promiscuous.
661 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
662 if (ifma->ifma_addr->sa_family != AF_LINK)
666 if_maddr_runlock(ifp);
668 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
669 /* delete existing MC filters */
670 ixl_del_hw_filters(vsi, mcnt);
671 i40e_aq_set_vsi_multicast_promiscuous(hw,
672 vsi->seid, TRUE, NULL);
678 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
679 if (ifma->ifma_addr->sa_family != AF_LINK)
681 ixl_add_mc_filter(vsi,
682 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
685 if_maddr_runlock(ifp);
687 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
688 ixl_add_hw_filters(vsi, flags, mcnt);
691 IOCTL_DEBUGOUT("ixl_add_multi: end");
/*
 * Mark for deletion any installed MC filter whose address no longer
 * appears in the interface's multicast list (O(filters * addresses)
 * scan), then remove the marked filters from HW.
 */
695 ixl_del_multi(struct ixl_vsi *vsi)
697 struct ifnet *ifp = vsi->ifp;
698 struct ifmultiaddr *ifma;
699 struct ixl_mac_filter *f;
703 IOCTL_DEBUGOUT("ixl_del_multi: begin");
705 /* Search for removed multicast addresses */
707 SLIST_FOREACH(f, &vsi->ftl, next) {
708 if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
710 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
711 if (ifma->ifma_addr->sa_family != AF_LINK)
713 u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
714 if (cmp_etheraddr(f->macaddr, mc_addr)) {
719 if (match == FALSE) {
720 f->flags |= IXL_FILTER_DEL;
725 if_maddr_runlock(ifp);
728 ixl_del_hw_filters(vsi, mcnt);
/*
 * Log a one-line link-up notice with speed, requested and negotiated FEC,
 * autoneg status, and flow-control state. ixl_fec_string indices:
 * 0 = RS-FEC, 1 = KR/FC-FEC, 2 = none.
 */
734 ixl_link_up_msg(struct ixl_pf *pf)
736 struct i40e_hw *hw = &pf->hw;
737 struct ifnet *ifp = pf->vsi.ifp;
738 char *req_fec_string, *neg_fec_string;
741 fec_abilities = hw->phy.link_info.req_fec_info;
742 /* If both RS and KR are requested, only show RS */
743 if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
744 req_fec_string = ixl_fec_string[0];
745 else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
746 req_fec_string = ixl_fec_string[1];
748 req_fec_string = ixl_fec_string[2];
750 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
751 neg_fec_string = ixl_fec_string[0];
752 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
753 neg_fec_string = ixl_fec_string[1];
755 neg_fec_string = ixl_fec_string[2];
757 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
759 ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
760 req_fec_string, neg_fec_string,
761 (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
/* FC string: both pause bits -> full, TX only, RX only, or none. */
762 (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
763 hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
764 ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
765 ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
766 ixl_fc_string[1] : ixl_fc_string[0]);
770 * Configure admin queue/misc interrupt cause registers in hardware.
/*
 * Set up MSI-X vector 0 ("other" causes): enable the non-queue interrupt
 * sources, detach vector 0 from the queue linked list (0x7FF = EOL), set
 * its ITR, and arm it.
 */
773 ixl_configure_intr0_msix(struct ixl_pf *pf)
775 struct i40e_hw *hw = &pf->hw;
778 /* First set up the adminq - vector 0 */
779 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
780 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
782 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
783 I40E_PFINT_ICR0_ENA_GRST_MASK |
784 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
785 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
786 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
787 I40E_PFINT_ICR0_ENA_VFLR_MASK |
788 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
789 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
790 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
793 * 0x7FF is the end of the queue list.
794 * This means we won't use MSI-X vector 0 for a queue interrupt
797 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
798 /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
799 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
801 wr32(hw, I40E_PFINT_DYN_CTL0,
802 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
803 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
805 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
809 * Configure queue interrupt cause registers in hardware.
811 * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
/*
 * For each queue pair i, chain queue-pair i's RX then TX cause onto MSI-X
 * vector (i + 1); vector numbering is offset because vector 0 is reserved
 * for the admin queue.
 */
814 ixl_configure_queue_intr_msix(struct ixl_pf *pf)
816 struct i40e_hw *hw = &pf->hw;
817 struct ixl_vsi *vsi = &pf->vsi;
821 // TODO: See if max is really necessary
822 for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
823 /* Make sure interrupt is disabled */
824 wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
825 /* Set linked list head to point to corresponding RX queue
826 * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */
827 reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
828 & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
829 ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
830 & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
831 wr32(hw, I40E_PFINT_LNKLSTN(i), reg);
/* RX cause: next in chain is this pair's TX queue. */
833 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
834 (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
835 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
836 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
837 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
838 wr32(hw, I40E_QINT_RQCTL(i), reg);
/* TX cause: end of the chain for this vector. */
840 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
841 (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
842 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
843 (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
844 (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
845 wr32(hw, I40E_QINT_TQCTL(i), reg);
850 * Configure for single interrupt vector operation
/*
 * Legacy/MSI setup: one vector handles both the "other" causes and queue
 * pair 0. Programs ITR0 for TX and RX, enables the misc cause mask, and
 * chains queue 0 RX -> TX -> EOL on vector 0.
 */
853 ixl_configure_legacy(struct ixl_pf *pf)
855 struct i40e_hw *hw = &pf->hw;
856 struct ixl_vsi *vsi = &pf->vsi;
862 vsi->tx_itr_setting = pf->tx_itr;
863 wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
864 vsi->tx_itr_setting);
865 txr->itr = vsi->tx_itr_setting;
867 vsi->rx_itr_setting = pf->rx_itr;
868 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
869 vsi->rx_itr_setting);
870 rxr->itr = vsi->rx_itr_setting;
871 /* XXX: Assuming only 1 queue in single interrupt mode */
873 vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;
875 /* Setup "other" causes */
876 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
877 | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
878 | I40E_PFINT_ICR0_ENA_GRST_MASK
879 | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
880 | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
881 | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
882 | I40E_PFINT_ICR0_ENA_VFLR_MASK
883 | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
885 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
887 /* No ITR for non-queue interrupts */
888 wr32(hw, I40E_PFINT_STAT_CTL0,
889 IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
891 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
892 wr32(hw, I40E_PFINT_LNKLST0, 0);
894 /* Associate the queue pair to the vector and enable the q int */
895 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
896 | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
897 | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
898 wr32(hw, I40E_QINT_RQCTL(0), reg);
900 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
901 | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
902 | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
903 wr32(hw, I40E_QINT_TQCTL(0), reg);
/*
 * Release interrupt and BAR resources: the admin/legacy IRQ, each per-RX-
 * queue MSI-X IRQ, and finally the PCI memory BAR (if mapped).
 */
907 ixl_free_pci_resources(struct ixl_pf *pf)
909 struct ixl_vsi *vsi = &pf->vsi;
910 device_t dev = iflib_get_dev(vsi->ctx);
911 struct ixl_rx_queue *rx_que = vsi->rx_queues;
913 /* We may get here before stations are set up */
918 ** Release all MSI-X VSI resources:
920 iflib_irq_free(vsi->ctx, &vsi->irq);
922 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
923 iflib_irq_free(vsi->ctx, &rx_que->que_irq);
925 if (pf->pci_mem != NULL)
926 bus_release_resource(dev, SYS_RES_MEMORY,
927 rman_get_rid(pf->pci_mem), pf->pci_mem);
/*
 * Translate the PHY-type capability bitmap into ifmedia entries so
 * ifconfig can display/select every media type the port supports.
 */
931 ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
933 /* Display supported media types */
934 if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
935 ifmedia_add(vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
937 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
938 ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
939 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
940 ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
941 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
942 ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
/* Several 10G copper/backplane attachments collapse to TWINAX. */
944 if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
945 phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
946 phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
947 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
949 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
950 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
951 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
952 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
953 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
954 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
956 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
957 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
958 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
959 phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
960 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
961 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
962 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
963 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
964 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
965 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
967 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
968 ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
970 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
971 || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
972 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
973 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
974 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
975 if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
976 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
977 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
978 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
979 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
980 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
982 if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
983 ifmedia_add(vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
985 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
986 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
987 if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
988 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
990 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
991 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
992 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
993 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
994 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
995 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
996 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
997 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
998 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
999 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
1000 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
1001 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
1004 /*********************************************************************
1006 * Setup networking device structure and register an interface.
1008 **********************************************************************/
/*
 * Finish ifnet setup: compute the max frame size from the MTU, query PHY
 * abilities (with one retry for slow fiber detection), record supported
 * speeds, set the baudrate, and populate the ifmedia list with autoselect
 * as the default.
 */
1010 ixl_setup_interface(device_t dev, struct ixl_pf *pf)
1012 struct ixl_vsi *vsi = &pf->vsi;
1013 if_ctx_t ctx = vsi->ctx;
1014 struct i40e_hw *hw = &pf->hw;
1015 struct ifnet *ifp = iflib_get_ifp(ctx);
1016 struct i40e_aq_get_phy_abilities_resp abilities;
1017 enum i40e_status_code aq_error = 0;
1019 INIT_DBG_DEV(dev, "begin");
1021 vsi->shared->isc_max_frame_size =
1022 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1023 + ETHER_VLAN_ENCAP_LEN;
1025 aq_error = i40e_aq_get_phy_capabilities(hw,
1026 FALSE, TRUE, &abilities, NULL);
1027 /* May need delay to detect fiber correctly */
1028 if (aq_error == I40E_ERR_UNKNOWN_PHY) {
1029 /* TODO: Maybe just retry this in a task... */
1030 i40e_msec_delay(200);
1031 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
1032 TRUE, &abilities, NULL);
1035 if (aq_error == I40E_ERR_UNKNOWN_PHY)
1036 device_printf(dev, "Unknown PHY type detected!\n");
1039 "Error getting supported media types, err %d,"
1040 " AQ error %d\n", aq_error, hw->aq.asq_last_status);
1042 pf->supported_speeds = abilities.link_speed;
1043 #if __FreeBSD_version >= 1100000
1044 if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
1046 if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
1049 ixl_add_ifmedia(vsi, hw->phy.phy_types);
1052 /* Use autoselect media by default */
1053 ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1054 ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
1060 * Input: bitmap of enum i40e_aq_link_speed
/*
 * Return the baudrate (bits/sec) of the highest speed set in the bitmap,
 * checking from fastest to slowest; 100 Mb/s is the floor.
 */
1063 ixl_max_aq_speed_to_value(u8 link_speeds)
1065 if (link_speeds & I40E_LINK_SPEED_40GB)
1067 if (link_speeds & I40E_LINK_SPEED_25GB)
1069 if (link_speeds & I40E_LINK_SPEED_20GB)
1071 if (link_speeds & I40E_LINK_SPEED_10GB)
1073 if (link_speeds & I40E_LINK_SPEED_1GB)
1075 if (link_speeds & I40E_LINK_SPEED_100MB)
1076 return IF_Mbps(100);
1078 /* Minimum supported link speed */
1079 return IF_Mbps(100);
1083 ** Run when the Admin Queue gets a link state change interrupt.
/*
 * Refresh cached link state from the adapter and warn if link failed due
 * to an unqualified (unsupported) module. iflib handles the actual OS
 * link-state update elsewhere.
 */
1086 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1088 struct i40e_hw *hw = &pf->hw;
1089 device_t dev = iflib_get_dev(pf->vsi.ctx);
1090 struct i40e_aqc_get_link_status *status =
1091 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
1093 /* Request link status from adapter */
1094 hw->phy.get_link_info = TRUE;
1095 i40e_get_link_status(hw, &pf->link_up);
1097 /* Print out message if an unqualified module is found */
1098 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
1099 (pf->advertised_speed) &&
1100 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
1101 (!(status->link_info & I40E_AQ_LINK_UP)))
1102 device_printf(dev, "Link failed because "
1103 "an unqualified module was detected!\n");
1105 /* OS link info is updated elsewhere */
1108 /*********************************************************************
1110 * Get Firmware Switch configuration
1111 * - this will need to be more robust when more complex
1112 * switch configurations are enabled.
1114 **********************************************************************/
/*
 * Fetch the FW switch configuration via the admin queue and record this
 * VSI's seid/uplink/downlink from element 0 (single-VSI simplification).
 * Optionally dumps all reported elements when IXL_DBG_SWITCH_INFO is set.
 */
1116 ixl_switch_config(struct ixl_pf *pf)
1118 struct i40e_hw *hw = &pf->hw;
1119 struct ixl_vsi *vsi = &pf->vsi;
1120 device_t dev = iflib_get_dev(vsi->ctx);
1121 struct i40e_aqc_get_switch_config_resp *sw_config;
1122 u8 aq_buf[I40E_AQ_LARGE_BUF];
1126 memset(&aq_buf, 0, sizeof(aq_buf));
1127 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
1128 ret = i40e_aq_get_switch_config(hw, sw_config,
1129 sizeof(aq_buf), &next, NULL);
1131 device_printf(dev, "aq_get_switch_config() failed, error %d,"
1132 " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
1135 if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
1137 "Switch config: header reported: %d in structure, %d total\n",
1138 sw_config->header.num_reported, sw_config->header.num_total);
1139 for (int i = 0; i < sw_config->header.num_reported; i++) {
1141 "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
1142 sw_config->element[i].element_type,
1143 sw_config->element[i].seid,
1144 sw_config->element[i].uplink_seid,
1145 sw_config->element[i].downlink_seid);
1148 /* Simplified due to a single VSI */
1149 vsi->uplink_seid = sw_config->element[0].uplink_seid;
1150 vsi->downlink_seid = sw_config->element[0].downlink_seid;
1151 vsi->seid = sw_config->element[0].seid;
1155 /*********************************************************************
1157 * Initialize the VSI: this handles contexts, which means things
1158 * like the number of descriptors, buffer size,
1159 * plus we init the rings thru this function.
1161 **********************************************************************/
/*
 * Program the VSI context (queue map, VLAN stripping, iWARP TCP enable),
 * then write the per-queue HMC TX and RX contexts and (re)initialize the
 * driver's rings.  Touches hardware registers; statement order matters.
 */
1163 ixl_initialize_vsi(struct ixl_vsi *vsi)
1165 struct ixl_pf *pf = vsi->back;
1166 if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx);
1167 struct ixl_tx_queue *tx_que = vsi->tx_queues;
1168 struct ixl_rx_queue *rx_que = vsi->rx_queues;
1169 device_t dev = iflib_get_dev(vsi->ctx);
1170 struct i40e_hw *hw = vsi->hw;
1171 struct i40e_vsi_context ctxt;
/* Fetch the current VSI parameters from firmware before updating them. */
1175 memset(&ctxt, 0, sizeof(ctxt));
1176 ctxt.seid = vsi->seid;
1177 if (pf->veb_seid != 0)
1178 ctxt.uplink_seid = pf->veb_seid;
1179 ctxt.pf_num = hw->pf_id;
1180 err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
1182 device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
1183 " aq_error %d\n", err, hw->aq.asq_last_status);
1186 ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
1187 "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
1188 "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
1189 "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
1190 ctxt.uplink_seid, ctxt.vsi_number,
1191 ctxt.vsis_allocated, ctxt.vsis_unallocated,
1192 ctxt.flags, ctxt.pf_num, ctxt.vf_num,
1193 ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
1195 ** Set the queue and traffic class bits
1196 ** - when multiple traffic classes are supported
1197 ** this will need to be more robust.
1199 ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1200 ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
1201 /* In contig mode, que_mapping[0] is first queue index used by this VSI */
1202 ctxt.info.queue_mapping[0] = 0;
1204 * This VSI will only use traffic class 0; start traffic class 0's
1205 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
1206 * the driver may not use all of them).
/* fls() - 1 converts the allocated-queue count into the log2 encoding
 * the TC queue-number field expects. */
1208 tc_queues = fls(pf->qtag.num_allocated) - 1;
1209 ctxt.info.tc_mapping[0] = ((pf->qtag.first_qidx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
1210 & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
1211 ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
1212 & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
1214 /* Set VLAN receive stripping mode */
1215 ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
1216 ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
/* Strip VLAN tags in hardware only when the stack asked for it. */
1217 if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
1218 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1220 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1223 /* Set TCP Enable for iWARP capable VSI */
1224 if (ixl_enable_iwarp && pf->iw_enabled) {
1225 ctxt.info.valid_sections |=
1226 htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
1227 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
1230 /* Save VSI number and info for use later */
1231 vsi->vsi_num = ctxt.vsi_number;
1232 bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1234 /* Reset VSI statistics */
1235 ixl_vsi_reset_stats(vsi);
1236 vsi->hw_filters_add = 0;
1237 vsi->hw_filters_del = 0;
1239 ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
/* Push the modified context back to firmware. */
1241 err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
1243 device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
1244 " aq_error %d\n", err, hw->aq.asq_last_status);
/* Per-queue TX HMC context setup. */
1248 for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
1249 struct tx_ring *txr = &tx_que->txr;
1250 struct i40e_hmc_obj_txq tctx;
1253 /* Setup the HMC TX Context */
1254 bzero(&tctx, sizeof(tctx));
1255 tctx.new_context = 1;
1256 tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
1257 tctx.qlen = scctx->isc_ntxd[0];
1258 tctx.fc_ena = 0; /* Disable FCoE */
1260 * This value needs to pulled from the VSI that this queue
1261 * is assigned to. Index into array is traffic class.
1263 tctx.rdylist = vsi->info.qs_handle[0];
1265 * Set these to enable Head Writeback
1266 * - Address is last entry in TX ring (reserved for HWB index)
1267 * Leave these as 0 for Descriptor Writeback
1269 if (vsi->enable_head_writeback) {
1270 tctx.head_wb_ena = 1;
1271 tctx.head_wb_addr = txr->tx_paddr +
1272 (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
1274 tctx.head_wb_ena = 0;
1275 tctx.head_wb_addr = 0;
1277 tctx.rdylist_act = 0;
/* Clear any stale context before writing the new one. */
1278 err = i40e_clear_lan_tx_queue_context(hw, i);
1280 device_printf(dev, "Unable to clear TX context\n");
1283 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
1285 device_printf(dev, "Unable to set TX context\n");
1288 /* Associate the ring with this PF */
1289 txctl = I40E_QTX_CTL_PF_QUEUE;
1290 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
1291 I40E_QTX_CTL_PF_INDX_MASK);
1292 wr32(hw, I40E_QTX_CTL(i), txctl);
1295 /* Do ring (re)init */
1296 ixl_init_tx_ring(vsi, tx_que);
/* Per-queue RX HMC context setup. */
1298 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
1299 struct rx_ring *rxr = &rx_que->rxr;
1300 struct i40e_hmc_obj_rxq rctx;
1302 /* Next setup the HMC RX Context */
/* Pick the mbuf cluster size from the max frame size. */
1303 if (scctx->isc_max_frame_size <= MCLBYTES)
1304 rxr->mbuf_sz = MCLBYTES;
1306 rxr->mbuf_sz = MJUMPAGESIZE;
/* Largest receivable frame given buffer size and HW chain limit. */
1308 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
1310 /* Set up an RX context for the HMC */
1311 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
1312 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
1313 /* ignore header split for now */
1314 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
1315 rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
1316 scctx->isc_max_frame_size : max_rxmax;
1318 rctx.dsize = 1; /* do 32byte descriptors */
1319 rctx.hsplit_0 = 0; /* no header split */
1320 rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
1321 rctx.qlen = scctx->isc_nrxd[0];
1322 rctx.tphrdesc_ena = 1;
1323 rctx.tphwdesc_ena = 1;
1324 rctx.tphdata_ena = 0; /* Header Split related */
1325 rctx.tphhead_ena = 0; /* Header Split related */
1326 rctx.lrxqthresh = 1; /* Interrupt at <64 desc avail */
1329 rctx.showiv = 1; /* Strip inner VLAN header */
1330 rctx.fc_ena = 0; /* Disable FCoE */
1331 rctx.prefena = 1; /* Prefetch descriptors */
1333 err = i40e_clear_lan_rx_queue_context(hw, i);
1336 "Unable to clear RX context %d\n", i);
1339 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
1341 device_printf(dev, "Unable to set RX context %d\n", i);
/* Reset the RX tail pointer; iflib refills the ring later. */
1344 wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
/*
 * Drain the VSI's software MAC filter list (SLIST), removing each entry
 * from the head until the list is empty.
 */
1350 ixl_free_mac_filters(struct ixl_vsi *vsi)
1352 struct ixl_mac_filter *f;
1354 while (!SLIST_EMPTY(&vsi->ftl)) {
1355 f = SLIST_FIRST(&vsi->ftl);
1356 SLIST_REMOVE_HEAD(&vsi->ftl, next);
1362 ** Provide a update to the queue RX
1363 ** interrupt moderation value.
/*
 * Dynamically adjust the per-queue RX interrupt throttle rate (ITR):
 * classify recent traffic (bytes per current ITR) into a latency band,
 * pick a target ITR, exponentially smooth toward it, and write the result
 * to the queue's ITR register.  Falls back to the static setting when
 * dynamic RX ITR is disabled.
 */
1366 ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
1368 struct ixl_vsi *vsi = que->vsi;
1369 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1370 struct i40e_hw *hw = vsi->hw;
1371 struct rx_ring *rxr = &que->rxr;
1376 /* Idle, do nothing */
1377 if (rxr->bytes == 0)
1380 if (pf->dynamic_rx_itr) {
/* Normalize byte count by the current ITR to estimate load. */
1381 rx_bytes = rxr->bytes/rxr->itr;
1384 /* Adjust latency range */
1385 switch (rxr->latency) {
1386 case IXL_LOW_LATENCY:
1387 if (rx_bytes > 10) {
1388 rx_latency = IXL_AVE_LATENCY;
1389 rx_itr = IXL_ITR_20K;
1392 case IXL_AVE_LATENCY:
1393 if (rx_bytes > 20) {
1394 rx_latency = IXL_BULK_LATENCY;
1395 rx_itr = IXL_ITR_8K;
1396 } else if (rx_bytes <= 10) {
1397 rx_latency = IXL_LOW_LATENCY;
1398 rx_itr = IXL_ITR_100K;
1401 case IXL_BULK_LATENCY:
1402 if (rx_bytes <= 20) {
1403 rx_latency = IXL_AVE_LATENCY;
1404 rx_itr = IXL_ITR_20K;
1409 rxr->latency = rx_latency;
1411 if (rx_itr != rxr->itr) {
1412 /* do an exponential smoothing */
/* Weighted blend: new value dominates 10:9 vs old ITR. */
1413 rx_itr = (10 * rx_itr * rxr->itr) /
1414 ((9 * rx_itr) + rxr->itr);
1415 rxr->itr = min(rx_itr, IXL_MAX_ITR);
1416 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
1417 rxr->me), rxr->itr);
1419 } else { /* We may have have toggled to non-dynamic */
1420 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
1421 vsi->rx_itr_setting = pf->rx_itr;
1422 /* Update the hardware if needed */
1423 if (rxr->itr != vsi->rx_itr_setting) {
1424 rxr->itr = vsi->rx_itr_setting;
1425 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
1426 rxr->me), rxr->itr);
1435 ** Provide a update to the queue TX
1436 ** interrupt moderation value.
/*
 * TX-side twin of ixl_set_queue_rx_itr(): adjust the per-queue TX
 * interrupt throttle rate based on recent traffic, with exponential
 * smoothing, or apply the static setting when dynamic TX ITR is off.
 */
1439 ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
1441 struct ixl_vsi *vsi = que->vsi;
1442 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1443 struct i40e_hw *hw = vsi->hw;
1444 struct tx_ring *txr = &que->txr;
1450 /* Idle, do nothing */
1451 if (txr->bytes == 0)
1454 if (pf->dynamic_tx_itr) {
/* Normalize byte count by the current ITR to estimate load. */
1455 tx_bytes = txr->bytes/txr->itr;
1458 switch (txr->latency) {
1459 case IXL_LOW_LATENCY:
1460 if (tx_bytes > 10) {
1461 tx_latency = IXL_AVE_LATENCY;
1462 tx_itr = IXL_ITR_20K;
1465 case IXL_AVE_LATENCY:
1466 if (tx_bytes > 20) {
1467 tx_latency = IXL_BULK_LATENCY;
1468 tx_itr = IXL_ITR_8K;
1469 } else if (tx_bytes <= 10) {
1470 tx_latency = IXL_LOW_LATENCY;
1471 tx_itr = IXL_ITR_100K;
1474 case IXL_BULK_LATENCY:
1475 if (tx_bytes <= 20) {
1476 tx_latency = IXL_AVE_LATENCY;
1477 tx_itr = IXL_ITR_20K;
1482 txr->latency = tx_latency;
1484 if (tx_itr != txr->itr) {
1485 /* do an exponential smoothing */
1486 tx_itr = (10 * tx_itr * txr->itr) /
1487 ((9 * tx_itr) + txr->itr);
1488 txr->itr = min(tx_itr, IXL_MAX_ITR);
1489 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1490 txr->me), txr->itr);
1493 } else { /* We may have have toggled to non-dynamic */
1494 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
1495 vsi->tx_itr_setting = pf->tx_itr;
1496 /* Update the hardware if needed */
1497 if (txr->itr != vsi->tx_itr_setting) {
1498 txr->itr = vsi->tx_itr_setting;
1499 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1500 txr->me), txr->itr);
1510 * ixl_sysctl_qtx_tail_handler
1511 * Retrieves I40E_QTX_TAIL value from hardware
/*
 * Read-only sysctl handler: reads the TX queue tail register for the
 * queue passed via oid_arg1 and reports it to userland.
 */
1515 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
1517 struct ixl_tx_queue *tx_que;
1521 tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
1522 if (!tx_que) return 0;
1524 val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
1525 error = sysctl_handle_int(oidp, &val, 0, req);
1526 if (error || !req->newptr)
1532 * ixl_sysctl_qrx_tail_handler
1533 * Retrieves I40E_QRX_TAIL value from hardware
/*
 * Read-only sysctl handler: reads the RX queue tail register for the
 * queue passed via oid_arg1 and reports it to userland.
 */
1537 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
1539 struct ixl_rx_queue *rx_que;
1543 rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
1544 if (!rx_que) return 0;
1546 val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
1547 error = sysctl_handle_int(oidp, &val, 0, req);
1548 if (error || !req->newptr)
1555 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
1556 * Writes to the ITR registers immediately.
/*
 * Sysctl handler: validates the requested TX ITR (rejected while dynamic
 * TX ITR is enabled, or when outside [0, IXL_MAX_ITR]), stores it in the
 * PF, and reprograms the hardware ITR registers.
 */
1559 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
1561 struct ixl_pf *pf = (struct ixl_pf *)arg1;
1562 device_t dev = pf->dev;
1564 int requested_tx_itr;
1566 requested_tx_itr = pf->tx_itr;
1567 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
1568 if ((error) || (req->newptr == NULL))
1570 if (pf->dynamic_tx_itr) {
1572 "Cannot set TX itr value while dynamic TX itr is enabled\n");
1575 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
1577 "Invalid TX itr value; value must be between 0 and %d\n",
1582 pf->tx_itr = requested_tx_itr;
1583 ixl_configure_tx_itr(pf);
1589 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
1590 * Writes to the ITR registers immediately.
/*
 * Sysctl handler: validates the requested RX ITR (rejected while dynamic
 * RX ITR is enabled, or when outside [0, IXL_MAX_ITR]), stores it in the
 * PF, and reprograms the hardware ITR registers.
 */
1593 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
1595 struct ixl_pf *pf = (struct ixl_pf *)arg1;
1596 device_t dev = pf->dev;
1598 int requested_rx_itr;
1600 requested_rx_itr = pf->rx_itr;
1601 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
1602 if ((error) || (req->newptr == NULL))
1604 if (pf->dynamic_rx_itr) {
1606 "Cannot set RX itr value while dynamic RX itr is enabled\n");
1609 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
1611 "Invalid RX itr value; value must be between 0 and %d\n",
1616 pf->rx_itr = requested_rx_itr;
1617 ixl_configure_rx_itr(pf);
/*
 * Register the PF's sysctl statistics tree: the admin IRQ counter, the
 * PF VSI sysctls, per-queue sysctls, and the MAC port statistics.
 */
1623 ixl_add_hw_stats(struct ixl_pf *pf)
1625 struct ixl_vsi *vsi = &pf->vsi;
1626 device_t dev = iflib_get_dev(vsi->ctx);
1627 struct i40e_hw_port_stats *pf_stats = &pf->stats;
1629 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1630 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1631 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1633 /* Driver statistics */
1634 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
1635 CTLFLAG_RD, &pf->admin_irq,
1636 "Admin Queue IRQs received");
1638 ixl_add_vsi_sysctls(dev, vsi, ctx, "pf");
1640 ixl_add_queues_sysctls(dev, vsi);
1642 ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
/*
 * Create a "mac" sysctl node and attach one read-only UQUAD sysctl per
 * entry in a local table of i40e_hw_port_stats counters (errors, RX/TX
 * size histograms, flow-control XON/XOFF), plus the shared ethernet stats.
 */
1646 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
1647 struct sysctl_oid_list *child,
1648 struct i40e_hw_port_stats *stats)
1650 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1651 CTLFLAG_RD, NULL, "Mac Statistics");
1652 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
1654 struct i40e_eth_stats *eth_stats = &stats->eth;
1655 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
/* Table of counter pointer / sysctl name / description triples;
 * iterated below until the NULL-stat terminator. */
1657 struct ixl_sysctl_info ctls[] =
1659 {&stats->crc_errors, "crc_errors", "CRC Errors"},
1660 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
1661 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
1662 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
1663 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
1664 /* Packet Reception Stats */
1665 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
1666 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
1667 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
1668 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
1669 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
1670 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
1671 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
1672 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
1673 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
1674 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
1675 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
1676 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
1677 /* Packet Transmission Stats */
1678 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
1679 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
1680 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
1681 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
1682 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
1683 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
1684 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
1686 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
1687 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
1688 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
1689 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
1694 struct ixl_sysctl_info *entry = ctls;
1695 while (entry->stat != 0)
1697 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
1698 CTLFLAG_RD, entry->stat,
1699 entry->description);
/*
 * Program the RSS hash key.  The seed comes from the kernel RSS framework
 * when available, otherwise from the driver default.  X722 parts take the
 * key via an AQ command; older parts are programmed through the
 * PFQF_HKEY registers.
 */
1705 ixl_set_rss_key(struct ixl_pf *pf)
1707 struct i40e_hw *hw = &pf->hw;
1708 struct ixl_vsi *vsi = &pf->vsi;
1709 device_t dev = pf->dev;
1710 u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
1711 enum i40e_status_code status;
1714 /* Fetch the configured RSS key */
1715 rss_getkey((uint8_t *) &rss_seed);
1717 ixl_get_default_rss_key(rss_seed);
1719 /* Fill out hash function seed */
1720 if (hw->mac.type == I40E_MAC_X722) {
1721 struct i40e_aqc_get_set_rss_key_data key_data;
/* NOTE(review): 52 is presumably sizeof(key_data) — confirm it still
 * matches the AQ structure if the shared code is updated. */
1722 bcopy(rss_seed, &key_data, 52);
1723 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
1726 "i40e_aq_set_rss_key status %s, error %s\n",
1727 i40e_stat_str(hw, status),
1728 i40e_aq_str(hw, hw->aq.asq_last_status));
1730 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1731 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
1736 * Configure enabled PCTYPES for RSS.
/*
 * Build the hash-enable (HENA) bitmask of packet classifier types to
 * hash on — either from the kernel RSS framework's configured hash types
 * or from a per-MAC default — and write it to the split PFQF_HENA
 * register pair.
 */
1739 ixl_set_rss_pctypes(struct ixl_pf *pf)
1741 struct i40e_hw *hw = &pf->hw;
1742 u64 set_hena = 0, hena;
1745 u32 rss_hash_config;
1747 rss_hash_config = rss_gethashconfig();
1748 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1749 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1750 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1751 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1752 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1753 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1754 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1755 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1756 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1757 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1758 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1759 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1760 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1761 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
/* Non-RSS-framework path: per-MAC default HENA value. */
1763 if (hw->mac.type == I40E_MAC_X722)
1764 set_hena = IXL_DEFAULT_RSS_HENA_X722;
1766 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
/* HENA is a 64-bit value split across two 32-bit registers. */
1768 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1769 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1771 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1772 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
/*
 * Populate the RSS lookup table (HLUT), spreading entries across the RX
 * queues round-robin (or via the kernel RSS indirection table when
 * enabled).  X722 parts set the LUT through an AQ command; older parts
 * write the PFQF_HLUT registers four entries at a time.
 */
1779 ixl_set_rss_hlut(struct ixl_pf *pf)
1780 struct i40e_hw *hw = &pf->hw;
1781 struct ixl_vsi *vsi = &pf->vsi;
1782 device_t dev = iflib_get_dev(vsi->ctx);
1783 int lut_entry_width;
1785 enum i40e_status_code status;
1787 lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
1789 /* Populate the LUT with max no. of queues in round robin fashion */
1791 for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
1794 * Fetch the RSS bucket id for the given indirection entry.
1795 * Cap it at the number of configured buckets (which is
1798 que_id = rss_get_indirection_to_bucket(i);
1799 que_id = que_id % vsi->num_rx_queues;
1801 que_id = i % vsi->num_rx_queues;
/* Mask the queue id down to the hardware LUT entry width. */
1803 lut = (que_id & ((0x1 << lut_entry_width) - 1));
1807 if (hw->mac.type == I40E_MAC_X722) {
1808 status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
1810 device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1811 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
/* Registers take four 8-bit LUT entries per 32-bit write. */
1813 for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
1814 wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
1820 ** Setup the PF's RSS parameters.
/*
 * Configure all RSS state for the PF: hash key, enabled packet
 * classifier types, and the lookup table, in that order.
 */
1823 ixl_config_rss(struct ixl_pf *pf)
1825 ixl_set_rss_key(pf);
1826 ixl_set_rss_pctypes(pf);
1827 ixl_set_rss_hlut(pf);
1831 ** This routine updates vlan filters, called by init
1832 ** it scans the filter table and then updates the hw
1833 ** after a soft reset.
/*
 * Re-install VLAN filters after a reset: no-op when no VLANs are
 * configured; otherwise scan the software filter list for VLAN entries,
 * mark them for (re)addition, and push them to hardware in one AQ call.
 */
1836 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
1838 struct ixl_mac_filter *f;
1841 if (vsi->num_vlans == 0)
1844 ** Scan the filter list for vlan entries,
1845 ** mark them for addition and then call
1846 ** for the AQ update.
1848 SLIST_FOREACH(f, &vsi->ftl, next) {
1849 if (f->flags & IXL_FILTER_VLAN) {
1857 printf("setup vlan: no filters found!\n");
1860 flags = IXL_FILTER_VLAN;
1861 flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
1862 ixl_add_hw_filters(vsi, flags, cnt);
1866 * In some firmware versions there is default MAC/VLAN filter
1867 * configured which interferes with filters managed by driver.
1868 * Make sure it's removed.
/*
 * Issue two remove-macvlan AQ calls for the permanent MAC address — one
 * exact match, one ignoring the VLAN — to clear any firmware-installed
 * default filter.  AQ status is intentionally ignored (best effort:
 * the filter may not exist).
 */
1871 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1873 struct i40e_aqc_remove_macvlan_element_data e;
1875 bzero(&e, sizeof(e));
1876 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1878 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1879 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1881 bzero(&e, sizeof(e));
1882 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1884 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1885 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1886 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1890 ** Initialize filter list and add filters that the hardware
1891 ** needs to know about.
1893 ** Requires VSI's filter list & seid to be set before calling.
/*
 * Set up the VSI's baseline filtering state: empty software list,
 * broadcast reception enabled, stale firmware default filters removed,
 * the device MAC added for any VLAN, and (optionally) a filter that
 * drops driver-originated Tx flow-control frames PF-wide.
 */
1896 ixl_init_filters(struct ixl_vsi *vsi)
1898 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1900 /* Initialize mac filter list for VSI */
1901 SLIST_INIT(&vsi->ftl);
1903 /* Receive broadcast Ethernet frames */
1904 i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1906 ixl_del_default_hw_filters(vsi);
1908 ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1910 * Prevent Tx flow control frames from being sent out by
1911 * non-firmware transmitters.
1912 * This affects every VSI in the PF.
1914 if (pf->enable_tx_fc_filter)
1915 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1919 ** This routine adds mulicast filters
/*
 * Add a multicast MAC filter (VLAN-agnostic).  Skips the add if a
 * matching filter already exists; tags new entries with IXL_FILTER_MC.
 */
1922 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
1924 struct ixl_mac_filter *f;
1926 /* Does one already exist */
1927 f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY)
1931 f = ixl_new_filter(vsi, macaddr, IXL_VLAN_ANY);
1933 f->flags |= IXL_FILTER_MC;
1935 printf("WARNING: no filter available!!\n");
/* Re-push every in-use software filter to hardware (e.g. after reset). */
1939 ixl_reconfigure_filters(struct ixl_vsi *vsi)
1941 ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
1945 * This routine adds a MAC/VLAN filter to the software filter
1946 * list, then adds that new filter to the HW if it doesn't already
1947 * exist in the SW filter list.
/*
 * Add a MAC/VLAN pair.  When the first real VLAN is registered, the
 * pre-existing "any VLAN" filter for the MAC is swapped for a VLAN-0
 * filter so untagged traffic is still received.  New filters are marked
 * USED (and VLAN, when applicable) and pushed to hardware immediately.
 */
1950 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1952 struct ixl_mac_filter *f, *tmp;
1956 DEBUGOUT("ixl_add_filter: begin");
1961 /* Does one already exist */
1962 f = ixl_find_filter(vsi, macaddr, vlan);
1966 ** Is this the first vlan being registered, if so we
1967 ** need to remove the ANY filter that indicates we are
1968 ** not in a vlan, and replace that with a 0 filter.
1970 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1971 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1973 ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
1974 ixl_add_filter(vsi, macaddr, 0);
1978 f = ixl_new_filter(vsi, macaddr, vlan);
1980 device_printf(dev, "WARNING: no filter available!!\n");
1983 if (f->vlan != IXL_VLAN_ANY)
1984 f->flags |= IXL_FILTER_VLAN;
1988 f->flags |= IXL_FILTER_USED;
1989 ixl_add_hw_filters(vsi, f->flags, 1);
/*
 * Remove a MAC/VLAN filter: mark the matching software entry for
 * deletion and push the removal to hardware.  When the last VLAN for a
 * MAC is removed, the VLAN-0 filter is replaced with an "any VLAN"
 * filter again (inverse of the swap done in ixl_add_filter()).
 */
1993 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1995 struct ixl_mac_filter *f;
1997 f = ixl_find_filter(vsi, macaddr, vlan);
2001 f->flags |= IXL_FILTER_DEL;
2002 ixl_del_hw_filters(vsi, 1);
2003 if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
2006 /* Check if this is the last vlan removal */
2007 if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
2008 /* Switch back to a non-vlan filter */
2009 ixl_del_filter(vsi, macaddr, 0);
2010 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
2016 ** Find the filter with both matching mac addr and vlan id
/*
 * Linear search of the VSI's software filter list for an exact
 * MAC + VLAN match.
 * NOTE(review): this relies on cmp_etheraddr() returning nonzero when
 * the addresses are EQUAL — confirm against its definition.
 */
2018 struct ixl_mac_filter *
2019 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
2021 struct ixl_mac_filter *f;
2023 SLIST_FOREACH(f, &vsi->ftl, next) {
2024 if ((cmp_etheraddr(f->macaddr, macaddr) != 0)
2025 && (f->vlan == vlan)) {
2034 ** This routine takes additions to the vsi filter
2035 ** table and creates an Admin Queue call to create
2036 ** the filters in the hardware.
/*
 * Gather up to 'cnt' software filters whose flags match 'flags' into a
 * temporary AQ element array (M_NOWAIT allocation), clear their ADD
 * marker, and submit a single add-macvlan AQ command.  The VSI's
 * hw_filters_add counter is advanced by the number submitted.
 */
2039 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
2041 struct i40e_aqc_add_macvlan_element_data *a, *b;
2042 struct ixl_mac_filter *f;
2046 enum i40e_status_code status;
2054 ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
2058 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
2059 M_DEVBUF, M_NOWAIT | M_ZERO);
2061 device_printf(dev, "add_hw_filters failed to get memory\n");
2066 ** Scan the filter list, each time we find one
2067 ** we add it to the admin queue array and turn off
2070 SLIST_FOREACH(f, &vsi->ftl, next) {
2071 if ((f->flags & flags) == flags) {
2072 b = &a[j]; // a pox on fvl long names :)
2073 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
/* IXL_VLAN_ANY entries match regardless of VLAN tag. */
2074 if (f->vlan == IXL_VLAN_ANY) {
2076 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2078 b->vlan_tag = f->vlan;
2081 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2082 f->flags &= ~IXL_FILTER_ADD;
2085 ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
2086 MAC_FORMAT_ARGS(f->macaddr));
2092 status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
2094 device_printf(dev, "i40e_aq_add_macvlan status %s, "
2095 "error %s\n", i40e_stat_str(hw, status),
2096 i40e_aq_str(hw, hw->aq.asq_last_status));
2098 vsi->hw_filters_add += j;
2105 ** This routine takes removals in the vsi filter
2106 ** table and creates an Admin Queue call to delete
2107 ** the filters in the hardware.
/*
 * Collect up to 'cnt' software filters marked IXL_FILTER_DEL into a
 * temporary AQ element array, unlink them from the software list, and
 * submit one remove-macvlan AQ command.  On AQ failure, per-element
 * error codes are inspected to count how many deletions actually took.
 */
2110 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
2112 struct i40e_aqc_remove_macvlan_element_data *d, *e;
2116 struct ixl_mac_filter *f, *f_temp;
2117 enum i40e_status_code status;
2124 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
2125 M_DEVBUF, M_NOWAIT | M_ZERO);
2127 device_printf(dev, "%s: failed to get memory\n", __func__);
/* SAFE variant: entries are unlinked while iterating. */
2131 SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
2132 if (f->flags & IXL_FILTER_DEL) {
2133 e = &d[j]; // a pox on fvl long names :)
2134 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
2135 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2136 if (f->vlan == IXL_VLAN_ANY) {
2138 e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2140 e->vlan_tag = f->vlan;
2143 ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
2144 MAC_FORMAT_ARGS(f->macaddr));
2146 /* delete entry from vsi list */
2147 SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
2155 status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
/* On error, count per-element successes so stats stay accurate. */
2158 for (int i = 0; i < j; i++)
2159 sc += (!d[i].error_code);
2160 vsi->hw_filters_del += sc;
2162 "Failed to remove %d/%d filters, error %s\n",
2163 j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status));
2165 vsi->hw_filters_del += j;
/*
 * Enable one TX ring: map the VSI-relative queue index to the PF index,
 * pre-configure the queue, set the QENA_REQ bit, and poll up to 10 times
 * (10us apart) for hardware to report QENA_STAT.  Logs if the enable
 * never sticks.
 */
2172 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2174 struct i40e_hw *hw = &pf->hw;
2179 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2181 ixl_dbg(pf, IXL_DBG_EN_DIS,
2182 "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
2185 i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
2187 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2188 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
2189 I40E_QTX_ENA_QENA_STAT_MASK;
2190 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2191 /* Verify the enable took */
2192 for (int j = 0; j < 10; j++) {
2193 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2194 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2196 i40e_usec_delay(10);
2198 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
2199 device_printf(pf->dev, "TX queue %d still disabled!\n",
/*
 * Enable one RX ring: RX-side twin of ixl_enable_tx_ring() — set
 * QENA_REQ on the QRX_ENA register and poll for QENA_STAT, logging if
 * the enable never sticks.
 */
2208 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2210 struct i40e_hw *hw = &pf->hw;
2215 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2217 ixl_dbg(pf, IXL_DBG_EN_DIS,
2218 "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
2221 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2222 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
2223 I40E_QRX_ENA_QENA_STAT_MASK;
2224 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2225 /* Verify the enable took */
2226 for (int j = 0; j < 10; j++) {
2227 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2228 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2230 i40e_usec_delay(10);
2232 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
2233 device_printf(pf->dev, "RX queue %d still disabled!\n",
/* Enable one queue pair: TX ring first, then RX ring. */
2242 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2246 error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
2247 /* Called function already prints error message */
2250 error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
2254 /* For PF VSI only */
/*
 * Enable every TX then every RX ring of the PF VSI.
 * NOTE(review): 'error' is reassigned on each iteration — confirm the
 * intended early-exit/error-aggregation behavior.
 */
2256 ixl_enable_rings(struct ixl_vsi *vsi)
2258 struct ixl_pf *pf = vsi->back;
2261 for (int i = 0; i < vsi->num_tx_queues; i++)
2262 error = ixl_enable_tx_ring(pf, &pf->qtag, i);
2264 for (int i = 0; i < vsi->num_rx_queues; i++)
2265 error = ixl_enable_rx_ring(pf, &pf->qtag, i);
2271 * Returns error on first ring that is detected hung.
/*
 * Disable one TX ring: pre-configure the queue for disable, wait 500us,
 * clear QENA_REQ, then poll up to 10 times (10ms apart) for QENA_STAT
 * to drop.  Logs if the queue stays enabled.
 */
2274 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2276 struct i40e_hw *hw = &pf->hw;
2281 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2283 i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
2284 i40e_usec_delay(500);
2286 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2287 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2288 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2289 /* Verify the disable took */
2290 for (int j = 0; j < 10; j++) {
2291 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2292 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2294 i40e_msec_delay(10);
2296 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
2297 device_printf(pf->dev, "TX queue %d still enabled!\n",
2306 * Returns error on first ring that is detected hung.
/*
 * Disable one RX ring: clear QENA_REQ on the QRX_ENA register and poll
 * for QENA_STAT to drop, logging if the queue stays enabled.
 */
2309 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2311 struct i40e_hw *hw = &pf->hw;
2316 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2318 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2319 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2320 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2321 /* Verify the disable took */
2322 for (int j = 0; j < 10; j++) {
2323 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2324 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2326 i40e_msec_delay(10);
2328 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
2329 device_printf(pf->dev, "RX queue %d still enabled!\n",
/* Disable one queue pair: TX ring first, then RX ring. */
2338 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2342 error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
2343 /* Called function already prints error message */
2346 error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
/*
 * Disable every TX then every RX ring of the given VSI.
 * NOTE(review): 'error' is reassigned on each iteration — confirm the
 * intended early-exit/error-aggregation behavior.
 */
2351 ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag)
2355 for (int i = 0; i < vsi->num_tx_queues; i++)
2356 error = ixl_disable_tx_ring(pf, qtag, i);
2358 for (int i = 0; i < vsi->num_rx_queues; i++)
2359 error = ixl_disable_rx_ring(pf, qtag, i);
/*
 * Handle a TX Malicious Driver Detection (MDD) event: decode the global
 * GL_MDET_TX register (PF/VF number, event type, queue), clear it by
 * writing all-ones, then check the PF-local and per-VF MDET_TX registers
 * to attribute the event, and log an appropriate message.
 */
2365 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
2367 struct i40e_hw *hw = &pf->hw;
2368 device_t dev = pf->dev;
2370 bool mdd_detected = false;
2371 bool pf_mdd_detected = false;
2372 bool vf_mdd_detected = false;
2375 u8 pf_mdet_num, vp_mdet_num;
2378 /* find what triggered the MDD event */
2379 reg = rd32(hw, I40E_GL_MDET_TX);
2380 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
2381 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
2382 I40E_GL_MDET_TX_PF_NUM_SHIFT;
2383 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
2384 I40E_GL_MDET_TX_VF_NUM_SHIFT;
2385 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
2386 I40E_GL_MDET_TX_EVENT_SHIFT;
2387 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
2388 I40E_GL_MDET_TX_QUEUE_SHIFT;
/* Write-1-to-clear the latched event. */
2389 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
2390 mdd_detected = true;
2396 reg = rd32(hw, I40E_PF_MDET_TX);
2397 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
2398 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
2399 pf_mdet_num = hw->pf_id;
2400 pf_mdd_detected = true;
2403 /* Check if MDD was caused by a VF */
2404 for (int i = 0; i < pf->num_vfs; i++) {
2406 reg = rd32(hw, I40E_VP_MDET_TX(i));
2407 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
2408 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
2410 vf->num_mdd_events++;
2411 vf_mdd_detected = true;
2415 /* Print out an error message */
2416 if (vf_mdd_detected && pf_mdd_detected)
2418 "Malicious Driver Detection event %d"
2419 " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
2420 event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
2421 else if (vf_mdd_detected && !pf_mdd_detected)
2423 "Malicious Driver Detection event %d"
2424 " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
2425 event, queue, pf_num, vf_num, vp_mdet_num);
2426 else if (!vf_mdd_detected && pf_mdd_detected)
2428 "Malicious Driver Detection event %d"
2429 " on TX queue %d, pf number %d (PF-%d)\n",
2430 event, queue, pf_num, pf_mdet_num);
2431 /* Theoretically shouldn't happen */
2434 "TX Malicious Driver Detection event (unknown)\n");
/*
 * ixl_handle_rx_mdd_event - process an RX Malicious Driver Detection event.
 *
 * Mirrors the TX handler: reads I40E_GL_MDET_RX, clears the latched
 * GL/PF/VP detection registers, bumps the offending VF's counter, and
 * logs the source. Unlike GL_MDET_TX, GL_MDET_RX reports only a function
 * number (no separate VF number field).
 *
 * NOTE(review): this listing has elided lines (declarations, braces and
 * device_printf() openers); only the visible statements are documented.
 */
2438 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
2440 struct i40e_hw *hw = &pf->hw;
2441 device_t dev = pf->dev;
2443 bool mdd_detected = false;
2444 bool pf_mdd_detected = false;
2445 bool vf_mdd_detected = false;
2448 u8 pf_mdet_num, vp_mdet_num;
2452 * GL_MDET_RX doesn't contain VF number information, unlike
2455 reg = rd32(hw, I40E_GL_MDET_RX);
2456 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
2457 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
2458 I40E_GL_MDET_RX_FUNCTION_SHIFT;
2459 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
2460 I40E_GL_MDET_RX_EVENT_SHIFT;
2461 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
2462 I40E_GL_MDET_RX_QUEUE_SHIFT;
/* Writing all-ones clears the latched global detection event. */
2463 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
2464 mdd_detected = true;
2470 reg = rd32(hw, I40E_PF_MDET_RX);
2471 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
2472 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
2473 pf_mdet_num = hw->pf_id;
2474 pf_mdd_detected = true;
2477 /* Check if MDD was caused by a VF */
2478 for (int i = 0; i < pf->num_vfs; i++) {
2480 reg = rd32(hw, I40E_VP_MDET_RX(i));
2481 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
2482 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
/* NOTE(review): 'vf' and 'vp_mdet_num' are presumably assigned in elided lines — verify. */
2484 vf->num_mdd_events++;
2485 vf_mdd_detected = true;
2489 /* Print out an error message */
2490 if (vf_mdd_detected && pf_mdd_detected)
2492 "Malicious Driver Detection event %d"
2493 " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
2494 event, queue, pf_num, pf_mdet_num, vp_mdet_num);
2495 else if (vf_mdd_detected && !pf_mdd_detected)
2497 "Malicious Driver Detection event %d"
2498 " on RX queue %d, pf number %d, (VF-%d)\n",
2499 event, queue, pf_num, vp_mdet_num);
2500 else if (!vf_mdd_detected && pf_mdd_detected)
2502 "Malicious Driver Detection event %d"
2503 " on RX queue %d, pf number %d (PF-%d)\n",
2504 event, queue, pf_num, pf_mdet_num);
2505 /* Theoretically shouldn't happen */
2508 "RX Malicious Driver Detection event (unknown)\n");
2512 * ixl_handle_mdd_event
2514 * Called from interrupt handler to identify possibly malicious vfs
2515 * (But also detects events from the PF, as well)
2518 ixl_handle_mdd_event(struct ixl_pf *pf)
2520 struct i40e_hw *hw = &pf->hw;
2524 * Handle both TX/RX because it's possible they could
2525 * both trigger in the same interrupt.
2527 ixl_handle_tx_mdd_event(pf);
2528 ixl_handle_rx_mdd_event(pf);
2530 atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
2532 /* re-enable mdd interrupt cause */
2533 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2534 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2535 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/*
 * ixl_enable_intr - enable interrupts for a VSI.
 *
 * In MSI-X mode each RX queue's vector is unmasked individually via
 * ixl_enable_queue(); ixl_enable_intr0() covers the vector-0
 * (admin/other-cause) path.
 *
 * NOTE(review): a line appears elided between the loop and the
 * ixl_enable_intr0() call (likely an "else"); confirm the exact control
 * flow against the full source.
 */
2540 ixl_enable_intr(struct ixl_vsi *vsi)
2542 struct i40e_hw *hw = vsi->hw;
2543 struct ixl_rx_queue *que = vsi->rx_queues;
2545 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2546 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2547 ixl_enable_queue(hw, que->rxr.me);
2549 ixl_enable_intr0(hw);
2553 ixl_disable_rings_intr(struct ixl_vsi *vsi)
2555 struct i40e_hw *hw = vsi->hw;
2556 struct ixl_rx_queue *que = vsi->rx_queues;
2558 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2559 ixl_disable_queue(hw, que->rxr.me);
2563 ixl_enable_intr0(struct i40e_hw *hw)
2567 /* Use IXL_ITR_NONE so ITR isn't updated here */
2568 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2569 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2570 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2571 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2575 ixl_disable_intr0(struct i40e_hw *hw)
2579 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
2580 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2585 ixl_enable_queue(struct i40e_hw *hw, int id)
2589 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2590 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2591 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2592 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2596 ixl_disable_queue(struct i40e_hw *hw, int id)
2600 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2601 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/*
 * ixl_update_stats_counters - refresh the PF's port-level hardware stats.
 *
 * Reads every GLPRT_* port counter through ixl_stat_update32/48, which
 * subtract the first-read offsets (the hardware counters are not zeroed
 * by a PF reset). Afterwards the PF VSI stats are refreshed and, for each
 * enabled VF, that VF's VSI ethernet stats as well.
 *
 * NOTE(review): this listing has elided lines (braces, blank lines, and
 * apparently a 'struct ixl_vf *vf' assignment in the VF loop).
 */
2605 ixl_update_stats_counters(struct ixl_pf *pf)
2607 struct i40e_hw *hw = &pf->hw;
2608 struct ixl_vsi *vsi = &pf->vsi;
2611 struct i40e_hw_port_stats *nsd = &pf->stats;
2612 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
2614 /* Update hw stats */
2615 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
2616 pf->stat_offsets_loaded,
2617 &osd->crc_errors, &nsd->crc_errors);
2618 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
2619 pf->stat_offsets_loaded,
2620 &osd->illegal_bytes, &nsd->illegal_bytes);
2621 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
2622 I40E_GLPRT_GORCL(hw->port),
2623 pf->stat_offsets_loaded,
2624 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
2625 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
2626 I40E_GLPRT_GOTCL(hw->port),
2627 pf->stat_offsets_loaded,
2628 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
2629 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
2630 pf->stat_offsets_loaded,
2631 &osd->eth.rx_discards,
2632 &nsd->eth.rx_discards);
2633 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
2634 I40E_GLPRT_UPRCL(hw->port),
2635 pf->stat_offsets_loaded,
2636 &osd->eth.rx_unicast,
2637 &nsd->eth.rx_unicast);
2638 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
2639 I40E_GLPRT_UPTCL(hw->port),
2640 pf->stat_offsets_loaded,
2641 &osd->eth.tx_unicast,
2642 &nsd->eth.tx_unicast);
2643 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
2644 I40E_GLPRT_MPRCL(hw->port),
2645 pf->stat_offsets_loaded,
2646 &osd->eth.rx_multicast,
2647 &nsd->eth.rx_multicast);
2648 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
2649 I40E_GLPRT_MPTCL(hw->port),
2650 pf->stat_offsets_loaded,
2651 &osd->eth.tx_multicast,
2652 &nsd->eth.tx_multicast);
2653 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
2654 I40E_GLPRT_BPRCL(hw->port),
2655 pf->stat_offsets_loaded,
2656 &osd->eth.rx_broadcast,
2657 &nsd->eth.rx_broadcast);
2658 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
2659 I40E_GLPRT_BPTCL(hw->port),
2660 pf->stat_offsets_loaded,
2661 &osd->eth.tx_broadcast,
2662 &nsd->eth.tx_broadcast);
/* Link-fault and length-error counters */
2664 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2665 pf->stat_offsets_loaded,
2666 &osd->tx_dropped_link_down,
2667 &nsd->tx_dropped_link_down);
2668 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2669 pf->stat_offsets_loaded,
2670 &osd->mac_local_faults,
2671 &nsd->mac_local_faults);
2672 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2673 pf->stat_offsets_loaded,
2674 &osd->mac_remote_faults,
2675 &nsd->mac_remote_faults);
2676 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2677 pf->stat_offsets_loaded,
2678 &osd->rx_length_errors,
2679 &nsd->rx_length_errors);
2681 /* Flow control (LFC) stats */
2682 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2683 pf->stat_offsets_loaded,
2684 &osd->link_xon_rx, &nsd->link_xon_rx);
2685 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2686 pf->stat_offsets_loaded,
2687 &osd->link_xon_tx, &nsd->link_xon_tx);
2688 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2689 pf->stat_offsets_loaded,
2690 &osd->link_xoff_rx, &nsd->link_xoff_rx);
2691 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2692 pf->stat_offsets_loaded,
2693 &osd->link_xoff_tx, &nsd->link_xoff_tx);
2695 /* Packet size stats rx */
2696 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2697 I40E_GLPRT_PRC64L(hw->port),
2698 pf->stat_offsets_loaded,
2699 &osd->rx_size_64, &nsd->rx_size_64);
2700 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2701 I40E_GLPRT_PRC127L(hw->port),
2702 pf->stat_offsets_loaded,
2703 &osd->rx_size_127, &nsd->rx_size_127);
2704 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2705 I40E_GLPRT_PRC255L(hw->port),
2706 pf->stat_offsets_loaded,
2707 &osd->rx_size_255, &nsd->rx_size_255);
2708 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2709 I40E_GLPRT_PRC511L(hw->port),
2710 pf->stat_offsets_loaded,
2711 &osd->rx_size_511, &nsd->rx_size_511);
2712 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2713 I40E_GLPRT_PRC1023L(hw->port),
2714 pf->stat_offsets_loaded,
2715 &osd->rx_size_1023, &nsd->rx_size_1023);
2716 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2717 I40E_GLPRT_PRC1522L(hw->port),
2718 pf->stat_offsets_loaded,
2719 &osd->rx_size_1522, &nsd->rx_size_1522);
2720 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2721 I40E_GLPRT_PRC9522L(hw->port),
2722 pf->stat_offsets_loaded,
2723 &osd->rx_size_big, &nsd->rx_size_big);
2725 /* Packet size stats tx */
2726 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2727 I40E_GLPRT_PTC64L(hw->port),
2728 pf->stat_offsets_loaded,
2729 &osd->tx_size_64, &nsd->tx_size_64);
2730 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2731 I40E_GLPRT_PTC127L(hw->port),
2732 pf->stat_offsets_loaded,
2733 &osd->tx_size_127, &nsd->tx_size_127);
2734 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2735 I40E_GLPRT_PTC255L(hw->port),
2736 pf->stat_offsets_loaded,
2737 &osd->tx_size_255, &nsd->tx_size_255);
2738 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2739 I40E_GLPRT_PTC511L(hw->port),
2740 pf->stat_offsets_loaded,
2741 &osd->tx_size_511, &nsd->tx_size_511);
2742 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2743 I40E_GLPRT_PTC1023L(hw->port),
2744 pf->stat_offsets_loaded,
2745 &osd->tx_size_1023, &nsd->tx_size_1023);
2746 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2747 I40E_GLPRT_PTC1522L(hw->port),
2748 pf->stat_offsets_loaded,
2749 &osd->tx_size_1522, &nsd->tx_size_1522);
2750 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2751 I40E_GLPRT_PTC9522L(hw->port),
2752 pf->stat_offsets_loaded,
2753 &osd->tx_size_big, &nsd->tx_size_big);
/* RX error-class counters */
2755 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2756 pf->stat_offsets_loaded,
2757 &osd->rx_undersize, &nsd->rx_undersize);
2758 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2759 pf->stat_offsets_loaded,
2760 &osd->rx_fragments, &nsd->rx_fragments);
2761 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2762 pf->stat_offsets_loaded,
2763 &osd->rx_oversize, &nsd->rx_oversize);
2764 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2765 pf->stat_offsets_loaded,
2766 &osd->rx_jabber, &nsd->rx_jabber);
/* First pass has now captured the baseline offsets. */
2767 pf->stat_offsets_loaded = true;
2770 /* Update vsi stats */
2771 ixl_update_vsi_stats(vsi);
2773 for (int i = 0; i < pf->num_vfs; i++) {
2775 if (vf->vf_flags & VF_FLAG_ENABLED)
2776 ixl_update_eth_stats(&pf->vfs[i].vsi);
/*
 * ixl_prepare_for_reset - quiesce the device ahead of a reset.
 *
 * Shuts down the LAN HMC and the Admin Queue, masks the other-cause
 * interrupt (vector 0), and releases the PF's queue-manager allocation.
 * Shutdown failures are only logged; teardown continues regardless.
 *
 * NOTE(review): the 'if (error)' guards and device_printf() openers are
 * elided from this listing; 'is_up' is not used in any visible line.
 */
2781 ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
2783 struct i40e_hw *hw = &pf->hw;
2784 device_t dev = pf->dev;
2787 error = i40e_shutdown_lan_hmc(hw);
2790 "Shutdown LAN HMC failed with code %d\n", error);
2792 ixl_disable_intr0(hw);
2794 error = i40e_shutdown_adminq(hw);
2797 "Shutdown Admin queue failed with code %d\n", error);
2799 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
/*
 * ixl_rebuild_hw_structs_after_reset - re-initialize the device after a
 * PF/EMP reset.
 *
 * Performs a PF reset, brings the Admin Queue back up, re-reads HW
 * capabilities, re-initializes and configures the LAN HMC, re-reserves
 * the PF VSI's queues, reapplies switch/PHY/flow-control configuration,
 * removes the default filters the firmware reinstalls on reset, and
 * re-arms the admin-queue interrupt in MSI-X mode. On any failure it
 * jumps to the error label and tells the user to reload the driver.
 *
 * NOTE(review): 'if (error)' guards, 'return' statements and braces are
 * elided from this listing.
 */
2804 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
2806 struct i40e_hw *hw = &pf->hw;
2807 struct ixl_vsi *vsi = &pf->vsi;
2808 device_t dev = pf->dev;
2811 device_printf(dev, "Rebuilding driver state...\n");
2813 error = i40e_pf_reset(hw);
2815 device_printf(dev, "PF reset failure %s\n",
2816 i40e_stat_str(hw, error));
2817 goto ixl_rebuild_hw_structs_after_reset_err;
/* A firmware API version mismatch is tolerated here. */
2821 error = i40e_init_adminq(hw);
2822 if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
2823 device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
2825 goto ixl_rebuild_hw_structs_after_reset_err;
2828 i40e_clear_pxe_mode(hw);
2830 error = ixl_get_hw_capabilities(pf);
2832 device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
2833 goto ixl_rebuild_hw_structs_after_reset_err;
2836 error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
2837 hw->func_caps.num_rx_qp, 0, 0);
2839 device_printf(dev, "init_lan_hmc failed: %d\n", error);
2840 goto ixl_rebuild_hw_structs_after_reset_err;
2843 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
2845 device_printf(dev, "configure_lan_hmc failed: %d\n", error);
2846 goto ixl_rebuild_hw_structs_after_reset_err;
2849 /* reserve a contiguous allocation for the PF's VSI */
2850 error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
2852 device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
2854 /* TODO: error handling */
2857 error = ixl_switch_config(pf);
2859 device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
2862 goto ixl_rebuild_hw_structs_after_reset_err;
2865 error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
2868 device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
2869 " aq_err %d\n", error, hw->aq.asq_last_status);
2871 goto ixl_rebuild_hw_structs_after_reset_err;
2875 error = i40e_set_fc(hw, &set_fc_err_mask, true);
2877 device_printf(dev, "init: setting link flow control failed; retcode %d,"
2878 " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
2880 goto ixl_rebuild_hw_structs_after_reset_err;
2883 /* Remove default filters reinstalled by FW on reset */
2884 ixl_del_default_hw_filters(vsi);
2886 /* Determine link state */
2887 if (ixl_attach_get_link_status(pf)) {
2889 /* TODO: error handling */
2892 i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
2893 ixl_get_fw_lldp_status(pf);
2895 /* Keep admin queue interrupts active while driver is loaded */
2896 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2897 ixl_configure_intr0_msix(pf);
2898 ixl_enable_intr0(hw);
2901 device_printf(dev, "Rebuilding driver state done.\n");
2904 ixl_rebuild_hw_structs_after_reset_err:
2905 device_printf(dev, "Reload the driver to recover\n");
/*
 * ixl_handle_empr_reset - ride out an EMP (firmware) reset.
 *
 * Quiesces the device, polls I40E_GLGEN_RSTAT's DEVSTATE field (up to
 * 100 iterations with 100 ms delays, ~10 s max; typically 3-4 s) until
 * the reset completes, rebuilds the driver/HW state, and clears the
 * ADAPTER_RESETTING flag.
 *
 * NOTE(review): the loop's break condition is elided from this listing
 * (the visible lines show only the register read and the delay).
 */
2910 ixl_handle_empr_reset(struct ixl_pf *pf)
2912 struct ixl_vsi *vsi = &pf->vsi;
2913 struct i40e_hw *hw = &pf->hw;
2914 bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
2918 ixl_prepare_for_reset(pf, is_up);
2920 /* Typically finishes within 3-4 seconds */
2921 while (count++ < 100) {
2922 reg = rd32(hw, I40E_GLGEN_RSTAT)
2923 & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
2925 i40e_msec_delay(100);
2929 ixl_dbg(pf, IXL_DBG_INFO,
2930 "Reset wait count: %d\n", count);
2932 ixl_rebuild_hw_structs_after_reset(pf);
2934 atomic_clear_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
2938 * Update VSI-specific ethernet statistics counters.
/*
 * ixl_update_eth_stats - refresh one VSI's GLV_* ethernet counters.
 *
 * Uses the VSI's hardware stat index (info.stat_counter_idx) to read the
 * per-VSI counters, subtracting the saved first-read offsets, then marks
 * the offsets as loaded so subsequent calls report deltas from zero.
 *
 * NOTE(review): 'nsd' is declared but not used in any visible line, and
 * the assignment of 'oes'/'es' braces are partially elided.
 */
2941 ixl_update_eth_stats(struct ixl_vsi *vsi)
2943 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2944 struct i40e_hw *hw = &pf->hw;
2945 struct i40e_eth_stats *es;
2946 struct i40e_eth_stats *oes;
2947 struct i40e_hw_port_stats *nsd;
2948 u16 stat_idx = vsi->info.stat_counter_idx;
2950 es = &vsi->eth_stats;
2951 oes = &vsi->eth_stats_offsets;
2954 /* Gather up the stats that the hw collects */
2955 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2956 vsi->stat_offsets_loaded,
2957 &oes->tx_errors, &es->tx_errors);
2958 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2959 vsi->stat_offsets_loaded,
2960 &oes->rx_discards, &es->rx_discards);
2962 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2963 I40E_GLV_GORCL(stat_idx),
2964 vsi->stat_offsets_loaded,
2965 &oes->rx_bytes, &es->rx_bytes);
2966 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2967 I40E_GLV_UPRCL(stat_idx),
2968 vsi->stat_offsets_loaded,
2969 &oes->rx_unicast, &es->rx_unicast);
2970 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2971 I40E_GLV_MPRCL(stat_idx),
2972 vsi->stat_offsets_loaded,
2973 &oes->rx_multicast, &es->rx_multicast);
2974 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2975 I40E_GLV_BPRCL(stat_idx),
2976 vsi->stat_offsets_loaded,
2977 &oes->rx_broadcast, &es->rx_broadcast);
2979 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2980 I40E_GLV_GOTCL(stat_idx),
2981 vsi->stat_offsets_loaded,
2982 &oes->tx_bytes, &es->tx_bytes);
2983 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2984 I40E_GLV_UPTCL(stat_idx),
2985 vsi->stat_offsets_loaded,
2986 &oes->tx_unicast, &es->tx_unicast);
2987 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2988 I40E_GLV_MPTCL(stat_idx),
2989 vsi->stat_offsets_loaded,
2990 &oes->tx_multicast, &es->tx_multicast);
2991 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2992 I40E_GLV_BPTCL(stat_idx),
2993 vsi->stat_offsets_loaded,
2994 &oes->tx_broadcast, &es->tx_broadcast);
/* Baseline offsets are captured after the first pass. */
2995 vsi->stat_offsets_loaded = true;
/*
 * ixl_update_vsi_stats - refresh a VSI's hardware counters and publish
 * them to the ifnet layer via the IXL_SET_* accessor macros.
 *
 * NOTE(review): declarations/assignments for 'pf', 'nsd', 'tx_discards'
 * and the multi-line IPACKETS/OPACKETS/IERRORS argument lists are
 * partially elided from this listing; 'nsd' presumably points at the
 * PF's port stats — verify against the full source.
 */
2999 ixl_update_vsi_stats(struct ixl_vsi *vsi)
3003 struct i40e_eth_stats *es;
3006 struct i40e_hw_port_stats *nsd;
3010 es = &vsi->eth_stats;
3013 ixl_update_eth_stats(vsi);
3015 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
3017 /* Update ifnet stats */
3018 IXL_SET_IPACKETS(vsi, es->rx_unicast +
3021 IXL_SET_OPACKETS(vsi, es->tx_unicast +
3024 IXL_SET_IBYTES(vsi, es->rx_bytes);
3025 IXL_SET_OBYTES(vsi, es->tx_bytes);
3026 IXL_SET_IMCASTS(vsi, es->rx_multicast);
3027 IXL_SET_OMCASTS(vsi, es->tx_multicast);
3029 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
3030 nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
3032 IXL_SET_OERRORS(vsi, es->tx_errors);
3033 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
3034 IXL_SET_OQDROPS(vsi, tx_discards);
3035 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
3036 IXL_SET_COLLISIONS(vsi, 0);
3040 * Reset all of the stats for the given pf
3043 ixl_pf_reset_stats(struct ixl_pf *pf)
3045 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
3046 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
3047 pf->stat_offsets_loaded = false;
3051 * Resets all stats of the given vsi
3054 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
3056 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
3057 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
3058 vsi->stat_offsets_loaded = false;
3062 * Read and update a 48 bit stat from the hw
3064 * Since the device stats are not reset at PFReset, they likely will not
3065 * be zeroed when the driver starts. We'll save the first values read
3066 * and use them as offsets to be subtracted from the raw values in order
3067 * to report stats that count from zero.
/*
 * NOTE(review): the '#else'/'#endif' lines and the branch that captures
 * the first-read offset (per the header comment above) are elided from
 * this listing.
 */
3070 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
3071 bool offset_loaded, u64 *offset, u64 *stat)
3075 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
3076 new_data = rd64(hw, loreg);
3079 * Use two rd32's instead of one rd64; FreeBSD versions before
3080 * 10 don't support 64-bit bus reads/writes.
3082 new_data = rd32(hw, loreg);
3083 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
/* Subtract the baseline; account for 48-bit counter wraparound. */
3088 if (new_data >= *offset)
3089 *stat = new_data - *offset;
3091 *stat = (new_data + ((u64)1 << 48)) - *offset;
/* Result is masked to the 48 bits the hardware actually counts. */
3092 *stat &= 0xFFFFFFFFFFFFULL;
3096 * Read and update a 32 bit stat from the hw
/*
 * NOTE(review): the branch capturing the first-read offset is elided
 * from this listing (see the 48-bit variant's header comment).
 */
3099 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
3100 bool offset_loaded, u64 *offset, u64 *stat)
3104 new_data = rd32(hw, reg);
/* Subtract the baseline; account for 32-bit counter wraparound. */
3107 if (new_data >= *offset)
3108 *stat = (u32)(new_data - *offset);
3110 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
/*
 * ixl_add_device_sysctls - register the driver's sysctl tree.
 *
 * Adds the user-facing knobs (flow control, advertised/supported speeds,
 * ITR settings, FW version), a "fec" subtree on 25G-capable devices, the
 * fw_lldp toggle, and a CTLFLAG_SKIP "debug" subtree of diagnostic and
 * reset handlers (hidden from plain "sysctl -a" output).
 *
 * NOTE(review): braces, blank lines and possibly conditional guards
 * (e.g. around the I2C sysctls near the end) are elided from this
 * listing.
 */
3114 ixl_add_device_sysctls(struct ixl_pf *pf)
3116 device_t dev = pf->dev;
3117 struct i40e_hw *hw = &pf->hw;
3119 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3120 struct sysctl_oid_list *ctx_list =
3121 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3123 struct sysctl_oid *debug_node;
3124 struct sysctl_oid_list *debug_list;
3126 struct sysctl_oid *fec_node;
3127 struct sysctl_oid_list *fec_list;
3129 /* Set up sysctls */
3130 SYSCTL_ADD_PROC(ctx, ctx_list,
3131 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
3132 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
3134 SYSCTL_ADD_PROC(ctx, ctx_list,
3135 OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
3136 pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
3138 SYSCTL_ADD_PROC(ctx, ctx_list,
3139 OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD,
3140 pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
3142 SYSCTL_ADD_PROC(ctx, ctx_list,
3143 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
3144 pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed");
3146 SYSCTL_ADD_PROC(ctx, ctx_list,
3147 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
3148 pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
3150 SYSCTL_ADD_PROC(ctx, ctx_list,
3151 OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
3152 pf, 0, ixl_sysctl_unallocated_queues, "I",
3153 "Queues not allocated to a PF or VF");
3155 SYSCTL_ADD_PROC(ctx, ctx_list,
3156 OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
3157 pf, 0, ixl_sysctl_pf_tx_itr, "I",
3158 "Immediately set TX ITR value for all queues");
3160 SYSCTL_ADD_PROC(ctx, ctx_list,
3161 OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
3162 pf, 0, ixl_sysctl_pf_rx_itr, "I",
3163 "Immediately set RX ITR value for all queues");
3165 SYSCTL_ADD_INT(ctx, ctx_list,
3166 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
3167 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
3169 SYSCTL_ADD_INT(ctx, ctx_list,
3170 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
3171 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
3173 /* Add FEC sysctls for 25G adapters */
3174 if (i40e_is_25G_device(hw->device_id)) {
3175 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3176 OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
3177 fec_list = SYSCTL_CHILDREN(fec_node);
3179 SYSCTL_ADD_PROC(ctx, fec_list,
3180 OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW,
3181 pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
3183 SYSCTL_ADD_PROC(ctx, fec_list,
3184 OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW,
3185 pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
3187 SYSCTL_ADD_PROC(ctx, fec_list,
3188 OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW,
3189 pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
3191 SYSCTL_ADD_PROC(ctx, fec_list,
3192 OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW,
3193 pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
3195 SYSCTL_ADD_PROC(ctx, fec_list,
3196 OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW,
3197 pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
3200 SYSCTL_ADD_PROC(ctx, ctx_list,
3201 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW,
3202 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
3204 /* Add sysctls meant to print debug information, but don't list them
3205 * in "sysctl -a" output. */
3206 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3207 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
3208 debug_list = SYSCTL_CHILDREN(debug_node);
3210 SYSCTL_ADD_UINT(ctx, debug_list,
3211 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
3212 &pf->hw.debug_mask, 0, "Shared code debug message level");
3214 SYSCTL_ADD_UINT(ctx, debug_list,
3215 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
3216 &pf->dbg_mask, 0, "Non-shared code debug message level");
3218 SYSCTL_ADD_PROC(ctx, debug_list,
3219 OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
3220 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
3222 SYSCTL_ADD_PROC(ctx, debug_list,
3223 OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
3224 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
3226 SYSCTL_ADD_PROC(ctx, debug_list,
3227 OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
3228 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
3230 SYSCTL_ADD_PROC(ctx, debug_list,
3231 OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
3232 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
3234 SYSCTL_ADD_PROC(ctx, debug_list,
3235 OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
3236 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
3238 SYSCTL_ADD_PROC(ctx, debug_list,
3239 OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
3240 pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
3242 SYSCTL_ADD_PROC(ctx, debug_list,
3243 OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
3244 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
3246 SYSCTL_ADD_PROC(ctx, debug_list,
3247 OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
3248 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
3250 SYSCTL_ADD_PROC(ctx, debug_list,
3251 OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
3252 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
3254 SYSCTL_ADD_PROC(ctx, debug_list,
3255 OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD,
3256 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
3258 SYSCTL_ADD_PROC(ctx, debug_list,
3259 OID_AUTO, "do_pf_reset", CTLTYPE_INT | CTLFLAG_WR,
3260 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
3262 SYSCTL_ADD_PROC(ctx, debug_list,
3263 OID_AUTO, "do_core_reset", CTLTYPE_INT | CTLFLAG_WR,
3264 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
3266 SYSCTL_ADD_PROC(ctx, debug_list,
3267 OID_AUTO, "do_global_reset", CTLTYPE_INT | CTLFLAG_WR,
3268 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
3270 SYSCTL_ADD_PROC(ctx, debug_list,
3271 OID_AUTO, "do_emp_reset", CTLTYPE_INT | CTLFLAG_WR,
3272 pf, 0, ixl_sysctl_do_emp_reset, "I",
3273 "(This doesn't work) Tell HW to initiate a EMP (entire firmware) reset");
3275 SYSCTL_ADD_PROC(ctx, debug_list,
3276 OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
3277 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
/* NOTE(review): a guard (possibly a pf->has_i2c check) appears elided before the I2C sysctls. */
3280 SYSCTL_ADD_PROC(ctx, debug_list,
3281 OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3282 pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
3284 SYSCTL_ADD_PROC(ctx, debug_list,
3285 OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3286 pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
3288 SYSCTL_ADD_PROC(ctx, debug_list,
3289 OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD,
3290 pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
3295 * Primarily for finding out how many queues can be assigned to VFs,
3299 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
3301 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3304 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
3306 return sysctl_handle_int(oidp, NULL, queues, req);
3310 ** Set flow control using sysctl:
/*
 * ixl_sysctl_set_flowcntl - sysctl handler for dev.ixl.N.fc.
 *
 * Reads the requested flow-control mode (0-3), validates it, pushes it
 * to the firmware via i40e_set_fc(), and caches the accepted value in
 * pf->fc. Returns early on a read-only access (req->newptr == NULL).
 *
 * NOTE(review): 'return' statements, the aq_error check, and the
 * device_printf() openers are elided from this listing.
 */
3317 ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
3319 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3320 struct i40e_hw *hw = &pf->hw;
3321 device_t dev = pf->dev;
3322 int requested_fc, error = 0;
3323 enum i40e_status_code aq_error = 0;
3327 requested_fc = pf->fc;
3328 error = sysctl_handle_int(oidp, &requested_fc, 0, req);
3329 if ((error) || (req->newptr == NULL))
3331 if (requested_fc < 0 || requested_fc > 3) {
3333 "Invalid fc mode; valid modes are 0 through 3\n");
3337 /* Set fc ability for port */
3338 hw->fc.requested_mode = requested_fc;
3339 aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
3342 "%s: Error setting new fc mode %d; fc_err %#x\n",
3343 __func__, aq_error, fc_aq_err);
3346 pf->fc = requested_fc;
/*
 * ixl_aq_speed_to_str - map an Admin Queue link-speed enum to a
 * human-readable string from a local 'speeds' table.
 *
 * NOTE(review): the 'speeds[]' table and each case's 'index = ...;
 * break;' body are elided from this listing; only the case labels and
 * the final table lookup are visible.
 */
3352 ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
3366 switch (link_speed) {
3367 case I40E_LINK_SPEED_100MB:
3370 case I40E_LINK_SPEED_1GB:
3373 case I40E_LINK_SPEED_10GB:
3376 case I40E_LINK_SPEED_40GB:
3379 case I40E_LINK_SPEED_20GB:
3382 case I40E_LINK_SPEED_25GB:
3385 case I40E_LINK_SPEED_UNKNOWN:
3391 return speeds[index];
/*
 * ixl_sysctl_current_speed - sysctl handler reporting the current port
 * speed as a string; refreshes link status first so the reported value
 * is up to date.
 *
 * NOTE(review): the tail of the sysctl_handle_string() call and the
 * return are elided from this listing.
 */
3395 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
3397 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3398 struct i40e_hw *hw = &pf->hw;
3401 ixl_update_link_status(pf);
3403 error = sysctl_handle_string(oidp,
3404 ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
3410 * Converts 8-bit speeds value to and from sysctl flags and
3411 * Admin Queue flags.
/*
 * Each speedmap entry packs the AQ speed bit in the low byte and the
 * corresponding sysctl flag bit in the high byte, so one table serves
 * both conversion directions.
 *
 * NOTE(review): the 'if (to_aq)' / 'else' lines selecting between the
 * two OR-statements below are elided from this listing, as is the
 * 'retval' declaration and return.
 */
3414 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
3416 static u16 speedmap[6] = {
3417 (I40E_LINK_SPEED_100MB | (0x1 << 8)),
3418 (I40E_LINK_SPEED_1GB | (0x2 << 8)),
3419 (I40E_LINK_SPEED_10GB | (0x4 << 8)),
3420 (I40E_LINK_SPEED_20GB | (0x8 << 8)),
3421 (I40E_LINK_SPEED_25GB | (0x10 << 8)),
3422 (I40E_LINK_SPEED_40GB | (0x20 << 8))
3426 for (int i = 0; i < 6; i++) {
3428 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
3430 retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
/*
 * ixl_set_advertised_speeds - push a new set of advertised link speeds
 * to the PHY.
 *
 * Fetches the current PHY abilities, builds a set_phy_config request
 * that preserves the existing abilities/EEE/FEC settings while
 * substituting the new link_speed mask, and issues it to the firmware.
 * 'from_aq' selects whether 'speeds' is already in AQ format or is a
 * sysctl flag mask needing conversion.
 *
 * NOTE(review): 'if (aq_error)' guards, returns, and the 'if (from_aq)'
 * line selecting between the two link_speed assignments are elided.
 */
3437 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
3439 struct i40e_hw *hw = &pf->hw;
3440 device_t dev = pf->dev;
3441 struct i40e_aq_get_phy_abilities_resp abilities;
3442 struct i40e_aq_set_phy_config config;
3443 enum i40e_status_code aq_error = 0;
3445 /* Get current capability information */
3446 aq_error = i40e_aq_get_phy_capabilities(hw,
3447 FALSE, FALSE, &abilities, NULL);
3450 "%s: Error getting phy capabilities %d,"
3451 " aq error: %d\n", __func__, aq_error,
3452 hw->aq.asq_last_status);
3456 /* Prepare new config */
3457 bzero(&config, sizeof(config));
3459 config.link_speed = speeds;
3461 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
3462 config.phy_type = abilities.phy_type;
3463 config.phy_type_ext = abilities.phy_type_ext;
3464 config.abilities = abilities.abilities
3465 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
3466 config.eee_capability = abilities.eee_capability;
3467 config.eeer = abilities.eeer_val;
3468 config.low_power_ctrl = abilities.d3_lpan;
3469 config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e);
3471 /* Do aq command & restart link */
3472 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3475 "%s: Error setting new phy config %d,"
3476 " aq error: %d\n", __func__, aq_error,
3477 hw->aq.asq_last_status);
3485 ** Supported link speeds:
3495 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
3497 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3498 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
3500 return sysctl_handle_int(oidp, NULL, supported, req);
3504 ** Control link advertise speed:
3506 ** 0x1 - advertise 100 Mb
3507 ** 0x2 - advertise 1G
3508 ** 0x4 - advertise 10G
3509 ** 0x8 - advertise 20G
3510 ** 0x10 - advertise 25G
3511 ** 0x20 - advertise 40G
3513 ** Set to 0 to disable link
/*
 * ixl_sysctl_set_advertise - sysctl handler for dev.ixl.N.advertise_speed.
 *
 * Validates that the requested flag mask (a) uses only bits 0x01-0x20
 * and (b) is a subset of the adapter's supported speeds, then applies it
 * via ixl_set_advertised_speeds() and refreshes link status. Read-only
 * accesses (req->newptr == NULL) return the current value unchanged.
 *
 * NOTE(review): 'return (error)' / 'return (EINVAL)' lines and the
 * post-apply error check are elided from this listing.
 */
3516 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
3518 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3519 device_t dev = pf->dev;
3520 u8 converted_speeds;
3521 int requested_ls = 0;
3524 /* Read in new mode */
3525 requested_ls = pf->advertised_speed;
3526 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
3527 if ((error) || (req->newptr == NULL))
3530 /* Error out if bits outside of possible flag range are set */
3531 if ((requested_ls & ~((u8)0x3F)) != 0) {
3532 device_printf(dev, "Input advertised speed out of range; "
3533 "valid flags are: 0x%02x\n",
3534 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3538 /* Check if adapter supports input value */
3539 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
3540 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
3541 device_printf(dev, "Invalid advertised speed; "
3542 "valid flags are: 0x%02x\n",
3543 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3547 error = ixl_set_advertised_speeds(pf, requested_ls, false);
3551 pf->advertised_speed = requested_ls;
3552 ixl_update_link_status(pf);
3557 ** Get the width and transaction speed of
3558 ** the bus this adapter is plugged into.
/*
 * ixl_get_bus_info - report the PCIe link this adapter is plugged into.
 *
 * Reads the PCIe Link Status register, lets shared code decode it into
 * hw->bus, prints the negotiated speed/width, and warns when the total
 * port bandwidth could exceed what the slot provides. X722 parts are
 * skipped since they do not use PCIe in the same way.
 *
 * NOTE(review): declarations ('link', 'max_speed') and early-return
 * lines are elided from this listing.
 */
3561 ixl_get_bus_info(struct ixl_pf *pf)
3563 struct i40e_hw *hw = &pf->hw;
3564 device_t dev = pf->dev;
3566 u32 offset, num_ports;
3569 /* Some devices don't use PCIE */
3570 if (hw->mac.type == I40E_MAC_X722)
3573 /* Read PCI Express Capabilities Link Status Register */
3574 pci_find_cap(dev, PCIY_EXPRESS, &offset);
3575 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3577 /* Fill out hw struct with PCIE info */
3578 i40e_set_pci_config_data(hw, link);
3580 /* Use info to print out bandwidth messages */
3581 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3582 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
3583 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
3584 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3585 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
3586 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
3587 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
3588 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
3592 * If adapter is in slot with maximum supported speed,
3593 * no warning message needs to be printed out.
3595 if (hw->bus.speed >= i40e_bus_speed_8000
3596 && hw->bus.width >= i40e_bus_width_pcie_x8)
3599 num_ports = bitcount32(hw->func_caps.valid_functions);
3600 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
3602 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
3603 device_printf(dev, "PCI-Express bandwidth available"
3604 " for this device may be insufficient for"
3605 " optimal performance.\n");
3606 device_printf(dev, "Please move the device to a different"
3607 " PCI-e link with more lanes and/or higher"
3608 " transfer rate.\n");
/* Sysctl handler: report the NVM/firmware version string to userland. */
3613 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
3615 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3616 struct i40e_hw *hw = &pf->hw;
/* NOTE(review): sbuf_new_for_sysctl() result is not visibly NULL-checked
 * here (elided context) — confirm before relying on sbuf. */
3619 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3620 ixl_nvm_version_str(hw, sbuf);
/* Debug helper: decode and print an NVM access request from the NVM
 * update tool. The frequent "Get Driver Status" poll (READ of ptr 0xF /
 * flags 0xF, offset 0, size 1) is deliberately not printed to avoid spam. */
3628 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
3630 if ((nvma->command == I40E_NVM_READ) &&
3631 ((nvma->config & 0xFF) == 0xF) &&
3632 (((nvma->config & 0xF00) >> 8) == 0xF) &&
3633 (nvma->offset == 0) &&
3634 (nvma->data_size == 1)) {
3635 // device_printf(dev, "- Get Driver Status Command\n");
3637 else if (nvma->command == I40E_NVM_READ) {
3641 switch (nvma->command) {
3643 device_printf(dev, "- command: I40E_NVM_READ\n");
3646 device_printf(dev, "- command: I40E_NVM_WRITE\n");
3649 device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
/* config low byte is the module pointer, bits 8-11 are flags. */
3653 device_printf(dev, "- config (ptr) : 0x%02x\n", nvma->config & 0xFF);
3654 device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
3655 device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
3656 device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
/* Handle an NVM update ioctl (SIOCGDRVSPEC/SIOCSDRVSPEC path): copy the
 * i40e_nvm_access request in from userland, forward it to the shared-code
 * NVM update state machine, and copy the result back out. */
3661 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3663 struct i40e_hw *hw = &pf->hw;
3664 struct i40e_nvm_access *nvma;
3665 device_t dev = pf->dev;
3666 enum i40e_status_code status = 0;
3667 size_t nvma_size, ifd_len, exp_len;
3670 DEBUGFUNC("ixl_handle_nvmupd_cmd");
/* Basic sanity: the request must at least hold the fixed-size header. */
3673 nvma_size = sizeof(struct i40e_nvm_access);
3674 ifd_len = ifd->ifd_len;
3676 if (ifd_len < nvma_size ||
3677 ifd->ifd_data == NULL) {
3678 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3680 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3681 __func__, ifd_len, nvma_size);
3682 device_printf(dev, "%s: data pointer: %p\n", __func__,
/* M_WAITOK: allocation sleeps rather than failing. */
3687 nvma = malloc(ifd_len, M_DEVBUF, M_WAITOK);
3688 err = copyin(ifd->ifd_data, nvma, ifd_len);
3690 device_printf(dev, "%s: Cannot get request from user space\n",
3692 free(nvma, M_DEVBUF);
3696 if (pf->dbg_mask & IXL_DBG_NVMUPD)
3697 ixl_print_nvm_cmd(dev, nvma);
/* If an adapter reset is in flight, poll up to ~10s for it to finish
 * before touching the NVM; bail out if it never clears. */
3699 if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3701 while (count++ < 100) {
3702 i40e_msec_delay(100);
3703 if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING))
3708 if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3709 free(nvma, M_DEVBUF);
3713 if (nvma->data_size < 1 || nvma->data_size > 4096) {
3714 device_printf(dev, "%s: invalid request, data size not in supported range\n",
3716 free(nvma, M_DEVBUF);
3721 * Older versions of the NVM update tool don't set ifd_len to the size
3722 * of the entire buffer passed to the ioctl. Check the data_size field
3723 * in the contained i40e_nvm_access struct and ensure everything is
3724 * copied in from userspace.
3726 exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
3728 if (ifd_len < exp_len) {
/* NOTE(review): realloc with new size ifd_len looks like it should be
 * exp_len to grow to the expected length — confirm against full source. */
3730 nvma = realloc(nvma, ifd_len, M_DEVBUF, M_WAITOK);
3731 err = copyin(ifd->ifd_data, nvma, ifd_len);
3733 device_printf(dev, "%s: Cannot get request from user space\n",
3735 free(nvma, M_DEVBUF);
3740 // TODO: Might need a different lock here
3742 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3743 // IXL_PF_UNLOCK(pf);
3745 err = copyout(nvma, ifd->ifd_data, ifd_len);
3746 free(nvma, M_DEVBUF);
3748 device_printf(dev, "%s: Cannot return data to user space\n",
3753 /* Let the nvmupdate report errors, show them only when debug is enabled */
3754 if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3755 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3756 i40e_stat_str(hw, status), perrno);
3759 * -EPERM is actually ERESTART, which the kernel interprets as it needing
3760 * to run this ioctl again. So use -EACCES for -EPERM instead.
3762 if (perrno == -EPERM)
/* Scan the four GLGEN_MDIO_I2C_SEL registers for the one that has I2C
 * enabled and is mapped to this PF's PHY port; presumably returns that
 * interface index (return/result lines elided — confirm). */
3769 ixl_find_i2c_interface(struct ixl_pf *pf)
3771 struct i40e_hw *hw = &pf->hw;
3772 bool i2c_en, port_matched;
3775 for (int i = 0; i < 4; i++) {
3776 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3777 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3778 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3779 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3781 if (i2c_en && port_matched)
/* Map a PHY-type bit position to a human-readable name. With ext set,
 * indexes the 8-entry extended PHY type table; otherwise the 32-entry
 * base table. Out-of-range positions return an "Invalid" marker. */
3789 ixl_phy_type_string(u32 bit_pos, bool ext)
3791 static char * phy_types_str[32] = {
3821 "1000BASE-T Optical",
3825 static char * ext_phy_types_str[8] = {
3836 if (ext && bit_pos > 7) return "Invalid_Ext";
3837 if (bit_pos > 31) return "Invalid";
3839 return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3842 /* TODO: ERJ: I don't this is necessary anymore. */
/* Issue a raw get_link_status AdminQ command (with LSE enabled) and copy
 * the response into the caller's link_status structure. */
3844 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3846 device_t dev = pf->dev;
3847 struct i40e_hw *hw = &pf->hw;
3848 struct i40e_aq_desc desc;
3849 enum i40e_status_code status;
/* The response payload lives in the descriptor's raw params area. */
3851 struct i40e_aqc_get_link_status *aq_link_status =
3852 (struct i40e_aqc_get_link_status *)&desc.params.raw;
3854 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3855 link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3856 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3859 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3860 __func__, i40e_stat_str(hw, status),
3861 i40e_aq_str(hw, hw->aq.asq_last_status));
3865 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
/* Translate the phy_type byte from a link-status response into a name.
 * Values above 0x1F map into the extended PHY type table (offset by 0x1F). */
3870 ixl_phy_type_string_ls(u8 val)
3873 return ixl_phy_type_string(val - 0x1F, true);
3875 return ixl_phy_type_string(val, false);
/* Sysctl handler: query the firmware's link status via AdminQ and dump
 * the raw fields (PHY type, speed, link/AN/ext info, etc.) to userland. */
3879 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3881 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3882 device_t dev = pf->dev;
3886 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3888 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3892 struct i40e_aqc_get_link_status link_status;
3893 error = ixl_aq_get_link_status(pf, &link_status);
3899 sbuf_printf(buf, "\n"
3900 "PHY Type : 0x%02x<%s>\n"
3902 "Link info: 0x%02x\n"
3903 "AN info : 0x%02x\n"
3904 "Ext info : 0x%02x\n"
3905 "Loopback : 0x%02x\n"
3909 link_status.phy_type,
3910 ixl_phy_type_string_ls(link_status.phy_type),
3911 link_status.link_speed,
3912 link_status.link_info,
3913 link_status.an_info,
3914 link_status.ext_info,
3915 link_status.loopback,
3916 link_status.max_frame_size,
3918 link_status.power_desc);
3920 error = sbuf_finish(buf);
3922 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Sysctl handler: fetch PHY capabilities from firmware and print them,
 * expanding the phy_type / phy_type_ext bitmasks into readable names. */
3929 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3931 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3932 struct i40e_hw *hw = &pf->hw;
3933 device_t dev = pf->dev;
3934 enum i40e_status_code status;
3935 struct i40e_aq_get_phy_abilities_resp abilities;
3939 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3941 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3945 status = i40e_aq_get_phy_capabilities(hw,
3946 FALSE, FALSE, &abilities, NULL);
3949 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3950 __func__, i40e_stat_str(hw, status),
3951 i40e_aq_str(hw, hw->aq.asq_last_status));
3956 sbuf_printf(buf, "\n"
3958 abilities.phy_type);
/* Expand each set bit of the 32-bit base PHY type mask into its name. */
3960 if (abilities.phy_type != 0) {
3961 sbuf_printf(buf, "<");
3962 for (int i = 0; i < 32; i++)
3963 if ((1 << i) & abilities.phy_type)
3964 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3965 sbuf_printf(buf, ">\n");
3968 sbuf_printf(buf, "PHY Ext : %02x",
3969 abilities.phy_type_ext);
/* Only the low 4 bits of phy_type_ext are decoded here. */
3971 if (abilities.phy_type_ext != 0) {
3972 sbuf_printf(buf, "<");
3973 for (int i = 0; i < 4; i++)
3974 if ((1 << i) & abilities.phy_type_ext)
3975 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
3976 sbuf_printf(buf, ">");
3978 sbuf_printf(buf, "\n");
3986 "ID : %02x %02x %02x %02x\n"
3987 "ModType : %02x %02x %02x\n"
3991 abilities.link_speed,
3992 abilities.abilities, abilities.eee_capability,
3993 abilities.eeer_val, abilities.d3_lpan,
3994 abilities.phy_id[0], abilities.phy_id[1],
3995 abilities.phy_id[2], abilities.phy_id[3],
3996 abilities.module_type[0], abilities.module_type[1],
/* fec_cfg_curr_mod_ext_info: bits 7-5 are module-ext info, 4-0 FEC cfg. */
3997 abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3998 abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3999 abilities.ext_comp_code);
4001 error = sbuf_finish(buf);
4003 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Sysctl handler: dump the software MAC/VLAN filter list for the PF,
 * and (when SR-IOV is active) a section per enabled VF. */
4010 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4012 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4013 struct ixl_vsi *vsi = &pf->vsi;
4014 struct ixl_mac_filter *f;
4015 device_t dev = pf->dev;
4016 int error = 0, ftl_len = 0, ftl_counter = 0;
4020 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4022 device_printf(dev, "Could not allocate sbuf for output.\n");
4026 sbuf_printf(buf, "\n");
4028 /* Print MAC filters */
4029 sbuf_printf(buf, "PF Filters:\n");
/* First pass counts entries (count line elided) so the second pass can
 * suppress the trailing newline on the last one. */
4030 SLIST_FOREACH(f, &vsi->ftl, next)
4034 sbuf_printf(buf, "(none)\n");
4036 SLIST_FOREACH(f, &vsi->ftl, next) {
4038 MAC_FORMAT ", vlan %4d, flags %#06x",
4039 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4040 /* don't print '\n' for last entry */
4041 if (++ftl_counter != ftl_len)
4042 sbuf_printf(buf, "\n");
4047 /* TODO: Give each VF its own filter list sysctl */
4049 if (pf->num_vfs > 0) {
4050 sbuf_printf(buf, "\n\n");
4051 for (int i = 0; i < pf->num_vfs; i++) {
4053 if (!(vf->vf_flags & VF_FLAG_ENABLED))
4057 ftl_len = 0, ftl_counter = 0;
4058 sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
/* NOTE(review): the VF section iterates the PF's vsi->ftl list rather
 * than the VF's own VSI filter list — looks like it prints PF filters
 * under each VF heading; confirm against full source. */
4059 SLIST_FOREACH(f, &vsi->ftl, next)
4063 sbuf_printf(buf, "(none)\n");
4065 SLIST_FOREACH(f, &vsi->ftl, next) {
4067 MAC_FORMAT ", vlan %4d, flags %#06x\n",
4068 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4075 error = sbuf_finish(buf);
4077 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Number of switch resource types reported by firmware. */
4083 #define IXL_SW_RES_SIZE 0x14
/* qsort(3) comparator: order switch resource entries by resource_type
 * ascending. Cast through int so the subtraction cannot overflow (u8 types). */
4085 ixl_res_alloc_cmp(const void *a, const void *b)
4087 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4088 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4089 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4091 return ((int)one->resource_type - (int)two->resource_type);
4095 * Longest string length: 25
/* Map a firmware switch resource type code (0..0x13) to a display name;
 * unknown codes fall through to "(Reserved)". */
4098 ixl_switch_res_type_string(u8 type)
4100 // TODO: This should be changed to static const
4101 char * ixl_switch_res_type_strings[0x14] = {
4104 "Perfect Match MAC address",
4107 "Multicast hash entry",
4108 "Unicast hash entry",
4112 "VLAN Statistic Pool",
4115 "Inner VLAN Forward filter",
4125 return ixl_switch_res_type_strings[type];
4127 return "(Reserved)";
/* Sysctl handler: query firmware for per-type switch resource allocation
 * counts and print them as a table, sorted by resource type. */
4131 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4133 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4134 struct i40e_hw *hw = &pf->hw;
4135 device_t dev = pf->dev;
4137 enum i40e_status_code status;
4141 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4143 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4145 device_printf(dev, "Could not allocate sbuf for output.\n");
4149 bzero(resp, sizeof(resp));
4150 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4156 "%s: get_switch_resource_alloc() error %s, aq error %s\n",
4157 __func__, i40e_stat_str(hw, status),
4158 i40e_aq_str(hw, hw->aq.asq_last_status));
4163 /* Sort entries by type for display */
4164 qsort(resp, num_entries,
4165 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
4166 &ixl_res_alloc_cmp);
4168 sbuf_cat(buf, "\n");
4169 sbuf_printf(buf, "# of entries: %d\n", num_entries);
4171 " Type | Guaranteed | Total | Used | Un-allocated\n"
4172 " | (this) | (all) | (this) | (all) \n");
4173 for (int i = 0; i < num_entries; i++) {
4175 "%25s | %10d %5d %6d %12d",
4176 ixl_switch_res_type_string(resp[i].resource_type),
4180 resp[i].total_unalloced);
/* No trailing newline after the final row. */
4181 if (i < num_entries - 1)
4182 sbuf_cat(buf, "\n");
4185 error = sbuf_finish(buf);
4187 device_printf(dev, "Error finishing sbuf: %d\n", error);
4194 ** Caller must init and delete sbuf; this function will clear and
4195 ** finish it for caller.
/* Format one switch-config element as "<TYPE> <info>" into the caller's
 * scratch sbuf and return the resulting C string. */
4198 ixl_switch_element_string(struct sbuf *s,
4199 struct i40e_aqc_switch_config_element_resp *element)
4203 switch (element->element_type) {
4204 case I40E_AQ_SW_ELEM_TYPE_MAC:
4205 sbuf_printf(s, "MAC %3d", element->element_info);
4207 case I40E_AQ_SW_ELEM_TYPE_PF:
4208 sbuf_printf(s, "PF %3d", element->element_info);
4210 case I40E_AQ_SW_ELEM_TYPE_VF:
4211 sbuf_printf(s, "VF %3d", element->element_info);
4213 case I40E_AQ_SW_ELEM_TYPE_EMP:
4216 case I40E_AQ_SW_ELEM_TYPE_BMC:
4219 case I40E_AQ_SW_ELEM_TYPE_PV:
4222 case I40E_AQ_SW_ELEM_TYPE_VEB:
4225 case I40E_AQ_SW_ELEM_TYPE_PA:
4228 case I40E_AQ_SW_ELEM_TYPE_VSI:
4229 sbuf_printf(s, "VSI %3d", element->element_info);
4237 return sbuf_data(s);
/* Sysctl handler: fetch the internal switch topology from firmware and
 * print one row per element (SEID, name, uplink/downlink SEIDs, conn type). */
4241 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
4243 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4244 struct i40e_hw *hw = &pf->hw;
4245 device_t dev = pf->dev;
4248 enum i40e_status_code status;
4251 u8 aq_buf[I40E_AQ_LARGE_BUF];
4253 struct i40e_aqc_get_switch_config_resp *sw_config;
4254 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
4256 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4258 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
4262 status = i40e_aq_get_switch_config(hw, sw_config,
4263 sizeof(aq_buf), &next, NULL);
4266 "%s: aq_get_switch_config() error %s, aq error %s\n",
4267 __func__, i40e_stat_str(hw, status),
4268 i40e_aq_str(hw, hw->aq.asq_last_status));
/* If firmware indicates more elements remain, they are not fetched. */
4273 device_printf(dev, "%s: TODO: get more config with SEID %d\n",
/* nmbuf is a scratch sbuf reused per-element for the name column. */
4276 nmbuf = sbuf_new_auto();
4278 device_printf(dev, "Could not allocate sbuf for name output.\n");
4283 sbuf_cat(buf, "\n");
4284 /* Assuming <= 255 elements in switch */
4285 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
4286 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
4288 ** Revision -- all elements are revision 1 for now
4291 "SEID ( Name ) | Uplink | Downlink | Conn Type\n"
4292 " | | | (uplink)\n");
4293 for (int i = 0; i < sw_config->header.num_reported; i++) {
4294 // "%4d (%8s) | %8s %8s %#8x",
4295 sbuf_printf(buf, "%4d", sw_config->element[i].seid);
4297 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4298 &sw_config->element[i]));
4299 sbuf_cat(buf, " | ");
4300 sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
4302 sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
4304 sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
4305 if (i < sw_config->header.num_reported - 1)
4306 sbuf_cat(buf, "\n");
4310 error = sbuf_finish(buf);
4312 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Sysctl handler: dump the RSS hash key. X722 reads it via AdminQ;
 * other MACs read the PFQF_HKEY registers directly, 4 bytes at a time. */
4320 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
4322 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4323 struct i40e_hw *hw = &pf->hw;
4324 device_t dev = pf->dev;
4327 enum i40e_status_code status;
4330 struct i40e_aqc_get_set_rss_key_data key_data;
4332 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4334 device_printf(dev, "Could not allocate sbuf for output.\n");
4338 bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
4340 sbuf_cat(buf, "\n");
4341 if (hw->mac.type == I40E_MAC_X722) {
4342 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
4344 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
4345 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
/* Register path: each HKEY register holds 4 key bytes (i << 2 offset). */
4347 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4348 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
4349 bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
4353 ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
4355 error = sbuf_finish(buf);
4357 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Hex-dump `length` bytes of `buf` into `sb`, 16 bytes per row, each row
 * prefixed with its offset (biased by label_offset). When `text` is set,
 * an ASCII column is appended (non-printable bytes shown as '.'). */
4364 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
4369 if (length < 1 || buf == NULL) return;
4371 int byte_stride = 16;
4372 int lines = length / byte_stride;
4373 int rem = length % byte_stride;
/* NOTE(review): when rem > 0 the partial row appears to be folded into
 * the last full-row iteration (lines count adjustment elided) — confirm. */
4377 for (i = 0; i < lines; i++) {
4378 width = (rem > 0 && i == lines - 1)
4379 ? rem : byte_stride;
4381 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
4383 for (j = 0; j < width; j++)
4384 sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
/* Pad short rows so the ASCII column stays aligned. */
4386 if (width < byte_stride) {
4387 for (k = 0; k < (byte_stride - width); k++)
4388 sbuf_printf(sb, " ");
4392 sbuf_printf(sb, "\n");
4396 for (j = 0; j < width; j++) {
4397 c = (char)buf[i * byte_stride + j];
4398 if (c < 32 || c > 126)
4399 sbuf_printf(sb, ".");
4401 sbuf_printf(sb, "%c", c);
4404 sbuf_printf(sb, "\n");
/* Sysctl handler: dump the RSS lookup table (LUT). X722 reads it via
 * AdminQ; other MACs read PFQF_HLUT registers, 4 LUT bytes per register. */
4410 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
4412 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4413 struct i40e_hw *hw = &pf->hw;
4414 device_t dev = pf->dev;
4417 enum i40e_status_code status;
4421 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4423 device_printf(dev, "Could not allocate sbuf for output.\n");
4427 bzero(hlut, sizeof(hlut));
4428 sbuf_cat(buf, "\n");
4429 if (hw->mac.type == I40E_MAC_X722) {
4430 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
4432 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
4433 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
/* rss_table_size entries, 4 per 32-bit register (hence >> 2 / << 2). */
4435 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
4436 reg = rd32(hw, I40E_PFQF_HLUT(i));
4437 bcopy(&reg, &hlut[i << 2], 4);
/* Dump 512 bytes of LUT without the ASCII column. */
4440 ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
4442 error = sbuf_finish(buf);
4444 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Sysctl handler: report the 64-bit RSS hash-enable (HENA) mask, read
 * from the two 32-bit PFQF_HENA registers (low word in HENA(0)). */
4451 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
4453 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4454 struct i40e_hw *hw = &pf->hw;
4457 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
4458 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
4460 return sysctl_handle_long(oidp, NULL, hena, req);
4464 * Sysctl to disable firmware's link management
4466 * 1 - Disable link management on this port
4467 * 0 - Re-enable link management
4469 * On normal NVMs, firmware manages link by default.
4472 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
4474 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4475 struct i40e_hw *hw = &pf->hw;
4476 device_t dev = pf->dev;
4477 int requested_mode = -1;
4478 enum i40e_status_code status = 0;
4481 /* Read in new mode */
4482 error = sysctl_handle_int(oidp, &requested_mode, 0, req);
/* Read-only access (no new value): return after reporting current state. */
4483 if ((error) || (req->newptr == NULL))
4485 /* Check for sane value */
4486 if (requested_mode < 0 || requested_mode > 1) {
4487 device_printf(dev, "Valid modes are 0 or 1\n");
/* Bit 4 of the phy debug flags disables firmware link management. */
4492 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4495 "%s: Error setting new phy debug mode %s,"
4496 " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4497 i40e_aq_str(hw, hw->aq.asq_last_status));
4505 * Read some diagnostic data from an SFP module
4506 * Bytes 96-99, 102-105 from device address 0xA2
4509 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4511 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4512 device_t dev = pf->dev;
/* Byte 0 at 0xA0 is the SFF identifier; 0x3 means SFP/SFP+/SFP28. */
4517 error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4519 device_printf(dev, "Error reading from i2c\n");
4522 if (output != 0x3) {
4523 device_printf(dev, "Module is not SFP/SFP+/SFP28 (%02X)\n", output);
/* Byte 92 at 0xA0 advertises diagnostic monitoring support (bits 5-6). */
4527 pf->read_i2c_byte(pf, 92, 0xA0, &output);
4528 if (!(output & 0x60)) {
4529 device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4533 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
/* NOTE(review): per-byte read_i2c_byte return values below are ignored;
 * a failed read leaves `output` stale — confirm acceptable for a debug knob. */
4535 for (u8 offset = 96; offset < 100; offset++) {
4536 pf->read_i2c_byte(pf, offset, 0xA2, &output);
4537 sbuf_printf(sbuf, "%02X ", output);
4539 for (u8 offset = 102; offset < 106; offset++) {
4540 pf->read_i2c_byte(pf, offset, 0xA2, &output);
4541 sbuf_printf(sbuf, "%02X ", output);
4551 * Sysctl to read a byte from I2C bus.
4553 * Input: 32-bit value:
4554 * bits 0-7: device address (0xA0 or 0xA2)
4555 * bits 8-15: offset (0-255)
4556 * bits 16-31: unused
4557 * Output: 8-bit value read
4560 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4562 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4563 device_t dev = pf->dev;
4564 int input = -1, error = 0;
4565 u8 dev_addr, offset, output;
4567 /* Read in I2C read parameters */
4568 error = sysctl_handle_int(oidp, &input, 0, req);
4569 if ((error) || (req->newptr == NULL))
4571 /* Validate device address */
/* Only the two standard SFP addresses (EEPROM 0xA0, diagnostics 0xA2). */
4572 dev_addr = input & 0xFF;
4573 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4576 offset = (input >> 8) & 0xFF;
4578 error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
/* Result goes to the console, not the sysctl buffer. */
4582 device_printf(dev, "%02X\n", output);
4587 * Sysctl to write a byte to the I2C bus.
4589 * Input: 32-bit value:
4590 * bits 0-7: device address (0xA0 or 0xA2)
4591 * bits 8-15: offset (0-255)
4592 * bits 16-23: value to write
4593 * bits 24-31: unused
4594 * Output: 8-bit value written
4597 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4599 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4600 device_t dev = pf->dev;
4601 int input = -1, error = 0;
4602 u8 dev_addr, offset, value;
4604 /* Read in I2C write parameters */
4605 error = sysctl_handle_int(oidp, &input, 0, req);
4606 if ((error) || (req->newptr == NULL))
4608 /* Validate device address */
/* Only the two standard SFP addresses (EEPROM 0xA0, diagnostics 0xA2). */
4609 dev_addr = input & 0xFF;
4610 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4613 offset = (input >> 8) & 0xFF;
4614 value = (input >> 16) & 0xFF;
4616 error = pf->write_i2c_byte(pf, offset, dev_addr, value);
/* Confirmation goes to the console, not the sysctl buffer. */
4620 device_printf(dev, "%02X written\n", value);
/* Fetch current PHY abilities and report, via *is_set, whether the FEC
 * flag(s) in bit_pos are set in fec_cfg_curr_mod_ext_info. The fetched
 * abilities are also returned to the caller for a follow-up set. */
4625 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4626 u8 bit_pos, int *is_set)
4628 device_t dev = pf->dev;
4629 struct i40e_hw *hw = &pf->hw;
4630 enum i40e_status_code status;
4632 status = i40e_aq_get_phy_capabilities(hw,
4633 FALSE, FALSE, abilities, NULL);
4636 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4637 __func__, i40e_stat_str(hw, status),
4638 i40e_aq_str(hw, hw->aq.asq_last_status));
/* Normalize to 0/1. */
4642 *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
/* Set or clear the FEC flag(s) in bit_pos within the PHY's FEC config.
 * Issues a set_phy_config only when the resulting value actually differs
 * from the current one, carrying over all other PHY settings unchanged. */
4647 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4648 u8 bit_pos, int set)
4650 device_t dev = pf->dev;
4651 struct i40e_hw *hw = &pf->hw;
4652 struct i40e_aq_set_phy_config config;
4653 enum i40e_status_code status;
4655 /* Set new PHY config */
4656 memset(&config, 0, sizeof(config));
/* Start from current FEC config with the target bit(s) cleared... */
4657 config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
/* ...then set them back if requested (condition line elided). */
4659 config.fec_config |= bit_pos;
4660 if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
/* Atomic-link flag avoids dropping link while applying the change. */
4661 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4662 config.phy_type = abilities->phy_type;
4663 config.phy_type_ext = abilities->phy_type_ext;
4664 config.link_speed = abilities->link_speed;
4665 config.eee_capability = abilities->eee_capability;
4666 config.eeer = abilities->eeer_val;
4667 config.low_power_ctrl = abilities->d3_lpan;
4668 status = i40e_aq_set_phy_config(hw, &config, NULL);
4672 "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4673 __func__, i40e_stat_str(hw, status),
4674 i40e_aq_str(hw, hw->aq.asq_last_status));
/* Sysctl: get/set the KR-FEC (Fire Code / BASE-R) ability flag. */
4683 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4685 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4686 int mode, error = 0;
4688 struct i40e_aq_get_phy_abilities_resp abilities;
4689 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4692 /* Read in new mode */
4693 error = sysctl_handle_int(oidp, &mode, 0, req);
/* Read-only access: report current value and return. */
4694 if ((error) || (req->newptr == NULL))
4697 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
/* Sysctl: get/set the RS-FEC (Reed-Solomon) ability flag. */
4701 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4703 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4704 int mode, error = 0;
4706 struct i40e_aq_get_phy_abilities_resp abilities;
4707 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4710 /* Read in new mode */
4711 error = sysctl_handle_int(oidp, &mode, 0, req);
/* Read-only access: report current value and return. */
4712 if ((error) || (req->newptr == NULL))
4715 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
/* Sysctl: get/set the "request KR-FEC from link partner" flag. */
4719 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4721 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4722 int mode, error = 0;
4724 struct i40e_aq_get_phy_abilities_resp abilities;
4725 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4728 /* Read in new mode */
4729 error = sysctl_handle_int(oidp, &mode, 0, req);
/* Read-only access: report current value and return. */
4730 if ((error) || (req->newptr == NULL))
4733 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
/* Sysctl: get/set the "request RS-FEC from link partner" flag. */
4737 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4739 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4740 int mode, error = 0;
4742 struct i40e_aq_get_phy_abilities_resp abilities;
4743 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4746 /* Read in new mode */
4747 error = sysctl_handle_int(oidp, &mode, 0, req);
/* Read-only access: report current value and return. */
4748 if ((error) || (req->newptr == NULL))
4751 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
/* Sysctl: get/set the automatic FEC negotiation flag. */
4755 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4757 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4758 int mode, error = 0;
4760 struct i40e_aq_get_phy_abilities_resp abilities;
4761 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4764 /* Read in new mode */
4765 error = sysctl_handle_int(oidp, &mode, 0, req);
/* Read-only access: report current value and return. */
4766 if ((error) || (req->newptr == NULL))
4769 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
/* Sysctl handler: pull a firmware debug-dump cluster via repeated
 * i40e_aq_debug_dump calls (4 KB at a time), accumulate each table into
 * final_buff, and hex-print each completed table into the sysctl output. */
4773 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4775 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4776 struct i40e_hw *hw = &pf->hw;
4777 device_t dev = pf->dev;
4780 enum i40e_status_code status;
4782 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4784 device_printf(dev, "Could not allocate sbuf for output.\n");
4789 /* This amount is only necessary if reading the entire cluster into memory */
4790 #define IXL_FINAL_BUFF_SIZE (1280 * 1024)
/* NOTE(review): M_WAITOK malloc does not return NULL in FreeBSD, so this
 * NULL check is dead code — harmless, but could be dropped. */
4791 final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK);
4792 if (final_buff == NULL) {
4793 device_printf(dev, "Could not allocate memory for output.\n");
4796 int final_buff_len = 0;
4802 u16 curr_buff_size = 4096;
4803 u8 curr_next_table = 0;
4804 u32 curr_next_index = 0;
4810 sbuf_cat(buf, "\n");
4813 status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4814 dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4816 device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4817 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4821 /* copy info out of temp buffer */
4822 bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4823 final_buff_len += ret_buff_size;
4825 if (ret_next_table != curr_next_table) {
4826 /* We're done with the current table; we can dump out read data. */
4827 sbuf_printf(buf, "%d:", curr_next_table);
4828 int bytes_printed = 0;
/* NOTE(review): `<=` prints one extra 16-byte row past final_buff_len
 * when the length is a multiple of 16 (stays inside the 1280K allocation
 * but emits stale bytes) — looks like it should be `<`; confirm. */
4829 while (bytes_printed <= final_buff_len) {
4830 sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4831 bytes_printed += 16;
4833 sbuf_cat(buf, "\n");
4835 /* The entire cluster has been read; we're finished */
4836 if (ret_next_table == 0xFF)
4839 /* Otherwise clear the output buffer and continue reading */
4840 bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4844 if (ret_next_index == 0xFFFFFFFF)
4847 bzero(dump_buf, sizeof(dump_buf));
4848 curr_next_table = ret_next_table;
4849 curr_next_index = ret_next_index;
4853 free(final_buff, M_DEVBUF);
4855 error = sbuf_finish(buf);
4857 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Sysctl handler: enable (1) or disable (0) the firmware LLDP agent.
 * Disabling is rejected on X722 and NPAR setups, and requires AQ API >= 1.7. */
4864 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4866 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4867 struct i40e_hw *hw = &pf->hw;
4868 device_t dev = pf->dev;
4870 int state, new_state;
4871 enum i40e_status_code status;
/* Current state: 1 = agent running (DISABLED flag clear). */
4872 state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4874 /* Read in new mode */
4875 error = sysctl_handle_int(oidp, &new_state, 0, req);
4876 if ((error) || (req->newptr == NULL))
4879 /* Already in requested state */
4880 if (new_state == state)
4883 if (new_state == 0) {
4884 if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) {
4885 device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n");
4889 if (pf->hw.aq.api_maj_ver < 1 ||
4890 (pf->hw.aq.api_maj_ver == 1 &&
4891 pf->hw.aq.api_min_ver < 7)) {
4892 device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
/* Stop the agent and let the driver take over DCB parameter handling. */
4896 i40e_aq_stop_lldp(&pf->hw, true, NULL);
4897 i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
4898 atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4900 status = i40e_aq_start_lldp(&pf->hw, NULL);
/* EEXIST from firmware just means it was already running — not fatal. */
4901 if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST)
4902 device_printf(dev, "FW LLDP agent is already running\n");
4903 atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4910 * Get FW LLDP Agent status
4913 ixl_get_fw_lldp_status(struct ixl_pf *pf)
4915 enum i40e_status_code ret = I40E_SUCCESS;
4916 struct i40e_lldp_variables lldp_cfg;
4917 struct i40e_hw *hw = &pf->hw;
4920 ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
4924 /* Get the LLDP AdminStatus for the current port */
/* adminstatus packs one 4-bit field per port. */
4925 adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
4928 /* Check if LLDP agent is disabled */
4930 device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n");
4931 atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4933 atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
/* Attach-time link bring-up: on old firmware (< 4.33) restart
 * autonegotiation to work around a link issue, then query link state. */
4939 ixl_attach_get_link_status(struct ixl_pf *pf)
4941 struct i40e_hw *hw = &pf->hw;
4942 device_t dev = pf->dev;
4945 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4946 (hw->aq.fw_maj_ver < 4)) {
/* Brief settle delay before restarting AN. */
4947 i40e_msec_delay(75);
4948 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4950 device_printf(dev, "link restart failed, aq_err=%d\n",
4951 pf->hw.aq.asq_last_status);
4956 /* Determine link state */
4957 hw->phy.get_link_info = TRUE;
4958 i40e_get_link_status(hw, &pf->link_up);
/* Sysctl: request a PF reset; the admin task performs it asynchronously. */
4963 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4965 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4966 int requested = 0, error = 0;
4968 /* Read in new mode */
4969 error = sysctl_handle_int(oidp, &requested, 0, req);
/* Read-only access: nothing to do. */
4970 if ((error) || (req->newptr == NULL))
4973 /* Initiate the PF reset later in the admin task */
4974 atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
/* Sysctl: trigger an immediate core reset (CORER) via GLGEN_RTRIG. */
4980 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4982 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4983 struct i40e_hw *hw = &pf->hw;
4984 int requested = 0, error = 0;
4986 /* Read in new mode */
4987 error = sysctl_handle_int(oidp, &requested, 0, req);
/* Read-only access: nothing to do. */
4988 if ((error) || (req->newptr == NULL))
4991 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
/* Sysctl: trigger an immediate global reset (GLOBR) via GLGEN_RTRIG. */
4997 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4999 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5000 struct i40e_hw *hw = &pf->hw;
5001 int requested = 0, error = 0;
5003 /* Read in new mode */
5004 error = sysctl_handle_int(oidp, &requested, 0, req);
/* Read-only access: nothing to do. */
5005 if ((error) || (req->newptr == NULL))
5008 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
/* Sysctl: trigger an EMP firmware reset (EMPFWR), if firmware permits
 * software-initiated EMP resets (gated by an undocumented register bit). */
5014 ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS)
5016 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5017 struct i40e_hw *hw = &pf->hw;
5018 int requested = 0, error = 0;
5020 /* Read in new mode */
5021 error = sysctl_handle_int(oidp, &requested, 0, req);
/* Read-only access: nothing to do. */
5022 if ((error) || (req->newptr == NULL))
5025 /* TODO: Find out how to bypass this */
/* 0x000B818C: undocumented gate register; bit 0 must be set for SW EMPR. */
5026 if (!(rd32(hw, 0x000B818C) & 0x1)) {
5027 device_printf(pf->dev, "SW not allowed to initiate EMPR\n");
5030 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_EMPFWR_MASK);
5036 * Print out mapping of TX queue indexes and Rx queue indexes
5040 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
5042 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5043 struct ixl_vsi *vsi = &pf->vsi;
5044 device_t dev = pf->dev;
5048 struct ixl_rx_queue *rx_que = vsi->rx_queues;
5049 struct ixl_tx_queue *tx_que = vsi->tx_queues;
5051 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5053 device_printf(dev, "Could not allocate sbuf for output.\n");
5057 sbuf_cat(buf, "\n");
5058 for (int i = 0; i < vsi->num_rx_queues; i++) {
5059 rx_que = &vsi->rx_queues[i];
5060 sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
5062 for (int i = 0; i < vsi->num_tx_queues; i++) {
5063 tx_que = &vsi->tx_queues[i];
5064 sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
5067 error = sbuf_finish(buf);
5069 device_printf(dev, "Error finishing sbuf: %d\n", error);