1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
39 #include "ixl_pf_iov.h"
44 #include "ixl_iw_int.h"
47 static u8 ixl_convert_sysctl_aq_link_speed(u8, bool);
48 static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
51 static int ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
52 static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
53 static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
54 static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
55 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
56 static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
57 static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
58 static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
61 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
62 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
63 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
64 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
65 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
66 static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
67 static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
68 static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
69 static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
70 static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
71 static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
72 static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
73 static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
74 static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
75 static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
76 static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
77 static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
78 static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
79 static int ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
80 static int ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
81 static int ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
82 static int ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS);
83 static int ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
84 static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
86 static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
87 static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
91 extern int ixl_enable_iwarp;
92 extern int ixl_limit_iwarp_msix;
95 const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
96 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
98 const char * const ixl_fc_string[6] = {
107 static char *ixl_fec_string[3] = {
109 "CL74 FC-FEC/BASE-R",
113 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
/*
 * Format the adapter's FW / AQ API / NVM / EEtrackID / OEM version
 * information into the caller-supplied sbuf. Caller owns the sbuf.
 */
116 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
119 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
/* OEM version word layout: ver(31:24) | build(23:8) | patch(7:0) */
121 u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
122 u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
123 u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
126 "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
127 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
128 hw->aq.api_maj_ver, hw->aq.api_min_ver,
/* NVM version register packs major/minor into hi/lo bit fields */
129 (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
130 IXL_NVM_VERSION_HI_SHIFT,
131 (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
132 IXL_NVM_VERSION_LO_SHIFT,
134 oem_ver, oem_build, oem_patch);
/*
 * Print the NVM/FW version string for this PF to the console.
 * Builds the string in a temporary auto-sizing sbuf.
 */
138 ixl_print_nvm_version(struct ixl_pf *pf)
140 struct i40e_hw *hw = &pf->hw;
141 device_t dev = pf->dev;
144 sbuf = sbuf_new_auto();
145 ixl_nvm_version_str(hw, sbuf);
/* NOTE(review): sbuf_finish()/sbuf_delete() calls are not visible in this
 * view -- confirm the sbuf is finished before sbuf_data() and freed after. */
147 device_printf(dev, "%s\n", sbuf_data(sbuf));
/*
 * Program each TX queue's interrupt throttling register (ITR) from the
 * PF-level tx_itr tunable, and mirror the value into the software tx_ring.
 */
152 ixl_configure_tx_itr(struct ixl_pf *pf)
154 struct i40e_hw *hw = &pf->hw;
155 struct ixl_vsi *vsi = &pf->vsi;
156 struct ixl_tx_queue *que = vsi->tx_queues;
158 vsi->tx_itr_setting = pf->tx_itr;
160 for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
161 struct tx_ring *txr = &que->txr;
/* Write the TX ITR index register for queue i */
163 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
164 vsi->tx_itr_setting);
165 txr->itr = vsi->tx_itr_setting;
166 txr->latency = IXL_AVE_LATENCY;
/*
 * Program each RX queue's interrupt throttling register (ITR) from the
 * PF-level rx_itr tunable, and mirror the value into the software rx_ring.
 * Mirrors ixl_configure_tx_itr() for the RX side.
 */
171 ixl_configure_rx_itr(struct ixl_pf *pf)
173 struct i40e_hw *hw = &pf->hw;
174 struct ixl_vsi *vsi = &pf->vsi;
175 struct ixl_rx_queue *que = vsi->rx_queues;
177 vsi->rx_itr_setting = pf->rx_itr;
179 for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
180 struct rx_ring *rxr = &que->rxr;
/* Write the RX ITR index register for queue i */
182 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
183 vsi->rx_itr_setting);
184 rxr->itr = vsi->rx_itr_setting;
185 rxr->latency = IXL_AVE_LATENCY;
/* Convenience wrapper: push both TX and RX ITR settings to hardware. */
190 * Write PF ITR values to queue ITR registers.
193 ixl_configure_itr(struct ixl_pf *pf)
195 ixl_configure_tx_itr(pf);
196 ixl_configure_rx_itr(pf);
199 /*********************************************************************
201 * Get the hardware capabilities
203 **********************************************************************/
/*
 * Query function-level capabilities from firmware via the admin queue,
 * detect whether an I2C interface is present, and select the I2C access
 * routines (AQ / register / bit-bang) the driver will use.
 */
206 ixl_get_hw_capabilities(struct ixl_pf *pf)
208 struct i40e_aqc_list_capabilities_element_resp *buf;
209 struct i40e_hw *hw = &pf->hw;
210 device_t dev = pf->dev;
211 enum i40e_status_code status;
212 int len, i2c_intfc_num;
/* Initial guess: room for 40 capability elements */
216 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
218 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
219 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
220 device_printf(dev, "Unable to allocate cap memory\n");
224 /* This populates the hw struct */
225 status = i40e_aq_discover_capabilities(hw, buf, len,
226 &needed, i40e_aqc_opc_list_func_capabilities, NULL);
/* FW reports ENOMEM with the required size in 'needed' when the
 * buffer was too small */
228 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
230 /* retry once with a larger buffer */
234 } else if (status != I40E_SUCCESS) {
235 device_printf(dev, "capability discovery failed; status %s, error %s\n",
236 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
241 * Some devices have both MDIO and I2C; since this isn't reported
242 * by the FW, check registers to see if an I2C interface exists.
244 i2c_intfc_num = ixl_find_i2c_interface(pf);
245 if (i2c_intfc_num != -1)
248 /* Determine functions to use for driver I2C accesses */
/* NOTE(review): the case labels of this switch are not visible in this
 * view; the branches below appear to cover "best available", AQ,
 * register, and bit-bang access methods -- confirm against full source. */
249 switch (pf->i2c_access_method) {
/* XL710 with AQ API >= 1.7 supports I2C access via admin queue */
251 if (hw->mac.type == I40E_MAC_XL710 &&
252 hw->aq.api_maj_ver == 1 &&
253 hw->aq.api_min_ver >= 7) {
254 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
255 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
257 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
258 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
263 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
264 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
267 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
268 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
271 pf->read_i2c_byte = ixl_read_i2c_byte_bb;
272 pf->write_i2c_byte = ixl_write_i2c_byte_bb;
275 /* Should not happen */
276 device_printf(dev, "Error setting I2C access functions\n");
280 /* Print a subset of the capability information. */
282 "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
283 hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
284 hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
285 (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
286 (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
287 (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
293 /* For the set_advertise sysctl */
/*
 * On driver load, advertise the full set of supported link speeds so a
 * later unload/reload restores the default, then cache the sysctl-format
 * value in pf->advertised_speed.
 */
295 ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
297 device_t dev = pf->dev;
300 /* Make sure to initialize the device to the complete list of
301 * supported speeds on driver load, to ensure unloading and
302 * reloading the driver will restore this value.
304 err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
306 /* Non-fatal error */
307 device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
/* Convert AQ link-speed bitmap to the sysctl representation */
312 pf->advertised_speed =
313 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
/*
 * Tear down HW-facing structures: shut down the LAN HMC (if it was set
 * up), disable and shut down the admin queue, and release queue
 * allocations from the PF queue manager.
 */
317 ixl_teardown_hw_structs(struct ixl_pf *pf)
319 enum i40e_status_code status = 0;
320 struct i40e_hw *hw = &pf->hw;
321 device_t dev = pf->dev;
323 /* Shutdown LAN HMC */
/* hmc_obj is only non-NULL if the HMC was previously initialized */
324 if (hw->hmc.hmc_obj) {
325 status = i40e_shutdown_lan_hmc(hw);
328 "init: LAN HMC shutdown failure; status %s\n",
329 i40e_stat_str(hw, status));
334 /* Shutdown admin queue */
/* Mask interrupt 0 before stopping the AQ it services */
335 ixl_disable_intr0(hw);
336 status = i40e_shutdown_adminq(hw);
339 "init: Admin Queue shutdown failure; status %s\n",
340 i40e_stat_str(hw, status));
342 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
/*
 * Perform a PF reset and re-initialize the HW structures it destroys:
 * admin queue, capabilities, LAN HMC, switch config, PHY interrupt mask
 * and flow control. Each step bails out with a diagnostic on failure.
 */
348 ixl_reset(struct ixl_pf *pf)
350 struct i40e_hw *hw = &pf->hw;
351 device_t dev = pf->dev;
355 // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
357 error = i40e_pf_reset(hw);
359 device_printf(dev, "init: PF reset failure\n");
364 error = i40e_init_adminq(hw);
366 device_printf(dev, "init: Admin queue init failure;"
367 " status code %d\n", error);
372 i40e_clear_pxe_mode(hw);
375 error = ixl_get_hw_capabilities(pf);
377 device_printf(dev, "init: Error retrieving HW capabilities;"
378 " status code %d\n", error);
/* Re-create the Host Memory Cache contexts for the queue pairs */
382 error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
383 hw->func_caps.num_rx_qp, 0, 0);
385 device_printf(dev, "init: LAN HMC init failed; status code %d\n",
391 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
393 device_printf(dev, "init: LAN HMC config failed; status code %d\n",
399 // XXX: possible fix for panic, but our failure recovery is still broken
400 error = ixl_switch_config(pf);
402 device_printf(dev, "init: ixl_switch_config() failed: %d\n",
/* Re-arm link-event interrupt causes lost in the reset */
407 error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
410 device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
411 " aq_err %d\n", error, hw->aq.asq_last_status);
416 error = i40e_set_fc(hw, &set_fc_err_mask, true);
418 device_printf(dev, "init: setting link flow control failed; retcode %d,"
419 " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
423 // XXX: (Rebuild VSIs?)
425 /* Firmware delay workaround */
/* Old FW (< 4.33) needs an explicit link autoneg restart after reset */
426 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
427 (hw->aq.fw_maj_ver < 4)) {
429 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
431 device_printf(dev, "init: link restart failed, aq_err %d\n",
432 hw->aq.asq_last_status);
438 /* Re-enable admin queue interrupt */
440 ixl_configure_intr0_msix(pf);
441 ixl_enable_intr0(hw);
/* NOTE(review): this appears to be the tail of a separate reset-recovery
 * function (its signature is not visible in this view): rebuild HW
 * structures, clear the critical-error / reset-requested state bits,
 * and re-enable critical-error interrupt causes. Confirm the enclosing
 * function against the full source. */
447 ixl_rebuild_hw_structs_after_reset(pf);
449 /* The PF reset should have cleared any critical errors */
450 atomic_clear_32(&pf->state, IXL_PF_STATE_PF_CRIT_ERR);
451 atomic_clear_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
/* Re-arm the critical-error causes in the ICR0 enable register */
453 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
454 reg |= IXL_ICR0_CRIT_ERR_MASK;
455 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/*
 * NOTE(review): the function signature is not visible in this view;
 * from the body this is presumably the legacy/MSI (non-MSI-X) interrupt
 * filter (ixl_intr): it reads ICR0, defers VFLR and admin-queue work to
 * iflib tasks, and schedules the RX thread if queue 0 raised the cause.
 */
462 * TODO: Make sure this properly handles admin queue / single rx queue intr
467 struct ixl_pf *pf = arg;
468 struct i40e_hw *hw = &pf->hw;
469 struct ixl_vsi *vsi = &pf->vsi;
470 struct ixl_rx_queue *que = vsi->rx_queues;
476 // TODO: Check against proper field
478 /* Clear PBA at start of ISR if using legacy interrupts */
480 wr32(hw, I40E_PFINT_DYN_CTL0,
481 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
482 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
/* Read (and thereby acknowledge) pending interrupt causes */
485 icr0 = rd32(hw, I40E_PFINT_ICR0);
489 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
490 iflib_iov_intr_deferred(vsi->ctx);
493 // TODO!: Do the stuff that's done in ixl_msix_adminq here, too!
494 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
495 iflib_admin_intr_deferred(vsi->ctx);
497 // TODO: Is intr0 enabled somewhere else?
498 ixl_enable_intr0(hw);
/* Only wake the RX processing thread if queue 0 actually interrupted */
500 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
501 return (FILTER_SCHEDULE_THREAD);
503 return (FILTER_HANDLED);
507 /*********************************************************************
509 * MSI-X VSI Interrupt Service routine
511 **********************************************************************/
/*
 * Per-RX-queue MSI-X fast handler: update the queue's dynamic RX ITR
 * and defer the actual packet processing to the iflib task thread.
 */
513 ixl_msix_que(void *arg)
515 struct ixl_rx_queue *rx_que = arg;
519 ixl_set_queue_rx_itr(rx_que);
520 // ixl_set_queue_tx_itr(que);
522 return (FILTER_SCHEDULE_THREAD);
526 /*********************************************************************
528 * MSI-X Admin Queue Interrupt Service routine
530 **********************************************************************/
/*
 * Vector-0 (admin queue / "other causes") fast handler. Reads ICR0,
 * masks each handled cause out of the enable register so it stays off
 * until the deferred task services it, records state flags, and
 * schedules the admin task when needed.
 */
532 ixl_msix_adminq(void *arg)
534 struct ixl_pf *pf = arg;
535 struct i40e_hw *hw = &pf->hw;
536 device_t dev = pf->dev;
537 u32 reg, mask, rstat_reg;
538 bool do_task = FALSE;
540 DDPRINTF(dev, "begin");
/* Reading ICR0 acknowledges the pending causes */
544 reg = rd32(hw, I40E_PFINT_ICR0);
546 * For masking off interrupt causes that need to be handled before
547 * they can be re-enabled
549 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
551 /* Check on the cause */
552 if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
553 mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
/* Malicious driver detection: flag for the deferred task */
557 if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
558 mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
559 atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
563 if (reg & I40E_PFINT_ICR0_GRST_MASK) {
564 mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
565 device_printf(dev, "Reset Requested!\n");
566 rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
567 rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
568 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
569 device_printf(dev, "Reset type: ");
571 /* These others might be handled similarly to an EMPR reset */
572 case I40E_RESET_CORER:
575 case I40E_RESET_GLOBR:
578 case I40E_RESET_EMPR:
585 /* overload admin queue task to check reset progress */
586 atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
591 * PE / PCI / ECC exceptions are all handled in the same way:
592 * mask out these three causes, then request a PF reset
594 * TODO: I think at least ECC error requires a GLOBR, not PFR
596 if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
597 device_printf(dev, "ECC Error detected!\n");
598 if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
599 device_printf(dev, "PCI Exception detected!\n");
600 if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
601 device_printf(dev, "Critical Protocol Engine Error detected!\n");
602 /* Checks against the conditions above */
603 if (reg & IXL_ICR0_CRIT_ERR_MASK) {
604 mask &= ~IXL_ICR0_CRIT_ERR_MASK;
605 atomic_set_32(&pf->state,
606 IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR);
610 // TODO: Linux driver never re-enables this interrupt once it has been detected
611 // Then what is supposed to happen? A PF reset? Should it never happen?
612 // TODO: Parse out this error into something human readable
613 if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
/* Dump the HMC error info/data registers, then clear the info reg */
614 reg = rd32(hw, I40E_PFHMC_ERRORINFO);
615 if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
616 device_printf(dev, "HMC Error detected!\n");
617 device_printf(dev, "INFO 0x%08x\n", reg);
618 reg = rd32(hw, I40E_PFHMC_ERRORDATA);
619 device_printf(dev, "DATA 0x%08x\n", reg);
620 wr32(hw, I40E_PFHMC_ERRORINFO, 0);
625 if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
626 mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
627 iflib_iov_intr_deferred(pf->vsi.ctx);
/* Write back the (possibly reduced) enable mask, re-arm vector 0 */
631 wr32(hw, I40E_PFINT_ICR0_ENA, mask);
632 ixl_enable_intr0(hw);
635 return (FILTER_SCHEDULE_THREAD);
637 return (FILTER_HANDLED);
640 /*********************************************************************
643 * Routines for multicast and vlan filter management.
645 *********************************************************************/
/*
 * Sync the interface's multicast address list into HW filters.
 * First pass counts the addresses; if the count exceeds
 * MAX_MULTICAST_ADDR the HW MC filters are dropped and multicast
 * promiscuous mode is used instead. Otherwise a second pass adds a
 * SW filter per address and pushes them to HW in one batch.
 */
647 ixl_add_multi(struct ixl_vsi *vsi)
649 struct ifmultiaddr *ifma;
650 struct ifnet *ifp = vsi->ifp;
651 struct i40e_hw *hw = vsi->hw;
654 IOCTL_DEBUGOUT("ixl_add_multi: begin");
658 ** First just get a count, to decide if we
659 ** we simply use multicast promiscuous.
661 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
/* Only link-layer (MAC) entries count */
662 if (ifma->ifma_addr->sa_family != AF_LINK)
666 if_maddr_runlock(ifp);
668 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
669 /* delete existing MC filters */
670 ixl_del_hw_filters(vsi, mcnt);
671 i40e_aq_set_vsi_multicast_promiscuous(hw,
672 vsi->seid, TRUE, NULL);
678 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
679 if (ifma->ifma_addr->sa_family != AF_LINK)
681 ixl_add_mc_filter(vsi,
682 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
685 if_maddr_runlock(ifp);
/* Batch-program all newly added MC filters into hardware */
687 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
688 ixl_add_hw_filters(vsi, flags, mcnt);
691 IOCTL_DEBUGOUT("ixl_add_multi: end");
/*
 * Remove HW multicast filters for addresses no longer present in the
 * interface's multicast list. For each MC filter in the VSI filter
 * list, scan if_multiaddrs; unmatched filters are flagged
 * IXL_FILTER_DEL and then removed from HW in one batch.
 * (O(filters * addresses) scan -- fine for typical list sizes.)
 */
695 ixl_del_multi(struct ixl_vsi *vsi)
697 struct ifnet *ifp = vsi->ifp;
698 struct ifmultiaddr *ifma;
699 struct ixl_mac_filter *f;
703 IOCTL_DEBUGOUT("ixl_del_multi: begin");
705 /* Search for removed multicast addresses */
707 SLIST_FOREACH(f, &vsi->ftl, next) {
708 if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
710 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
711 if (ifma->ifma_addr->sa_family != AF_LINK)
713 u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
714 if (cmp_etheraddr(f->macaddr, mc_addr)) {
719 if (match == FALSE) {
/* Address disappeared from the OS list: mark for deletion */
720 f->flags |= IXL_FILTER_DEL;
725 if_maddr_runlock(ifp);
727 ixl_del_hw_filters(vsi, mcnt);
/*
 * Log a NOTICE-level link-up message with speed, requested/negotiated
 * FEC mode, autoneg status and negotiated flow control direction.
 * ixl_fec_string: [0]=RS-FEC, [1]=FC-FEC/BASE-R, [2]=none.
 */
734 ixl_link_up_msg(struct ixl_pf *pf)
736 struct i40e_hw *hw = &pf->hw;
737 struct ifnet *ifp = pf->vsi.ifp;
738 char *req_fec_string, *neg_fec_string;
741 fec_abilities = hw->phy.link_info.req_fec_info;
742 /* If both RS and KR are requested, only show RS */
743 if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
744 req_fec_string = ixl_fec_string[0];
745 else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
746 req_fec_string = ixl_fec_string[1];
748 req_fec_string = ixl_fec_string[2];
/* Same RS-over-KR preference for the negotiated FEC mode */
750 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
751 neg_fec_string = ixl_fec_string[0];
752 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
753 neg_fec_string = ixl_fec_string[1];
755 neg_fec_string = ixl_fec_string[2];
757 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
759 ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
760 req_fec_string, neg_fec_string,
761 (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
/* FC index: 3=full (TX+RX pause), 2=TX only, 1=RX only, 0=none */
762 (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
763 hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
764 ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
765 ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
766 ixl_fc_string[1] : ixl_fc_string[0]);
770 * Configure admin queue/misc interrupt cause registers in hardware.
/*
 * Set up MSI-X vector 0 ("other causes"): enable the non-queue
 * interrupt causes, detach vector 0 from the queue linked list, set a
 * fixed ITR, and enable software-triggered interrupts on it.
 */
773 ixl_configure_intr0_msix(struct ixl_pf *pf)
775 struct i40e_hw *hw = &pf->hw;
778 /* First set up the adminq - vector 0 */
779 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
780 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
/* Enable the causes ixl_msix_adminq() handles */
782 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
783 I40E_PFINT_ICR0_ENA_GRST_MASK |
784 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
785 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
786 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
787 I40E_PFINT_ICR0_ENA_VFLR_MASK |
788 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
789 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
790 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
793 * 0x7FF is the end of the queue list.
794 * This means we won't use MSI-X vector 0 for a queue interrupt
797 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
798 /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
799 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
/* Allow SW-triggered interrupts on vector 0 */
801 wr32(hw, I40E_PFINT_DYN_CTL0,
802 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
803 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
805 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
809 * Configure queue interrupt cause registers in hardware.
811 * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
/*
 * Program the per-vector interrupt cause linked list: each MSI-X queue
 * vector heads a list containing queue pair i's RX cause, then its TX
 * cause, terminated with EOL. Iterates over max(rx,tx) queue counts so
 * asymmetric configurations are fully covered.
 */
814 ixl_configure_queue_intr_msix(struct ixl_pf *pf)
816 struct i40e_hw *hw = &pf->hw;
817 struct ixl_vsi *vsi = &pf->vsi;
821 // TODO: See if max is really necessary
822 for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
823 /* Make sure interrupt is disabled */
824 wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
825 /* Set linked list head to point to corresponding RX queue
826 * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */
827 reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
828 & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
829 ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
830 & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
831 wr32(hw, I40E_PFINT_LNKLSTN(i), reg);
/* RX cause: bind queue i's RX to this vector, next entry = TX queue i */
833 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
834 (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
835 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
836 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
837 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
838 wr32(hw, I40E_QINT_RQCTL(i), reg);
/* TX cause: same vector, list terminated with IXL_QUEUE_EOL */
840 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
841 (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
842 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
843 (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
844 (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
845 wr32(hw, I40E_QINT_TQCTL(i), reg);
850 * Configure for single interrupt vector operation
/*
 * Legacy/MSI setup: everything (queue pair 0 + "other" causes) shares
 * one interrupt vector. Programs ITR0 for both directions, enables the
 * misc causes, and links queue 0's RX and TX causes onto vector 0.
 */
853 ixl_configure_legacy(struct ixl_pf *pf)
855 struct i40e_hw *hw = &pf->hw;
856 struct ixl_vsi *vsi = &pf->vsi;
862 vsi->tx_itr_setting = pf->tx_itr;
863 wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
864 vsi->tx_itr_setting);
865 txr->itr = vsi->tx_itr_setting;
867 vsi->rx_itr_setting = pf->rx_itr;
868 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
869 vsi->rx_itr_setting);
870 rxr->itr = vsi->rx_itr_setting;
871 /* XXX: Assuming only 1 queue in single interrupt mode */
873 vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;
875 /* Setup "other" causes */
/* Same cause set that ixl_configure_intr0_msix() enables */
876 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
877 | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
878 | I40E_PFINT_ICR0_ENA_GRST_MASK
879 | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
880 | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
881 | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
882 | I40E_PFINT_ICR0_ENA_VFLR_MASK
883 | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
885 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
887 /* No ITR for non-queue interrupts */
888 wr32(hw, I40E_PFINT_STAT_CTL0,
889 IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
891 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
892 wr32(hw, I40E_PFINT_LNKLST0, 0);
894 /* Associate the queue pair to the vector and enable the q int */
895 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
896 | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
897 | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
898 wr32(hw, I40E_QINT_RQCTL(0), reg);
/* TX cause terminates the list (EOL) */
900 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
901 | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
902 | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
903 wr32(hw, I40E_QINT_TQCTL(0), reg);
/*
 * Release PCI-level resources: the admin IRQ, every per-RX-queue IRQ,
 * and the BAR memory mapping. Safe to call before queues are set up.
 */
907 ixl_free_pci_resources(struct ixl_pf *pf)
909 struct ixl_vsi *vsi = &pf->vsi;
910 device_t dev = iflib_get_dev(vsi->ctx)
911 struct ixl_rx_queue *rx_que = vsi->rx_queues;
913 /* We may get here before stations are set up */
918 ** Release all MSI-X VSI resources:
920 iflib_irq_free(vsi->ctx, &vsi->irq);
922 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
923 iflib_irq_free(vsi->ctx, &rx_que->que_irq);
925 if (pf->pci_mem != NULL)
926 bus_release_resource(dev, SYS_RES_MEMORY,
927 rman_get_rid(pf->pci_mem), pf->pci_mem);
/*
 * Register an ifmedia entry for every media type the PHY capability
 * bitmap reports, translating I40E_CAP_PHY_TYPE_* bits into IFM_*
 * media words (100M through 40G, plus backplane KX/KR variants).
 */
931 ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
933 /* Display supported media types */
934 if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
935 ifmedia_add(vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
937 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
938 ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
939 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
940 ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
941 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
942 ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
/* Several 10G attach types all present as TWINAX to ifmedia */
944 if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
945 phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
946 phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
947 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
949 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
950 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
951 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
952 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
953 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
954 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
956 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
957 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
958 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
959 phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
960 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
961 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
962 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
963 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
964 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
965 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
/* Backplane (KX/KR) and remaining 10G/20G/25G media types */
967 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
968 ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
970 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
971 || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
972 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
973 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
974 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
975 if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
976 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
977 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
978 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
979 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
980 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
982 if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
983 ifmedia_add(vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
985 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
986 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
987 if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
988 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
990 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
991 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
992 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
993 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
994 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
995 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
996 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
997 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
998 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
999 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
1000 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
1001 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
1004 /*********************************************************************
1006 * Setup networking device structure and register an interface.
1008 **********************************************************************/
/*
 * Configure the iflib/ifnet interface: max frame size, PHY
 * capabilities (with one retry for slow-to-detect fiber modules),
 * baudrate from the fastest supported speed, and the ifmedia list
 * defaulting to autoselect.
 */
1010 ixl_setup_interface(device_t dev, struct ixl_pf *pf)
1012 struct ixl_vsi *vsi = &pf->vsi;
1013 if_ctx_t ctx = vsi->ctx;
1014 struct i40e_hw *hw = &pf->hw;
1015 struct ifnet *ifp = iflib_get_ifp(ctx);
1016 struct i40e_aq_get_phy_abilities_resp abilities;
1017 enum i40e_status_code aq_error = 0;
1019 INIT_DBG_DEV(dev, "begin");
/* Max frame = MTU + Ethernet header + CRC + one VLAN tag */
1021 vsi->shared->isc_max_frame_size =
1022 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1023 + ETHER_VLAN_ENCAP_LEN;
1025 aq_error = i40e_aq_get_phy_capabilities(hw,
1026 FALSE, TRUE, &abilities, NULL);
1027 /* May need delay to detect fiber correctly */
1028 if (aq_error == I40E_ERR_UNKNOWN_PHY) {
1029 /* TODO: Maybe just retry this in a task... */
1030 i40e_msec_delay(200);
1031 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
1032 TRUE, &abilities, NULL);
1035 if (aq_error == I40E_ERR_UNKNOWN_PHY)
1036 device_printf(dev, "Unknown PHY type detected!\n");
1039 "Error getting supported media types, err %d,"
1040 " AQ error %d\n", aq_error, hw->aq.asq_last_status);
1042 pf->supported_speeds = abilities.link_speed;
/* if_initbaudrate was replaced by if_setbaudrate in FreeBSD 11 */
1043 #if __FreeBSD_version >= 1100000
1044 if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
1046 if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
1049 ixl_add_ifmedia(vsi, hw->phy.phy_types);
1052 /* Use autoselect media by default */
1053 ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1054 ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
/*
 * Map the highest set bit in an AQ link-speed bitmap to a baudrate
 * value, falling back to 100 Mb/s as the minimum supported speed.
 * NOTE(review): the return statements for the 40G..1G branches are not
 * visible in this view (presumably IF_Gbps(...) values) -- confirm
 * against the full source.
 */
1060 * Input: bitmap of enum i40e_aq_link_speed
1063 ixl_max_aq_speed_to_value(u8 link_speeds)
1065 if (link_speeds & I40E_LINK_SPEED_40GB)
1067 if (link_speeds & I40E_LINK_SPEED_25GB)
1069 if (link_speeds & I40E_LINK_SPEED_20GB)
1071 if (link_speeds & I40E_LINK_SPEED_10GB)
1073 if (link_speeds & I40E_LINK_SPEED_1GB)
1075 if (link_speeds & I40E_LINK_SPEED_100MB)
1076 return IF_Mbps(100);
1078 /* Minimum supported link speed */
1079 return IF_Mbps(100);
1083 ** Run when the Admin Queue gets a link state change interrupt.
/*
 * Admin-queue link event handler: refresh link state from the adapter
 * and warn if link failed because an unqualified module was detected.
 */
1086 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1088 struct i40e_hw *hw = &pf->hw;
1089 device_t dev = iflib_get_dev(pf->vsi.ctx);
/* Event payload carries the AQ get-link-status response */
1090 struct i40e_aqc_get_link_status *status =
1091 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
1093 /* Request link status from adapter */
1094 hw->phy.get_link_info = TRUE;
1095 i40e_get_link_status(hw, &pf->link_up);
1097 /* Print out message if an unqualified module is found */
1098 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
1099 (pf->advertised_speed) &&
1100 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
1101 (!(status->link_info & I40E_AQ_LINK_UP)))
1102 device_printf(dev, "Link failed because "
1103 "an unqualified module was detected!\n");
1105 /* OS link info is updated elsewhere */
1108 /*********************************************************************
1110 * Get Firmware Switch configuration
1111 * - this will need to be more robust when more complex
1112 * switch configurations are enabled.
1114 **********************************************************************/
/*
 * Query the FW switch configuration and record the first element's
 * SEIDs in the VSI (single-VSI simplification; see header note).
 */
1116 ixl_switch_config(struct ixl_pf *pf)
1118 struct i40e_hw *hw = &pf->hw;
1119 struct ixl_vsi *vsi = &pf->vsi;
1120 device_t dev = iflib_get_dev(vsi->ctx);
1121 struct i40e_aqc_get_switch_config_resp *sw_config;
/* Stack buffer large enough for the AQ large-buffer response */
1122 u8 aq_buf[I40E_AQ_LARGE_BUF];
1126 memset(&aq_buf, 0, sizeof(aq_buf));
1127 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
1128 ret = i40e_aq_get_switch_config(hw, sw_config,
1129 sizeof(aq_buf), &next, NULL);
1131 device_printf(dev, "aq_get_switch_config() failed, error %d,"
1132 " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
/* Optional debug dump of every reported switch element */
1135 if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
1137 "Switch config: header reported: %d in structure, %d total\n",
1138 sw_config->header.num_reported, sw_config->header.num_total);
1139 for (int i = 0; i < sw_config->header.num_reported; i++) {
1141 "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
1142 sw_config->element[i].element_type,
1143 sw_config->element[i].seid,
1144 sw_config->element[i].uplink_seid,
1145 sw_config->element[i].downlink_seid);
1148 /* Simplified due to a single VSI */
1149 vsi->uplink_seid = sw_config->element[0].uplink_seid;
1150 vsi->downlink_seid = sw_config->element[0].downlink_seid;
1151 vsi->seid = sw_config->element[0].seid;
1155 /*********************************************************************
1157 * Initialize the VSI: this handles contexts, which means things
1158 * like the number of descriptors, buffer size,
1159 * plus we init the rings thru this function.
1161 **********************************************************************/
/*
 * Program the PF VSI with firmware (queue mapping, VLAN stripping, iWARP
 * queueing options), then write the HMC TX/RX queue contexts for every
 * ring and (re)initialize the rings themselves.
 *
 * NOTE(review): extraction artifact — declarations of `err`, `tc_queues`,
 * `txctl`, several braces and return paths are missing from this view;
 * verify against upstream before relying on exact control flow.
 */
1163 ixl_initialize_vsi(struct ixl_vsi *vsi)
1165 struct ixl_pf *pf = vsi->back;
1166 if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx);
1167 struct ixl_tx_queue *tx_que = vsi->tx_queues;
1168 struct ixl_rx_queue *rx_que = vsi->rx_queues;
1169 device_t dev = iflib_get_dev(vsi->ctx);
1170 struct i40e_hw *hw = vsi->hw;
1171 struct i40e_vsi_context ctxt;
/* Fetch the current VSI parameters from firmware into ctxt. */
1175 memset(&ctxt, 0, sizeof(ctxt));
1176 ctxt.seid = vsi->seid;
1177 if (pf->veb_seid != 0)
1178 ctxt.uplink_seid = pf->veb_seid;
1179 ctxt.pf_num = hw->pf_id;
1180 err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
1182 device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
1183 " aq_error %d\n", err, hw->aq.asq_last_status);
1186 ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
1187 "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
1188 "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
1189 "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
1190 ctxt.uplink_seid, ctxt.vsi_number,
1191 ctxt.vsis_allocated, ctxt.vsis_unallocated,
1192 ctxt.flags, ctxt.pf_num, ctxt.vf_num,
1193 ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
1195 ** Set the queue and traffic class bits
1196 ** - when multiple traffic classes are supported
1197 ** this will need to be more robust.
1199 ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1200 ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
1201 /* In contig mode, que_mapping[0] is first queue index used by this VSI */
1202 ctxt.info.queue_mapping[0] = 0;
1204 * This VSI will only use traffic class 0; start traffic class 0's
1205 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
1206 * the driver may not use all of them).
/* fls() of the allocated-queue count yields the power-of-two exponent. */
1208 tc_queues = fls(pf->qtag.num_allocated) - 1;
1209 ctxt.info.tc_mapping[0] = ((pf->qtag.first_qidx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
1210 & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
1211 ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
1212 & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
1214 /* Set VLAN receive stripping mode */
1215 ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
1216 ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
/* Strip VLAN tags in hardware only when the ifnet has HWTAGGING enabled. */
1217 if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
1218 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1220 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1223 /* Set TCP Enable for iWARP capable VSI */
1224 if (ixl_enable_iwarp && pf->iw_enabled) {
1225 ctxt.info.valid_sections |=
1226 htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
1227 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
1230 /* Save VSI number and info for use later */
1231 vsi->vsi_num = ctxt.vsi_number;
1232 bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1234 /* Reset VSI statistics */
1235 ixl_vsi_reset_stats(vsi);
1236 vsi->hw_filters_add = 0;
1237 vsi->hw_filters_del = 0;
/* Push the edited context back to firmware as a PF-type VSI. */
1239 ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
1241 err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
1243 device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
1244 " aq_error %d\n", err, hw->aq.asq_last_status);
/* Per-TX-queue: build and program the HMC TX context, then init the ring. */
1248 for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
1249 struct tx_ring *txr = &tx_que->txr;
1250 struct i40e_hmc_obj_txq tctx;
1253 /* Setup the HMC TX Context */
1254 bzero(&tctx, sizeof(tctx));
1255 tctx.new_context = 1;
1256 tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
1257 tctx.qlen = scctx->isc_ntxd[0];
1258 tctx.fc_ena = 0; /* Disable FCoE */
1260 * This value needs to pulled from the VSI that this queue
1261 * is assigned to. Index into array is traffic class.
1263 tctx.rdylist = vsi->info.qs_handle[0];
1265 * Set these to enable Head Writeback
1266 * - Address is last entry in TX ring (reserved for HWB index)
1267 * Leave these as 0 for Descriptor Writeback
1269 if (vsi->enable_head_writeback) {
1270 tctx.head_wb_ena = 1;
1271 tctx.head_wb_addr = txr->tx_paddr +
1272 (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
1274 tctx.head_wb_ena = 0;
1275 tctx.head_wb_addr = 0;
1277 tctx.rdylist_act = 0;
1278 err = i40e_clear_lan_tx_queue_context(hw, i);
1280 device_printf(dev, "Unable to clear TX context\n");
1283 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
1285 device_printf(dev, "Unable to set TX context\n");
1288 /* Associate the ring with this PF */
1289 txctl = I40E_QTX_CTL_PF_QUEUE;
1290 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
1291 I40E_QTX_CTL_PF_INDX_MASK);
1292 wr32(hw, I40E_QTX_CTL(i), txctl);
1295 /* Do ring (re)init */
1296 ixl_init_tx_ring(vsi, tx_que);
/* Per-RX-queue: build and program the HMC RX context, then zero the tail. */
1298 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
1299 struct rx_ring *rxr = &rx_que->rxr;
1300 struct i40e_hmc_obj_rxq rctx;
1302 /* Next setup the HMC RX Context */
1303 rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
/* Hardware can chain rx_buf_chain_len buffers; cap rxmax accordingly. */
1305 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
1307 /* Set up an RX context for the HMC */
1308 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
1309 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
1310 /* ignore header split for now */
1311 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
1312 rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
1313 scctx->isc_max_frame_size : max_rxmax;
1315 rctx.dsize = 1; /* do 32byte descriptors */
1316 rctx.hsplit_0 = 0; /* no header split */
1317 rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
1318 rctx.qlen = scctx->isc_nrxd[0];
1319 rctx.tphrdesc_ena = 1;
1320 rctx.tphwdesc_ena = 1;
1321 rctx.tphdata_ena = 0; /* Header Split related */
1322 rctx.tphhead_ena = 0; /* Header Split related */
1323 rctx.lrxqthresh = 1; /* Interrupt at <64 desc avail */
1326 rctx.showiv = 1; /* Strip inner VLAN header */
1327 rctx.fc_ena = 0; /* Disable FCoE */
1328 rctx.prefena = 1; /* Prefetch descriptors */
1330 err = i40e_clear_lan_rx_queue_context(hw, i);
1333 "Unable to clear RX context %d\n", i);
1336 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
1338 device_printf(dev, "Unable to set RX context %d\n", i);
1341 wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
/*
 * Drain the VSI's software MAC-filter list (vsi->ftl), unlinking each
 * entry from the head of the SLIST in turn.
 * NOTE(review): the per-entry free() and closing brace are not visible in
 * this truncated view — presumably each `f` is freed inside the loop.
 */
1347 ixl_free_mac_filters(struct ixl_vsi *vsi)
1349 struct ixl_mac_filter *f;
1351 while (!SLIST_EMPTY(&vsi->ftl)) {
1352 f = SLIST_FIRST(&vsi->ftl);
1353 SLIST_REMOVE_HEAD(&vsi->ftl, next);
1359 ** Provide a update to the queue RX
1360 ** interrupt moderation value.
/*
 * Adjust the RX interrupt throttle rate (ITR) for one queue.
 * In dynamic mode the latency class is stepped up/down from the observed
 * bytes-per-interrupt ratio and the new ITR is exponentially smoothed
 * before being written to the PFINT_ITRN register; otherwise the queue is
 * synced to the static vsi->rx_itr_setting.
 * NOTE(review): declarations of rx_itr/rx_latency/rx_bytes, `break`s, and
 * early `return`s are missing from this truncated view.
 */
1363 ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
1365 struct ixl_vsi *vsi = que->vsi;
1366 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1367 struct i40e_hw *hw = vsi->hw;
1368 struct rx_ring *rxr = &que->rxr;
1373 /* Idle, do nothing */
1374 if (rxr->bytes == 0)
1377 if (pf->dynamic_rx_itr) {
1378 rx_bytes = rxr->bytes/rxr->itr;
1381 /* Adjust latency range */
1382 switch (rxr->latency) {
1383 case IXL_LOW_LATENCY:
1384 if (rx_bytes > 10) {
1385 rx_latency = IXL_AVE_LATENCY;
1386 rx_itr = IXL_ITR_20K;
1389 case IXL_AVE_LATENCY:
1390 if (rx_bytes > 20) {
1391 rx_latency = IXL_BULK_LATENCY;
1392 rx_itr = IXL_ITR_8K;
1393 } else if (rx_bytes <= 10) {
1394 rx_latency = IXL_LOW_LATENCY;
1395 rx_itr = IXL_ITR_100K;
1398 case IXL_BULK_LATENCY:
1399 if (rx_bytes <= 20) {
1400 rx_latency = IXL_AVE_LATENCY;
1401 rx_itr = IXL_ITR_20K;
1406 rxr->latency = rx_latency;
1408 if (rx_itr != rxr->itr) {
1409 /* do an exponential smoothing */
1410 rx_itr = (10 * rx_itr * rxr->itr) /
1411 ((9 * rx_itr) + rxr->itr);
/* Clamp to the hardware maximum before writing the register. */
1412 rxr->itr = min(rx_itr, IXL_MAX_ITR);
1413 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
1414 rxr->me), rxr->itr);
1416 } else { /* We may have have toggled to non-dynamic */
1417 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
1418 vsi->rx_itr_setting = pf->rx_itr;
1419 /* Update the hardware if needed */
1420 if (rxr->itr != vsi->rx_itr_setting) {
1421 rxr->itr = vsi->rx_itr_setting;
1422 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
1423 rxr->me), rxr->itr);
1432 ** Provide a update to the queue TX
1433 ** interrupt moderation value.
/*
 * TX twin of ixl_set_queue_rx_itr(): adjust the TX interrupt throttle
 * rate for one queue, dynamically (latency-class stepping plus exponential
 * smoothing) or statically from vsi->tx_itr_setting.
 * NOTE(review): same truncation caveats as the RX variant — local
 * declarations, `break`s, and early `return`s are not visible here.
 */
1436 ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
1438 struct ixl_vsi *vsi = que->vsi;
1439 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1440 struct i40e_hw *hw = vsi->hw;
1441 struct tx_ring *txr = &que->txr;
1447 /* Idle, do nothing */
1448 if (txr->bytes == 0)
1451 if (pf->dynamic_tx_itr) {
1452 tx_bytes = txr->bytes/txr->itr;
1455 switch (txr->latency) {
1456 case IXL_LOW_LATENCY:
1457 if (tx_bytes > 10) {
1458 tx_latency = IXL_AVE_LATENCY;
1459 tx_itr = IXL_ITR_20K;
1462 case IXL_AVE_LATENCY:
1463 if (tx_bytes > 20) {
1464 tx_latency = IXL_BULK_LATENCY;
1465 tx_itr = IXL_ITR_8K;
1466 } else if (tx_bytes <= 10) {
1467 tx_latency = IXL_LOW_LATENCY;
1468 tx_itr = IXL_ITR_100K;
1471 case IXL_BULK_LATENCY:
1472 if (tx_bytes <= 20) {
1473 tx_latency = IXL_AVE_LATENCY;
1474 tx_itr = IXL_ITR_20K;
1479 txr->latency = tx_latency;
1481 if (tx_itr != txr->itr) {
1482 /* do an exponential smoothing */
1483 tx_itr = (10 * tx_itr * txr->itr) /
1484 ((9 * tx_itr) + txr->itr);
/* Clamp to the hardware maximum before writing the register. */
1485 txr->itr = min(tx_itr, IXL_MAX_ITR);
1486 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1487 txr->me), txr->itr);
1490 } else { /* We may have have toggled to non-dynamic */
1491 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
1492 vsi->tx_itr_setting = pf->tx_itr;
1493 /* Update the hardware if needed */
1494 if (txr->itr != vsi->tx_itr_setting) {
1495 txr->itr = vsi->tx_itr_setting;
1496 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1497 txr->me), txr->itr);
1507 * ixl_sysctl_qtx_tail_handler
1508 * Retrieves I40E_QTX_TAIL value from hardware
/*
 * sysctl handler: read the live TX-queue tail register for the queue
 * passed via oid_arg1 and report it through sysctl_handle_int().
 * Returns 0 when the queue pointer is NULL (nothing to report).
 */
1512 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
1514 struct ixl_tx_queue *tx_que;
1518 tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
1519 if (!tx_que) return 0;
1521 val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
1522 error = sysctl_handle_int(oidp, &val, 0, req);
1523 if (error || !req->newptr)
1529 * ixl_sysctl_qrx_tail_handler
1530 * Retrieves I40E_QRX_TAIL value from hardware
/*
 * sysctl handler: read the live RX-queue tail register for the queue
 * passed via oid_arg1 and report it through sysctl_handle_int().
 * Returns 0 when the queue pointer is NULL (nothing to report).
 */
1534 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
1536 struct ixl_rx_queue *rx_que;
1540 rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
1541 if (!rx_que) return 0;
1543 val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
1544 error = sysctl_handle_int(oidp, &val, 0, req);
1545 if (error || !req->newptr)
1552 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
1553 * Writes to the ITR registers immediately.
/*
 * sysctl handler: validate and apply a new static TX ITR for the PF.
 * Rejects the write while dynamic TX ITR is enabled, and rejects values
 * outside [0, IXL_MAX_ITR]; on success stores pf->tx_itr and pushes it to
 * hardware via ixl_configure_tx_itr().
 */
1556 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
1558 struct ixl_pf *pf = (struct ixl_pf *)arg1;
1559 device_t dev = pf->dev;
1561 int requested_tx_itr;
1563 requested_tx_itr = pf->tx_itr;
1564 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
1565 if ((error) || (req->newptr == NULL))
1567 if (pf->dynamic_tx_itr) {
1569 "Cannot set TX itr value while dynamic TX itr is enabled\n");
1572 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
1574 "Invalid TX itr value; value must be between 0 and %d\n",
1579 pf->tx_itr = requested_tx_itr;
1580 ixl_configure_tx_itr(pf);
1586 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
1587 * Writes to the ITR registers immediately.
/*
 * sysctl handler: RX twin of ixl_sysctl_pf_tx_itr(). Validates the
 * requested value (not in dynamic mode, within [0, IXL_MAX_ITR]), stores
 * pf->rx_itr, and applies it via ixl_configure_rx_itr().
 */
1590 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
1592 struct ixl_pf *pf = (struct ixl_pf *)arg1;
1593 device_t dev = pf->dev;
1595 int requested_rx_itr;
1597 requested_rx_itr = pf->rx_itr;
1598 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
1599 if ((error) || (req->newptr == NULL))
1601 if (pf->dynamic_rx_itr) {
1603 "Cannot set RX itr value while dynamic RX itr is enabled\n");
1606 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
1608 "Invalid RX itr value; value must be between 0 and %d\n",
1613 pf->rx_itr = requested_rx_itr;
1614 ixl_configure_rx_itr(pf);
/*
 * Register the PF's statistics sysctl tree: driver-level counters
 * (admin_irq), the PF VSI node, per-queue nodes, and the MAC statistics
 * node, all under the device's sysctl tree.
 */
1620 ixl_add_hw_stats(struct ixl_pf *pf)
1622 struct ixl_vsi *vsi = &pf->vsi;
1623 device_t dev = iflib_get_dev(vsi->ctx);
1624 struct i40e_hw_port_stats *pf_stats = &pf->stats;
1626 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1627 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1628 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1630 /* Driver statistics */
1631 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
1632 CTLFLAG_RD, &pf->admin_irq,
1633 "Admin Queue IRQs received");
1635 ixl_add_vsi_sysctls(dev, vsi, ctx, "pf");
1637 ixl_add_queues_sysctls(dev, vsi);
1639 ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
/*
 * Create the "mac" sysctl node and attach read-only UQUAD sysctls for
 * each port-level MAC counter, plus the shared ethernet statistics via
 * ixl_add_sysctls_eth_stats(). The table is terminated by an entry whose
 * stat pointer is NULL, which ends the registration loop.
 */
1643 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
1644 struct sysctl_oid_list *child,
1645 struct i40e_hw_port_stats *stats)
1647 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1648 CTLFLAG_RD, NULL, "Mac Statistics");
1649 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
1651 struct i40e_eth_stats *eth_stats = &stats->eth;
1652 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
/* name/description table mapping each hw counter to its sysctl leaf */
1654 struct ixl_sysctl_info ctls[] =
1656 {&stats->crc_errors, "crc_errors", "CRC Errors"},
1657 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
1658 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
1659 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
1660 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
1661 /* Packet Reception Stats */
1662 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
1663 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
1664 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
1665 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
1666 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
1667 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
1668 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
1669 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
1670 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
1671 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
1672 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
1673 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
1674 /* Packet Transmission Stats */
1675 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
1676 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
1677 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
1678 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
1679 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
1680 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
1681 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
1683 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
1684 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
1685 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
1686 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
/* Walk the table until the NULL-stat sentinel entry. */
1691 struct ixl_sysctl_info *entry = ctls;
1692 while (entry->stat != 0)
1694 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
1695 CTLFLAG_RD, entry->stat,
1696 entry->description);
/*
 * Program the RSS hash key. The key source is the kernel RSS subsystem
 * (rss_getkey) when available, else the driver default. On X722 the key
 * is set through the Admin Queue (i40e_aq_set_rss_key); on other MACs it
 * is written directly to the PFQF_HKEY registers.
 * NOTE(review): the 52-byte bcopy size presumably matches
 * sizeof(struct i40e_aqc_get_set_rss_key_data) — confirm against the AQ
 * structure definition.
 */
1702 ixl_set_rss_key(struct ixl_pf *pf)
1704 struct i40e_hw *hw = &pf->hw;
1705 struct ixl_vsi *vsi = &pf->vsi;
1706 device_t dev = pf->dev;
1707 u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
1708 enum i40e_status_code status;
1711 /* Fetch the configured RSS key */
1712 rss_getkey((uint8_t *) &rss_seed);
1714 ixl_get_default_rss_key(rss_seed);
1716 /* Fill out hash function seed */
1717 if (hw->mac.type == I40E_MAC_X722) {
1718 struct i40e_aqc_get_set_rss_key_data key_data;
1719 bcopy(rss_seed, &key_data, 52);
1720 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
1723 "i40e_aq_set_rss_key status %s, error %s\n",
1724 i40e_stat_str(hw, status),
1725 i40e_aq_str(hw, hw->aq.asq_last_status));
1727 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1728 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
1733 * Configure enabled PCTYPES for RSS.
/*
 * Build the 64-bit HENA (hash-enable) mask of packet classifier types and
 * write it to PFQF_HENA(0)/(1). When the kernel RSS option is active the
 * mask is derived from rss_gethashconfig(); otherwise a per-MAC default
 * (X722 vs XL710) is used. The new bits are OR-merged with the current
 * register contents rather than replacing them.
 */
1736 ixl_set_rss_pctypes(struct ixl_pf *pf)
1738 struct i40e_hw *hw = &pf->hw;
1739 u64 set_hena = 0, hena;
1742 u32 rss_hash_config;
1744 rss_hash_config = rss_gethashconfig();
1745 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1746 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1747 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1748 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1749 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1750 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1751 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1752 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1753 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1754 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1755 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1756 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1757 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1758 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
1760 if (hw->mac.type == I40E_MAC_X722)
1761 set_hena = IXL_DEFAULT_RSS_HENA_X722;
1763 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
/* Merge with what is already enabled, then write both 32-bit halves. */
1765 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1766 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1768 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1769 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
/*
 * Populate the RSS lookup table (LUT), spreading entries across the RX
 * queues round-robin (or per the kernel RSS indirection table when RSS is
 * enabled). X722 programs the LUT via i40e_aq_set_rss_lut(); other MACs
 * write PFQF_HLUT registers directly, four 8-bit entries per 32-bit write.
 * NOTE(review): declarations of hlut_buf/lut/que_id and the store of
 * `lut` into hlut_buf[i] are missing from this truncated view.
 */
1774 ixl_set_rss_hlut(struct ixl_pf *pf)
1776 struct i40e_hw *hw = &pf->hw;
1777 struct ixl_vsi *vsi = &pf->vsi;
1778 device_t dev = iflib_get_dev(vsi->ctx);
1780 int lut_entry_width;
1782 enum i40e_status_code status;
1784 lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
1786 /* Populate the LUT with max no. of queues in round robin fashion */
1788 for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
1791 * Fetch the RSS bucket id for the given indirection entry.
1792 * Cap it at the number of configured buckets (which is
1795 que_id = rss_get_indirection_to_bucket(i);
1796 que_id = que_id % vsi->num_rx_queues;
1798 que_id = i % vsi->num_rx_queues;
/* Mask the queue id down to the LUT entry width the hw reports. */
1800 lut = (que_id & ((0x1 << lut_entry_width) - 1));
1804 if (hw->mac.type == I40E_MAC_X722) {
1805 status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
1807 device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1808 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1810 for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
1811 wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
1817 ** Setup the PF's RSS parameters.
/*
 * Top-level RSS configuration: program the hash key, the enabled packet
 * classifier types (HENA), and the lookup table, in that order.
 */
1820 ixl_config_rss(struct ixl_pf *pf)
1822 ixl_set_rss_key(pf);
1823 ixl_set_rss_pctypes(pf);
1824 ixl_set_rss_hlut(pf);
1828 ** This routine updates vlan filters, called by init
1829 ** it scans the filter table and then updates the hw
1830 ** after a soft reset.
/*
 * Re-install VLAN filters in hardware after a reset: scan the software
 * filter list for IXL_FILTER_VLAN entries, mark them for addition, and
 * push them to the hardware in one ixl_add_hw_filters() call.
 * NOTE(review): the marking statements inside the loop and the `cnt`
 * accounting are missing from this truncated view.
 */
1833 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
1835 struct ixl_mac_filter *f;
/* Nothing to do when no VLANs are registered on this VSI. */
1838 if (vsi->num_vlans == 0)
1841 ** Scan the filter list for vlan entries,
1842 ** mark them for addition and then call
1843 ** for the AQ update.
1845 SLIST_FOREACH(f, &vsi->ftl, next) {
1846 if (f->flags & IXL_FILTER_VLAN) {
1854 printf("setup vlan: no filters found!\n");
1857 flags = IXL_FILTER_VLAN;
1858 flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
1859 ixl_add_hw_filters(vsi, flags, cnt);
1863 * In some firmware versions there is default MAC/VLAN filter
1864 * configured which interferes with filters managed by driver.
1865 * Make sure it's removed.
/*
 * Issue two remove-macvlan AQ commands for the permanent MAC address:
 * first a perfect-match (MAC + VLAN) delete, then a delete that ignores
 * the VLAN, covering both forms the firmware may have pre-installed.
 * Failures are intentionally ignored — the filter may not exist.
 */
1868 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1870 struct i40e_aqc_remove_macvlan_element_data e;
1872 bzero(&e, sizeof(e));
1873 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1875 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1876 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1878 bzero(&e, sizeof(e));
1879 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1881 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1882 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1883 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1887 ** Initialize filter list and add filters that the hardware
1888 ** needs to know about.
1890 ** Requires VSI's filter list & seid to be set before calling.
/*
 * Bootstrap the VSI filter state: init the software list, enable
 * broadcast reception, remove firmware's stale default filters, install
 * the device MAC address (any VLAN), and optionally add the filter that
 * drops Tx flow-control frames from non-firmware transmitters.
 */
1893 ixl_init_filters(struct ixl_vsi *vsi)
1895 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1897 /* Initialize mac filter list for VSI */
1898 SLIST_INIT(&vsi->ftl);
1900 /* Receive broadcast Ethernet frames */
1901 i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1903 ixl_del_default_hw_filters(vsi);
1905 ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1907 * Prevent Tx flow control frames from being sent out by
1908 * non-firmware transmitters.
1909 * This affects every VSI in the PF.
1911 if (pf->enable_tx_fc_filter)
1912 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1916 ** This routine adds mulicast filters
/*
 * Add a multicast MAC filter to the software list (VLAN-agnostic).
 * If a matching filter already exists this is a no-op; on successful
 * allocation the new filter is tagged IXL_FILTER_MC.
 */
1919 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
1921 struct ixl_mac_filter *f;
1923 /* Does one already exist */
1924 f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1928 f = ixl_new_filter(vsi, macaddr, IXL_VLAN_ANY);
1930 f->flags |= IXL_FILTER_MC;
1932 printf("WARNING: no filter available!!\n");
/* Re-push every in-use software filter to the hardware (e.g. after reset). */
1936 ixl_reconfigure_filters(struct ixl_vsi *vsi)
1938 ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
1942 * This routine adds a MAC/VLAN filter to the software filter
1943 * list, then adds that new filter to the HW if it doesn't already
1944 * exist in the SW filter list.
/*
 * Add a MAC/VLAN filter. On the first real VLAN registration the
 * VLAN-agnostic (IXL_VLAN_ANY) filter for the MAC is replaced by a
 * VLAN-0 filter so untagged traffic keeps flowing. The new filter is
 * marked ADD|USED and pushed to hardware immediately.
 */
1947 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1949 struct ixl_mac_filter *f, *tmp;
1953 DEBUGOUT("ixl_add_filter: begin");
1958 /* Does one already exist */
1959 f = ixl_find_filter(vsi, macaddr, vlan);
1963 ** Is this the first vlan being registered, if so we
1964 ** need to remove the ANY filter that indicates we are
1965 ** not in a vlan, and replace that with a 0 filter.
1967 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1968 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1970 ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
1971 ixl_add_filter(vsi, macaddr, 0);
1975 f = ixl_new_filter(vsi, macaddr, vlan);
1977 device_printf(dev, "WARNING: no filter available!!\n");
1980 if (f->vlan != IXL_VLAN_ANY)
1981 f->flags |= IXL_FILTER_VLAN;
1985 f->flags |= IXL_FILTER_USED;
1986 ixl_add_hw_filters(vsi, f->flags, 1);
/*
 * Remove a MAC/VLAN filter: mark the matching software entry
 * IXL_FILTER_DEL and push one hardware removal. When the last VLAN is
 * removed, swap the VLAN-0 filter back to a VLAN-agnostic one.
 * NOTE(review): the early return for "no match" and the num_vlans
 * decrement are not visible in this truncated view.
 */
1990 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1992 struct ixl_mac_filter *f;
1994 f = ixl_find_filter(vsi, macaddr, vlan);
1998 f->flags |= IXL_FILTER_DEL;
1999 ixl_del_hw_filters(vsi, 1);
2000 if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
2003 /* Check if this is the last vlan removal */
2004 if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
2005 /* Switch back to a non-vlan filter */
2006 ixl_del_filter(vsi, macaddr, 0);
2007 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
2013 ** Find the filter with both matching mac addr and vlan id
/*
 * Linear search of the VSI's software filter list for an entry matching
 * both MAC address and VLAN id. cmp_etheraddr() returning nonzero
 * indicates the addresses are equal. Returns the entry or NULL.
 */
2015 struct ixl_mac_filter *
2016 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
2018 struct ixl_mac_filter *f;
2020 SLIST_FOREACH(f, &vsi->ftl, next) {
2021 if ((cmp_etheraddr(f->macaddr, macaddr) != 0)
2022 && (f->vlan == vlan)) {
2031 ** This routine takes additions to the vsi filter
2032 ** table and creates an Admin Queue call to create
2033 ** the filters in the hardware.
/*
 * Collect up to `cnt` software filters whose flags match `flags` into an
 * AQ add-macvlan array, clear their ADD flag, and submit one
 * i40e_aq_add_macvlan() command. vsi->hw_filters_add is bumped by the
 * number of elements submitted. The array is heap-allocated M_NOWAIT, so
 * allocation failure is handled (with the free presumably at the end —
 * not visible in this truncated view).
 */
2036 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
2038 struct i40e_aqc_add_macvlan_element_data *a, *b;
2039 struct ixl_mac_filter *f;
2043 enum i40e_status_code status;
2051 ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
2055 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
2056 M_DEVBUF, M_NOWAIT | M_ZERO);
2058 device_printf(dev, "add_hw_filters failed to get memory\n");
2063 ** Scan the filter list, each time we find one
2064 ** we add it to the admin queue array and turn off
2067 SLIST_FOREACH(f, &vsi->ftl, next) {
2068 if ((f->flags & flags) == flags) {
2069 b = &a[j]; // a pox on fvl long names :)
2070 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
2071 if (f->vlan == IXL_VLAN_ANY) {
2073 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2075 b->vlan_tag = f->vlan;
2078 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2079 f->flags &= ~IXL_FILTER_ADD;
2082 ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
2083 MAC_FORMAT_ARGS(f->macaddr));
2089 status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
2091 device_printf(dev, "i40e_aq_add_macvlan status %s, "
2092 "error %s\n", i40e_stat_str(hw, status),
2093 i40e_aq_str(hw, hw->aq.asq_last_status));
2095 vsi->hw_filters_add += j;
2102 ** This routine takes removals in the vsi filter
2103 ** table and creates an Admin Queue call to delete
2104 ** the filters in the hardware.
/*
 * Gather up to `cnt` software filters marked IXL_FILTER_DEL into an AQ
 * remove-macvlan array, unlink them from the software list, and submit
 * one i40e_aq_remove_macvlan() command. On AQ failure the per-element
 * error_code fields are inspected so hw_filters_del counts only the
 * entries that actually got removed; on success all j are counted.
 * NOTE(review): the per-entry free(), the success/failure branch
 * structure, and the final free(d) are missing from this truncated view.
 */
2107 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
2109 struct i40e_aqc_remove_macvlan_element_data *d, *e;
2113 struct ixl_mac_filter *f, *f_temp;
2114 enum i40e_status_code status;
2121 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
2122 M_DEVBUF, M_NOWAIT | M_ZERO);
2124 device_printf(dev, "%s: failed to get memory\n", __func__);
2128 SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
2129 if (f->flags & IXL_FILTER_DEL) {
2130 e = &d[j]; // a pox on fvl long names :)
2131 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
2132 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2133 if (f->vlan == IXL_VLAN_ANY) {
2135 e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2137 e->vlan_tag = f->vlan;
2140 ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
2141 MAC_FORMAT_ARGS(f->macaddr));
2143 /* delete entry from vsi list */
2144 SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
2152 status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
2155 for (int i = 0; i < j; i++)
2156 sc += (!d[i].error_code);
2157 vsi->hw_filters_del += sc;
2159 "Failed to remove %d/%d filters, error %s\n",
2160 j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status));
2162 vsi->hw_filters_del += j;
/*
 * Enable one TX ring: map the VSI queue index to the PF queue index,
 * pre-configure via i40e_pre_tx_queue_cfg(), set QENA_REQ in QTX_ENA,
 * then poll up to 10 times (10us apart) for QENA_STAT to latch.
 * Logs and (presumably) returns an error if the queue never enables.
 */
2169 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2171 struct i40e_hw *hw = &pf->hw;
2176 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2178 ixl_dbg(pf, IXL_DBG_EN_DIS,
2179 "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
2182 i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
2184 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2185 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
2186 I40E_QTX_ENA_QENA_STAT_MASK;
2187 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2188 /* Verify the enable took */
2189 for (int j = 0; j < 10; j++) {
2190 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2191 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2193 i40e_usec_delay(10);
2195 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
2196 device_printf(pf->dev, "TX queue %d still disabled!\n",
/*
 * Enable one RX ring: RX twin of ixl_enable_tx_ring(). Sets QENA_REQ in
 * QRX_ENA for the mapped PF queue, then polls up to 10 times (10us apart)
 * for QENA_STAT. Logs and (presumably) returns an error on timeout.
 */
2205 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2207 struct i40e_hw *hw = &pf->hw;
2212 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2214 ixl_dbg(pf, IXL_DBG_EN_DIS,
2215 "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
2218 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2219 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
2220 I40E_QRX_ENA_QENA_STAT_MASK;
2221 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2222 /* Verify the enable took */
2223 for (int j = 0; j < 10; j++) {
2224 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2225 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2227 i40e_usec_delay(10);
2229 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
2230 device_printf(pf->dev, "RX queue %d still disabled!\n",
/*
 * Enable the TX then the RX half of one queue pair; the called helpers
 * print their own error messages on failure.
 */
2239 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2243 error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
2244 /* Called function already prints error message */
2247 error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
2251 /* For PF VSI only */
/*
 * Enable every TX ring, then every RX ring, of the PF VSI using the PF's
 * own queue tag. NOTE(review): `error` is overwritten each iteration in
 * this truncated view — the loop-exit/error handling lines are missing.
 */
2253 ixl_enable_rings(struct ixl_vsi *vsi)
2255 struct ixl_pf *pf = vsi->back;
2258 for (int i = 0; i < vsi->num_tx_queues; i++)
2259 error = ixl_enable_tx_ring(pf, &pf->qtag, i);
2261 for (int i = 0; i < vsi->num_rx_queues; i++)
2262 error = ixl_enable_rx_ring(pf, &pf->qtag, i);
2268 * Returns error on first ring that is detected hung.
/*
 * Disable one TX ring: pre-configure the queue for disable, wait 500us,
 * clear QENA_REQ in QTX_ENA, then poll up to 10 times (10ms apart) for
 * QENA_STAT to drop. Logs and (presumably) returns an error if the queue
 * stays enabled.
 */
2271 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2273 struct i40e_hw *hw = &pf->hw;
2278 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2280 i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
2281 i40e_usec_delay(500);
2283 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2284 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2285 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2286 /* Verify the disable took */
2287 for (int j = 0; j < 10; j++) {
2288 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2289 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2291 i40e_msec_delay(10);
2293 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
2294 device_printf(pf->dev, "TX queue %d still enabled!\n",
2303 * Returns error on first ring that is detected hung.
/*
 * Disable one RX ring: RX twin of ixl_disable_tx_ring(). Clears QENA_REQ
 * in QRX_ENA and polls up to 10 times (10ms apart) for QENA_STAT to drop;
 * logs and (presumably) returns an error on timeout.
 */
2306 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2308 struct i40e_hw *hw = &pf->hw;
2313 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2315 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2316 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2317 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2318 /* Verify the disable took */
2319 for (int j = 0; j < 10; j++) {
2320 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2321 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2323 i40e_msec_delay(10);
2325 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
2326 device_printf(pf->dev, "RX queue %d still enabled!\n",
/*
 * Disable the TX then the RX half of one queue pair; the called helpers
 * print their own error messages on failure.
 */
2335 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2339 error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
2340 /* Called function already prints error message */
2343 error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
/*
 * Disable every TX ring, then every RX ring, of the given VSI using the
 * supplied queue tag. NOTE(review): as with ixl_enable_rings(), `error`
 * is overwritten each iteration in this truncated view — the final
 * return/error handling lines are missing.
 */
2348 ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag)
2352 for (int i = 0; i < vsi->num_tx_queues; i++)
2353 error = ixl_disable_tx_ring(pf, qtag, i);
2355 for (int i = 0; i < vsi->num_rx_queues; i++)
2356 error = ixl_disable_rx_ring(pf, qtag, i);
/*
 * Handle a TX Malicious Driver Detection (MDD) event: decode the global
 * GL_MDET_TX register (event type, queue, PF and VF numbers), clear it by
 * writing all-ones, then check PF_MDET_TX and each VF's VP_MDET_TX to
 * attribute the event to the PF, a VF, or both, and print one message
 * describing whichever combination was found.
 */
2362 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
2364 struct i40e_hw *hw = &pf->hw;
2365 device_t dev = pf->dev;
2367 bool mdd_detected = false;
2368 bool pf_mdd_detected = false;
2369 bool vf_mdd_detected = false;
2372 u8 pf_mdet_num, vp_mdet_num;
2375 /* find what triggered the MDD event */
2376 reg = rd32(hw, I40E_GL_MDET_TX);
2377 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
2378 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
2379 I40E_GL_MDET_TX_PF_NUM_SHIFT;
2380 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
2381 I40E_GL_MDET_TX_VF_NUM_SHIFT;
2382 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
2383 I40E_GL_MDET_TX_EVENT_SHIFT;
2384 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
2385 I40E_GL_MDET_TX_QUEUE_SHIFT;
/* Write-1-to-clear the latched global event. */
2386 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
2387 mdd_detected = true;
2393 reg = rd32(hw, I40E_PF_MDET_TX);
2394 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
2395 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
2396 pf_mdet_num = hw->pf_id;
2397 pf_mdd_detected = true;
2400 /* Check if MDD was caused by a VF */
2401 for (int i = 0; i < pf->num_vfs; i++) {
2403 reg = rd32(hw, I40E_VP_MDET_TX(i));
2404 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
2405 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
2407 vf->num_mdd_events++;
2408 vf_mdd_detected = true;
2412 /* Print out an error message */
2413 if (vf_mdd_detected && pf_mdd_detected)
2415 "Malicious Driver Detection event %d"
2416 " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
2417 event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
2418 else if (vf_mdd_detected && !pf_mdd_detected)
2420 "Malicious Driver Detection event %d"
2421 " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
2422 event, queue, pf_num, vf_num, vp_mdet_num);
2423 else if (!vf_mdd_detected && pf_mdd_detected)
2425 "Malicious Driver Detection event %d"
2426 " on TX queue %d, pf number %d (PF-%d)\n",
2427 event, queue, pf_num, pf_mdet_num);
2428 /* Theoretically shouldn't happen */
2431 "TX Malicious Driver Detection event (unknown)\n");
/*
 * Handle an RX Malicious Driver Detection (MDD) event: RX twin of
 * ixl_handle_tx_mdd_event(). GL_MDET_RX reports a "function" number
 * rather than separate PF/VF numbers, so the messages attribute the event
 * using only pf_num plus the per-PF/per-VF MDET_RX latches.
 */
2435 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
2437 struct i40e_hw *hw = &pf->hw;
2438 device_t dev = pf->dev;
2440 bool mdd_detected = false;
2441 bool pf_mdd_detected = false;
2442 bool vf_mdd_detected = false;
2445 u8 pf_mdet_num, vp_mdet_num;
2449 * GL_MDET_RX doesn't contain VF number information, unlike
2452 reg = rd32(hw, I40E_GL_MDET_RX);
2453 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
2454 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
2455 I40E_GL_MDET_RX_FUNCTION_SHIFT;
2456 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
2457 I40E_GL_MDET_RX_EVENT_SHIFT;
2458 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
2459 I40E_GL_MDET_RX_QUEUE_SHIFT;
/* Write-1-to-clear the latched global event. */
2460 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
2461 mdd_detected = true;
2467 reg = rd32(hw, I40E_PF_MDET_RX);
2468 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
2469 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
2470 pf_mdet_num = hw->pf_id;
2471 pf_mdd_detected = true;
2474 /* Check if MDD was caused by a VF */
2475 for (int i = 0; i < pf->num_vfs; i++) {
2477 reg = rd32(hw, I40E_VP_MDET_RX(i));
2478 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
2479 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
2481 vf->num_mdd_events++;
2482 vf_mdd_detected = true;
2486 /* Print out an error message */
2487 if (vf_mdd_detected && pf_mdd_detected)
2489 "Malicious Driver Detection event %d"
2490 " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
2491 event, queue, pf_num, pf_mdet_num, vp_mdet_num);
2492 else if (vf_mdd_detected && !pf_mdd_detected)
2494 "Malicious Driver Detection event %d"
2495 " on RX queue %d, pf number %d, (VF-%d)\n",
2496 event, queue, pf_num, vp_mdet_num);
2497 else if (!vf_mdd_detected && pf_mdd_detected)
2499 "Malicious Driver Detection event %d"
2500 " on RX queue %d, pf number %d (PF-%d)\n",
2501 event, queue, pf_num, pf_mdet_num);
2502 /* Theoretically shouldn't happen */
2505 "RX Malicious Driver Detection event (unknown)\n");
2509 * ixl_handle_mdd_event
2511 * Called from interrupt handler to identify possibly malicious vfs
2512 * (But also detects events from the PF, as well)
2515 ixl_handle_mdd_event(struct ixl_pf *pf)
2517 struct i40e_hw *hw = &pf->hw;
2521 * Handle both TX/RX because it's possible they could
2522 * both trigger in the same interrupt.
2524 ixl_handle_tx_mdd_event(pf);
2525 ixl_handle_rx_mdd_event(pf);
2527 atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
2529 /* re-enable mdd interrupt cause */
2530 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2531 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2532 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2537 ixl_enable_intr(struct ixl_vsi *vsi)
2539 struct i40e_hw *hw = vsi->hw;
2540 struct ixl_rx_queue *que = vsi->rx_queues;
2542 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2543 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2544 ixl_enable_queue(hw, que->rxr.me);
2546 ixl_enable_intr0(hw);
2550 ixl_disable_rings_intr(struct ixl_vsi *vsi)
2552 struct i40e_hw *hw = vsi->hw;
2553 struct ixl_rx_queue *que = vsi->rx_queues;
2555 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2556 ixl_disable_queue(hw, que->rxr.me);
2560 ixl_enable_intr0(struct i40e_hw *hw)
2564 /* Use IXL_ITR_NONE so ITR isn't updated here */
2565 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2566 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2567 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2568 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2572 ixl_disable_intr0(struct i40e_hw *hw)
2576 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
2577 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2582 ixl_enable_queue(struct i40e_hw *hw, int id)
2586 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2587 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2588 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2589 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2593 ixl_disable_queue(struct i40e_hw *hw, int id)
2597 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2598 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2602 ixl_update_stats_counters(struct ixl_pf *pf)
2604 struct i40e_hw *hw = &pf->hw;
2605 struct ixl_vsi *vsi = &pf->vsi;
2608 struct i40e_hw_port_stats *nsd = &pf->stats;
2609 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
2611 /* Update hw stats */
2612 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
2613 pf->stat_offsets_loaded,
2614 &osd->crc_errors, &nsd->crc_errors);
2615 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
2616 pf->stat_offsets_loaded,
2617 &osd->illegal_bytes, &nsd->illegal_bytes);
2618 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
2619 I40E_GLPRT_GORCL(hw->port),
2620 pf->stat_offsets_loaded,
2621 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
2622 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
2623 I40E_GLPRT_GOTCL(hw->port),
2624 pf->stat_offsets_loaded,
2625 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
2626 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
2627 pf->stat_offsets_loaded,
2628 &osd->eth.rx_discards,
2629 &nsd->eth.rx_discards);
2630 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
2631 I40E_GLPRT_UPRCL(hw->port),
2632 pf->stat_offsets_loaded,
2633 &osd->eth.rx_unicast,
2634 &nsd->eth.rx_unicast);
2635 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
2636 I40E_GLPRT_UPTCL(hw->port),
2637 pf->stat_offsets_loaded,
2638 &osd->eth.tx_unicast,
2639 &nsd->eth.tx_unicast);
2640 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
2641 I40E_GLPRT_MPRCL(hw->port),
2642 pf->stat_offsets_loaded,
2643 &osd->eth.rx_multicast,
2644 &nsd->eth.rx_multicast);
2645 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
2646 I40E_GLPRT_MPTCL(hw->port),
2647 pf->stat_offsets_loaded,
2648 &osd->eth.tx_multicast,
2649 &nsd->eth.tx_multicast);
2650 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
2651 I40E_GLPRT_BPRCL(hw->port),
2652 pf->stat_offsets_loaded,
2653 &osd->eth.rx_broadcast,
2654 &nsd->eth.rx_broadcast);
2655 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
2656 I40E_GLPRT_BPTCL(hw->port),
2657 pf->stat_offsets_loaded,
2658 &osd->eth.tx_broadcast,
2659 &nsd->eth.tx_broadcast);
2661 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2662 pf->stat_offsets_loaded,
2663 &osd->tx_dropped_link_down,
2664 &nsd->tx_dropped_link_down);
2665 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2666 pf->stat_offsets_loaded,
2667 &osd->mac_local_faults,
2668 &nsd->mac_local_faults);
2669 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2670 pf->stat_offsets_loaded,
2671 &osd->mac_remote_faults,
2672 &nsd->mac_remote_faults);
2673 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2674 pf->stat_offsets_loaded,
2675 &osd->rx_length_errors,
2676 &nsd->rx_length_errors);
2678 /* Flow control (LFC) stats */
2679 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2680 pf->stat_offsets_loaded,
2681 &osd->link_xon_rx, &nsd->link_xon_rx);
2682 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2683 pf->stat_offsets_loaded,
2684 &osd->link_xon_tx, &nsd->link_xon_tx);
2685 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2686 pf->stat_offsets_loaded,
2687 &osd->link_xoff_rx, &nsd->link_xoff_rx);
2688 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2689 pf->stat_offsets_loaded,
2690 &osd->link_xoff_tx, &nsd->link_xoff_tx);
2692 /* Packet size stats rx */
2693 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2694 I40E_GLPRT_PRC64L(hw->port),
2695 pf->stat_offsets_loaded,
2696 &osd->rx_size_64, &nsd->rx_size_64);
2697 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2698 I40E_GLPRT_PRC127L(hw->port),
2699 pf->stat_offsets_loaded,
2700 &osd->rx_size_127, &nsd->rx_size_127);
2701 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2702 I40E_GLPRT_PRC255L(hw->port),
2703 pf->stat_offsets_loaded,
2704 &osd->rx_size_255, &nsd->rx_size_255);
2705 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2706 I40E_GLPRT_PRC511L(hw->port),
2707 pf->stat_offsets_loaded,
2708 &osd->rx_size_511, &nsd->rx_size_511);
2709 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2710 I40E_GLPRT_PRC1023L(hw->port),
2711 pf->stat_offsets_loaded,
2712 &osd->rx_size_1023, &nsd->rx_size_1023);
2713 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2714 I40E_GLPRT_PRC1522L(hw->port),
2715 pf->stat_offsets_loaded,
2716 &osd->rx_size_1522, &nsd->rx_size_1522);
2717 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2718 I40E_GLPRT_PRC9522L(hw->port),
2719 pf->stat_offsets_loaded,
2720 &osd->rx_size_big, &nsd->rx_size_big);
2722 /* Packet size stats tx */
2723 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2724 I40E_GLPRT_PTC64L(hw->port),
2725 pf->stat_offsets_loaded,
2726 &osd->tx_size_64, &nsd->tx_size_64);
2727 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2728 I40E_GLPRT_PTC127L(hw->port),
2729 pf->stat_offsets_loaded,
2730 &osd->tx_size_127, &nsd->tx_size_127);
2731 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2732 I40E_GLPRT_PTC255L(hw->port),
2733 pf->stat_offsets_loaded,
2734 &osd->tx_size_255, &nsd->tx_size_255);
2735 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2736 I40E_GLPRT_PTC511L(hw->port),
2737 pf->stat_offsets_loaded,
2738 &osd->tx_size_511, &nsd->tx_size_511);
2739 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2740 I40E_GLPRT_PTC1023L(hw->port),
2741 pf->stat_offsets_loaded,
2742 &osd->tx_size_1023, &nsd->tx_size_1023);
2743 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2744 I40E_GLPRT_PTC1522L(hw->port),
2745 pf->stat_offsets_loaded,
2746 &osd->tx_size_1522, &nsd->tx_size_1522);
2747 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2748 I40E_GLPRT_PTC9522L(hw->port),
2749 pf->stat_offsets_loaded,
2750 &osd->tx_size_big, &nsd->tx_size_big);
2752 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2753 pf->stat_offsets_loaded,
2754 &osd->rx_undersize, &nsd->rx_undersize);
2755 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2756 pf->stat_offsets_loaded,
2757 &osd->rx_fragments, &nsd->rx_fragments);
2758 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2759 pf->stat_offsets_loaded,
2760 &osd->rx_oversize, &nsd->rx_oversize);
2761 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2762 pf->stat_offsets_loaded,
2763 &osd->rx_jabber, &nsd->rx_jabber);
2764 pf->stat_offsets_loaded = true;
2767 /* Update vsi stats */
2768 ixl_update_vsi_stats(vsi);
2770 for (int i = 0; i < pf->num_vfs; i++) {
2772 if (vf->vf_flags & VF_FLAG_ENABLED)
2773 ixl_update_eth_stats(&pf->vfs[i].vsi);
2778 ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
2780 struct i40e_hw *hw = &pf->hw;
2781 device_t dev = pf->dev;
2784 error = i40e_shutdown_lan_hmc(hw);
2787 "Shutdown LAN HMC failed with code %d\n", error);
2789 ixl_disable_intr0(hw);
2791 error = i40e_shutdown_adminq(hw);
2794 "Shutdown Admin queue failed with code %d\n", error);
2796 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
2801 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
2803 struct i40e_hw *hw = &pf->hw;
2804 struct ixl_vsi *vsi = &pf->vsi;
2805 device_t dev = pf->dev;
2808 device_printf(dev, "Rebuilding driver state...\n");
2810 error = i40e_pf_reset(hw);
2812 device_printf(dev, "PF reset failure %s\n",
2813 i40e_stat_str(hw, error));
2814 goto ixl_rebuild_hw_structs_after_reset_err;
2818 error = i40e_init_adminq(hw);
2819 if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
2820 device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
2822 goto ixl_rebuild_hw_structs_after_reset_err;
2825 i40e_clear_pxe_mode(hw);
2827 error = ixl_get_hw_capabilities(pf);
2829 device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
2830 goto ixl_rebuild_hw_structs_after_reset_err;
2833 error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
2834 hw->func_caps.num_rx_qp, 0, 0);
2836 device_printf(dev, "init_lan_hmc failed: %d\n", error);
2837 goto ixl_rebuild_hw_structs_after_reset_err;
2840 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
2842 device_printf(dev, "configure_lan_hmc failed: %d\n", error);
2843 goto ixl_rebuild_hw_structs_after_reset_err;
2846 /* reserve a contiguous allocation for the PF's VSI */
2847 error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
2849 device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
2851 /* TODO: error handling */
2854 error = ixl_switch_config(pf);
2856 device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
2859 goto ixl_rebuild_hw_structs_after_reset_err;
2862 error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
2865 device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
2866 " aq_err %d\n", error, hw->aq.asq_last_status);
2868 goto ixl_rebuild_hw_structs_after_reset_err;
2872 error = i40e_set_fc(hw, &set_fc_err_mask, true);
2874 device_printf(dev, "init: setting link flow control failed; retcode %d,"
2875 " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
2877 goto ixl_rebuild_hw_structs_after_reset_err;
2880 /* Remove default filters reinstalled by FW on reset */
2881 ixl_del_default_hw_filters(vsi);
2883 /* Determine link state */
2884 if (ixl_attach_get_link_status(pf)) {
2886 /* TODO: error handling */
2889 i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
2890 ixl_get_fw_lldp_status(pf);
2892 /* Keep admin queue interrupts active while driver is loaded */
2893 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2894 ixl_configure_intr0_msix(pf);
2895 ixl_enable_intr0(hw);
2898 device_printf(dev, "Rebuilding driver state done.\n");
2901 ixl_rebuild_hw_structs_after_reset_err:
2902 device_printf(dev, "Reload the driver to recover\n");
2907 ixl_handle_empr_reset(struct ixl_pf *pf)
2909 struct ixl_vsi *vsi = &pf->vsi;
2910 struct i40e_hw *hw = &pf->hw;
2911 bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
2915 ixl_prepare_for_reset(pf, is_up);
2917 /* Typically finishes within 3-4 seconds */
2918 while (count++ < 100) {
2919 reg = rd32(hw, I40E_GLGEN_RSTAT)
2920 & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
2922 i40e_msec_delay(100);
2926 ixl_dbg(pf, IXL_DBG_INFO,
2927 "Reset wait count: %d\n", count);
2929 ixl_rebuild_hw_structs_after_reset(pf);
2931 atomic_clear_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
2935 * Update VSI-specific ethernet statistics counters.
2938 ixl_update_eth_stats(struct ixl_vsi *vsi)
2940 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2941 struct i40e_hw *hw = &pf->hw;
2942 struct i40e_eth_stats *es;
2943 struct i40e_eth_stats *oes;
2944 struct i40e_hw_port_stats *nsd;
2945 u16 stat_idx = vsi->info.stat_counter_idx;
2947 es = &vsi->eth_stats;
2948 oes = &vsi->eth_stats_offsets;
2951 /* Gather up the stats that the hw collects */
2952 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2953 vsi->stat_offsets_loaded,
2954 &oes->tx_errors, &es->tx_errors);
2955 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2956 vsi->stat_offsets_loaded,
2957 &oes->rx_discards, &es->rx_discards);
2959 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2960 I40E_GLV_GORCL(stat_idx),
2961 vsi->stat_offsets_loaded,
2962 &oes->rx_bytes, &es->rx_bytes);
2963 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2964 I40E_GLV_UPRCL(stat_idx),
2965 vsi->stat_offsets_loaded,
2966 &oes->rx_unicast, &es->rx_unicast);
2967 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2968 I40E_GLV_MPRCL(stat_idx),
2969 vsi->stat_offsets_loaded,
2970 &oes->rx_multicast, &es->rx_multicast);
2971 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2972 I40E_GLV_BPRCL(stat_idx),
2973 vsi->stat_offsets_loaded,
2974 &oes->rx_broadcast, &es->rx_broadcast);
2976 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2977 I40E_GLV_GOTCL(stat_idx),
2978 vsi->stat_offsets_loaded,
2979 &oes->tx_bytes, &es->tx_bytes);
2980 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2981 I40E_GLV_UPTCL(stat_idx),
2982 vsi->stat_offsets_loaded,
2983 &oes->tx_unicast, &es->tx_unicast);
2984 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2985 I40E_GLV_MPTCL(stat_idx),
2986 vsi->stat_offsets_loaded,
2987 &oes->tx_multicast, &es->tx_multicast);
2988 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2989 I40E_GLV_BPTCL(stat_idx),
2990 vsi->stat_offsets_loaded,
2991 &oes->tx_broadcast, &es->tx_broadcast);
2992 vsi->stat_offsets_loaded = true;
2996 ixl_update_vsi_stats(struct ixl_vsi *vsi)
3000 struct i40e_eth_stats *es;
3003 struct i40e_hw_port_stats *nsd;
3007 es = &vsi->eth_stats;
3010 ixl_update_eth_stats(vsi);
3012 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
3014 /* Update ifnet stats */
3015 IXL_SET_IPACKETS(vsi, es->rx_unicast +
3018 IXL_SET_OPACKETS(vsi, es->tx_unicast +
3021 IXL_SET_IBYTES(vsi, es->rx_bytes);
3022 IXL_SET_OBYTES(vsi, es->tx_bytes);
3023 IXL_SET_IMCASTS(vsi, es->rx_multicast);
3024 IXL_SET_OMCASTS(vsi, es->tx_multicast);
3026 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
3027 nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
3029 IXL_SET_OERRORS(vsi, es->tx_errors);
3030 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
3031 IXL_SET_OQDROPS(vsi, tx_discards);
3032 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
3033 IXL_SET_COLLISIONS(vsi, 0);
3037 * Reset all of the stats for the given pf
3040 ixl_pf_reset_stats(struct ixl_pf *pf)
3042 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
3043 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
3044 pf->stat_offsets_loaded = false;
3048 * Resets all stats of the given vsi
3051 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
3053 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
3054 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
3055 vsi->stat_offsets_loaded = false;
3059 * Read and update a 48 bit stat from the hw
3061 * Since the device stats are not reset at PFReset, they likely will not
3062 * be zeroed when the driver starts. We'll save the first values read
3063 * and use them as offsets to be subtracted from the raw values in order
3064 * to report stats that count from zero.
3067 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
3068 bool offset_loaded, u64 *offset, u64 *stat)
3072 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
3073 new_data = rd64(hw, loreg);
3076 * Use two rd32's instead of one rd64; FreeBSD versions before
3077 * 10 don't support 64-bit bus reads/writes.
3079 new_data = rd32(hw, loreg);
3080 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
3085 if (new_data >= *offset)
3086 *stat = new_data - *offset;
3088 *stat = (new_data + ((u64)1 << 48)) - *offset;
3089 *stat &= 0xFFFFFFFFFFFFULL;
3093 * Read and update a 32 bit stat from the hw
3096 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
3097 bool offset_loaded, u64 *offset, u64 *stat)
3101 new_data = rd32(hw, reg);
3104 if (new_data >= *offset)
3105 *stat = (u32)(new_data - *offset);
3107 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
3111 ixl_add_device_sysctls(struct ixl_pf *pf)
3113 device_t dev = pf->dev;
3114 struct i40e_hw *hw = &pf->hw;
3116 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3117 struct sysctl_oid_list *ctx_list =
3118 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3120 struct sysctl_oid *debug_node;
3121 struct sysctl_oid_list *debug_list;
3123 struct sysctl_oid *fec_node;
3124 struct sysctl_oid_list *fec_list;
3126 /* Set up sysctls */
3127 SYSCTL_ADD_PROC(ctx, ctx_list,
3128 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
3129 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
3131 SYSCTL_ADD_PROC(ctx, ctx_list,
3132 OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
3133 pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
3135 SYSCTL_ADD_PROC(ctx, ctx_list,
3136 OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD,
3137 pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
3139 SYSCTL_ADD_PROC(ctx, ctx_list,
3140 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
3141 pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed");
3143 SYSCTL_ADD_PROC(ctx, ctx_list,
3144 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
3145 pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
3147 SYSCTL_ADD_PROC(ctx, ctx_list,
3148 OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
3149 pf, 0, ixl_sysctl_unallocated_queues, "I",
3150 "Queues not allocated to a PF or VF");
3152 SYSCTL_ADD_PROC(ctx, ctx_list,
3153 OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
3154 pf, 0, ixl_sysctl_pf_tx_itr, "I",
3155 "Immediately set TX ITR value for all queues");
3157 SYSCTL_ADD_PROC(ctx, ctx_list,
3158 OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
3159 pf, 0, ixl_sysctl_pf_rx_itr, "I",
3160 "Immediately set RX ITR value for all queues");
3162 SYSCTL_ADD_INT(ctx, ctx_list,
3163 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
3164 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
3166 SYSCTL_ADD_INT(ctx, ctx_list,
3167 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
3168 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
3170 /* Add FEC sysctls for 25G adapters */
3171 if (i40e_is_25G_device(hw->device_id)) {
3172 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3173 OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
3174 fec_list = SYSCTL_CHILDREN(fec_node);
3176 SYSCTL_ADD_PROC(ctx, fec_list,
3177 OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW,
3178 pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
3180 SYSCTL_ADD_PROC(ctx, fec_list,
3181 OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW,
3182 pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
3184 SYSCTL_ADD_PROC(ctx, fec_list,
3185 OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW,
3186 pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
3188 SYSCTL_ADD_PROC(ctx, fec_list,
3189 OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW,
3190 pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
3192 SYSCTL_ADD_PROC(ctx, fec_list,
3193 OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW,
3194 pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
3197 SYSCTL_ADD_PROC(ctx, ctx_list,
3198 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW,
3199 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
3201 /* Add sysctls meant to print debug information, but don't list them
3202 * in "sysctl -a" output. */
3203 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3204 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
3205 debug_list = SYSCTL_CHILDREN(debug_node);
3207 SYSCTL_ADD_UINT(ctx, debug_list,
3208 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
3209 &pf->hw.debug_mask, 0, "Shared code debug message level");
3211 SYSCTL_ADD_UINT(ctx, debug_list,
3212 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
3213 &pf->dbg_mask, 0, "Non-shared code debug message level");
3215 SYSCTL_ADD_PROC(ctx, debug_list,
3216 OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
3217 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
3219 SYSCTL_ADD_PROC(ctx, debug_list,
3220 OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
3221 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
3223 SYSCTL_ADD_PROC(ctx, debug_list,
3224 OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
3225 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
3227 SYSCTL_ADD_PROC(ctx, debug_list,
3228 OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
3229 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
3231 SYSCTL_ADD_PROC(ctx, debug_list,
3232 OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
3233 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
3235 SYSCTL_ADD_PROC(ctx, debug_list,
3236 OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
3237 pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
3239 SYSCTL_ADD_PROC(ctx, debug_list,
3240 OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
3241 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
3243 SYSCTL_ADD_PROC(ctx, debug_list,
3244 OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
3245 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
3247 SYSCTL_ADD_PROC(ctx, debug_list,
3248 OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
3249 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
3251 SYSCTL_ADD_PROC(ctx, debug_list,
3252 OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD,
3253 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
3255 SYSCTL_ADD_PROC(ctx, debug_list,
3256 OID_AUTO, "do_pf_reset", CTLTYPE_INT | CTLFLAG_WR,
3257 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
3259 SYSCTL_ADD_PROC(ctx, debug_list,
3260 OID_AUTO, "do_core_reset", CTLTYPE_INT | CTLFLAG_WR,
3261 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
3263 SYSCTL_ADD_PROC(ctx, debug_list,
3264 OID_AUTO, "do_global_reset", CTLTYPE_INT | CTLFLAG_WR,
3265 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
3267 SYSCTL_ADD_PROC(ctx, debug_list,
3268 OID_AUTO, "do_emp_reset", CTLTYPE_INT | CTLFLAG_WR,
3269 pf, 0, ixl_sysctl_do_emp_reset, "I",
3270 "(This doesn't work) Tell HW to initiate a EMP (entire firmware) reset");
3272 SYSCTL_ADD_PROC(ctx, debug_list,
3273 OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
3274 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
3277 SYSCTL_ADD_PROC(ctx, debug_list,
3278 OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3279 pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
3281 SYSCTL_ADD_PROC(ctx, debug_list,
3282 OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3283 pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
3285 SYSCTL_ADD_PROC(ctx, debug_list,
3286 OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD,
3287 pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
3292 * Primarily for finding out how many queues can be assigned to VFs,
3296 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
3298 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3301 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
3303 return sysctl_handle_int(oidp, NULL, queues, req);
3307 ** Set flow control using sysctl:
3314 ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
3316 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3317 struct i40e_hw *hw = &pf->hw;
3318 device_t dev = pf->dev;
3319 int requested_fc, error = 0;
3320 enum i40e_status_code aq_error = 0;
3324 requested_fc = pf->fc;
3325 error = sysctl_handle_int(oidp, &requested_fc, 0, req);
3326 if ((error) || (req->newptr == NULL))
3328 if (requested_fc < 0 || requested_fc > 3) {
3330 "Invalid fc mode; valid modes are 0 through 3\n");
3334 /* Set fc ability for port */
3335 hw->fc.requested_mode = requested_fc;
3336 aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
3339 "%s: Error setting new fc mode %d; fc_err %#x\n",
3340 __func__, aq_error, fc_aq_err);
3343 pf->fc = requested_fc;
3349 ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
3363 switch (link_speed) {
3364 case I40E_LINK_SPEED_100MB:
3367 case I40E_LINK_SPEED_1GB:
3370 case I40E_LINK_SPEED_10GB:
3373 case I40E_LINK_SPEED_40GB:
3376 case I40E_LINK_SPEED_20GB:
3379 case I40E_LINK_SPEED_25GB:
3382 case I40E_LINK_SPEED_UNKNOWN:
3388 return speeds[index];
3392 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
3394 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3395 struct i40e_hw *hw = &pf->hw;
3398 ixl_update_link_status(pf);
3400 error = sysctl_handle_string(oidp,
3401 ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
3407 * Converts 8-bit speeds value to and from sysctl flags and
3408 * Admin Queue flags.
3411 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
3413 static u16 speedmap[6] = {
3414 (I40E_LINK_SPEED_100MB | (0x1 << 8)),
3415 (I40E_LINK_SPEED_1GB | (0x2 << 8)),
3416 (I40E_LINK_SPEED_10GB | (0x4 << 8)),
3417 (I40E_LINK_SPEED_20GB | (0x8 << 8)),
3418 (I40E_LINK_SPEED_25GB | (0x10 << 8)),
3419 (I40E_LINK_SPEED_40GB | (0x20 << 8))
3423 for (int i = 0; i < 6; i++) {
3425 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
3427 retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
3434 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
3436 struct i40e_hw *hw = &pf->hw;
3437 device_t dev = pf->dev;
3438 struct i40e_aq_get_phy_abilities_resp abilities;
3439 struct i40e_aq_set_phy_config config;
3440 enum i40e_status_code aq_error = 0;
3442 /* Get current capability information */
3443 aq_error = i40e_aq_get_phy_capabilities(hw,
3444 FALSE, FALSE, &abilities, NULL);
3447 "%s: Error getting phy capabilities %d,"
3448 " aq error: %d\n", __func__, aq_error,
3449 hw->aq.asq_last_status);
3453 /* Prepare new config */
3454 bzero(&config, sizeof(config));
3456 config.link_speed = speeds;
3458 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
3459 config.phy_type = abilities.phy_type;
3460 config.phy_type_ext = abilities.phy_type_ext;
3461 config.abilities = abilities.abilities
3462 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
3463 config.eee_capability = abilities.eee_capability;
3464 config.eeer = abilities.eeer_val;
3465 config.low_power_ctrl = abilities.d3_lpan;
3466 config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e);
3468 /* Do aq command & restart link */
3469 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3472 "%s: Error setting new phy config %d,"
3473 " aq error: %d\n", __func__, aq_error,
3474 hw->aq.asq_last_status);
3482 ** Supported link speedsL
3492 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
3494 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3495 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
3497 return sysctl_handle_int(oidp, NULL, supported, req);
3501 ** Control link advertise speed:
3503 ** 0x1 - advertise 100 Mb
3504 ** 0x2 - advertise 1G
3505 ** 0x4 - advertise 10G
3506 ** 0x8 - advertise 20G
3507 ** 0x10 - advertise 25G
3508 ** 0x20 - advertise 40G
3510 ** Set to 0 to disable link
3513 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
3515 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3516 device_t dev = pf->dev;
3517 u8 converted_speeds;
3518 int requested_ls = 0;
3521 /* Read in new mode */
3522 requested_ls = pf->advertised_speed;
3523 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
3524 if ((error) || (req->newptr == NULL))
3527 /* Error out if bits outside of possible flag range are set */
3528 if ((requested_ls & ~((u8)0x3F)) != 0) {
3529 device_printf(dev, "Input advertised speed out of range; "
3530 "valid flags are: 0x%02x\n",
3531 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3535 /* Check if adapter supports input value */
3536 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
3537 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
3538 device_printf(dev, "Invalid advertised speed; "
3539 "valid flags are: 0x%02x\n",
3540 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3544 error = ixl_set_advertised_speeds(pf, requested_ls, false);
3548 pf->advertised_speed = requested_ls;
3549 ixl_update_link_status(pf);
3554 ** Get the width and transaction speed of
3555 ** the bus this adapter is plugged into.
/* NOTE(review): this extract is missing several original lines (return type,
 * braces, the early-return for non-PCIe parts, local decls for link/max_speed). */
3558 ixl_get_bus_info(struct ixl_pf *pf)
3560 struct i40e_hw *hw = &pf->hw;
3561 device_t dev = pf->dev;
3563 u32 offset, num_ports;
/* X722 is not attached over a conventional PCIe link; skip the query. */
3566 /* Some devices don't use PCIE */
3567 if (hw->mac.type == I40E_MAC_X722)
3570 /* Read PCI Express Capabilities Link Status Register */
3571 pci_find_cap(dev, PCIY_EXPRESS, &offset);
3572 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
/* Shared i40e code decodes the raw link-status word into hw->bus.* */
3574 /* Fill out hw struct with PCIE info */
3575 i40e_set_pci_config_data(hw, link);
3577 /* Use info to print out bandwidth messages */
3578 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3579 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
3580 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
3581 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3582 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
3583 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
3584 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
3585 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
3589 * If adapter is in slot with maximum supported speed,
3590 * no warning message needs to be printed out.
3592 if (hw->bus.speed >= i40e_bus_speed_8000
3593 && hw->bus.width >= i40e_bus_width_pcie_x8)
/* One PF exists per enabled function; sum their max link rate (in Mb/s
 * scaled to Gb/s) as a rough demand estimate. */
3596 num_ports = bitcount32(hw->func_caps.valid_functions);
3597 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
/* Heuristic comparison of aggregate port bandwidth vs. slot bandwidth;
 * units are only approximately comparable (GT/s x lanes vs. Gb/s) —
 * intentionally a coarse warning, not an exact check. */
3599 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
3600 device_printf(dev, "PCI-Express bandwidth available"
3601 " for this device may be insufficient for"
3602 " optimal performance.\n");
3603 device_printf(dev, "Please move the device to a different"
3604 " PCI-e link with more lanes and/or higher"
3605 " transfer rate.\n");
/* Sysctl handler: report the NVM/firmware version string to userland.
 * NOTE(review): the sbuf finish/delete and error-return lines are missing
 * from this extract. */
3610 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
3612 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3613 struct i40e_hw *hw = &pf->hw;
3616 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3617 ixl_nvm_version_str(hw, sbuf);
/* Debug helper: pretty-print an NVM access request from the nvmupdate tool.
 * The first branch deliberately matches the tool's very frequent
 * "Get Driver Status" poll and suppresses its log line (printf commented out)
 * so debug output is not flooded. */
3625 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
3627 if ((nvma->command == I40E_NVM_READ) &&
3628 ((nvma->config & 0xFF) == 0xF) &&
3629 (((nvma->config & 0xF00) >> 8) == 0xF) &&
3630 (nvma->offset == 0) &&
3631 (nvma->data_size == 1)) {
3632 // device_printf(dev, "- Get Driver Status Command\n");
3634 else if (nvma->command == I40E_NVM_READ) {
3638 switch (nvma->command) {
3640 device_printf(dev, "- command: I40E_NVM_READ\n");
3643 device_printf(dev, "- command: I40E_NVM_WRITE\n");
3646 device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
/* config packs a module pointer in the low byte and flags in bits 8-11. */
3650 device_printf(dev, "- config (ptr) : 0x%02x\n", nvma->config & 0xFF);
3651 device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
3652 device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
3653 device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
/* Ioctl back-end for the Intel NVM update utility: validate the user's
 * struct i40e_nvm_access, copy it in, hand it to the shared
 * i40e_nvmupd_command() state machine, and copy the result back out. */
3658 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3660 struct i40e_hw *hw = &pf->hw;
3661 struct i40e_nvm_access *nvma;
3662 device_t dev = pf->dev;
3663 enum i40e_status_code status = 0;
3664 size_t nvma_size, ifd_len, exp_len;
3667 DEBUGFUNC("ixl_handle_nvmupd_cmd");
/* The request must at least contain the fixed-size header struct. */
3670 nvma_size = sizeof(struct i40e_nvm_access);
3671 ifd_len = ifd->ifd_len;
3673 if (ifd_len < nvma_size ||
3674 ifd->ifd_data == NULL) {
3675 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3677 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3678 __func__, ifd_len, nvma_size);
3679 device_printf(dev, "%s: data pointer: %p\n", __func__,
/* M_WAITOK: allocation sleeps rather than failing. */
3684 nvma = malloc(ifd_len, M_DEVBUF, M_WAITOK);
3685 err = copyin(ifd->ifd_data, nvma, ifd_len);
3687 device_printf(dev, "%s: Cannot get request from user space\n",
3689 free(nvma, M_DEVBUF);
3693 if (pf->dbg_mask & IXL_DBG_NVMUPD)
3694 ixl_print_nvm_cmd(dev, nvma);
/* If an adapter reset is in flight, poll for up to ~10s (100 x 100ms)
 * for it to complete before touching the NVM. */
3696 if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3698 while (count++ < 100) {
3699 i40e_msec_delay(100);
3700 if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING))
3705 if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3706 free(nvma, M_DEVBUF);
/* Bound the payload to one AQ buffer's worth of data. */
3710 if (nvma->data_size < 1 || nvma->data_size > 4096) {
3711 device_printf(dev, "%s: invalid request, data size not in supported range\n",
3713 free(nvma, M_DEVBUF);
3718 * Older versions of the NVM update tool don't set ifd_len to the size
3719 * of the entire buffer passed to the ioctl. Check the data_size field
3720 * in the contained i40e_nvm_access struct and ensure everything is
3721 * copied in from userspace.
3723 exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
3725 if (ifd_len < exp_len) {
/* NOTE(review): an original line is missing from this extract between the
 * check above and this realloc — presumably it raises ifd_len to exp_len
 * before reallocating/copying; confirm against the full source. */
3727 nvma = realloc(nvma, ifd_len, M_DEVBUF, M_WAITOK);
3728 err = copyin(ifd->ifd_data, nvma, ifd_len);
3730 device_printf(dev, "%s: Cannot get request from user space\n",
3732 free(nvma, M_DEVBUF);
3737 // TODO: Might need a different lock here
3739 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3740 // IXL_PF_UNLOCK(pf);
3742 err = copyout(nvma, ifd->ifd_data, ifd_len);
3743 free(nvma, M_DEVBUF);
3745 device_printf(dev, "%s: Cannot return data to user space\n",
3750 /* Let the nvmupdate report errors, show them only when debug is enabled */
3751 if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3752 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3753 i40e_stat_str(hw, status), perrno);
3756 * -EPERM is actually ERESTART, which the kernel interprets as it needing
3757 * to run this ioctl again. So use -EACCES for -EPERM instead.
3759 if (perrno == -EPERM)
/* Scan the four GLGEN_MDIO_I2C_SEL registers for the entry that both has
 * I2C enabled and whose PHY port number matches this function's port.
 * NOTE(review): the return statements (matched index / not-found value) are
 * missing from this extract. */
3766 ixl_find_i2c_interface(struct ixl_pf *pf)
3768 struct i40e_hw *hw = &pf->hw;
3769 bool i2c_en, port_matched;
3772 for (int i = 0; i < 4; i++) {
3773 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3774 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3775 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3776 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3778 if (i2c_en && port_matched)
/* Map a PHY-type bit position to a human-readable name.
 * 'ext' selects the 8-entry extended table over the 32-entry base table;
 * out-of-range positions return a sentinel string.
 * NOTE(review): most table entries are elided in this extract. */
3786 ixl_phy_type_string(u32 bit_pos, bool ext)
3788 static char * phy_types_str[32] = {
3818 "1000BASE-T Optical",
3822 static char * ext_phy_types_str[8] = {
3833 if (ext && bit_pos > 7) return "Invalid_Ext";
3834 if (bit_pos > 31) return "Invalid";
3836 return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3839 /* TODO: ERJ: I don't this is necessary anymore. */
/* Issue a Get Link Status admin-queue command and copy the raw response
 * into the caller-provided struct. */
3841 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3843 device_t dev = pf->dev;
3844 struct i40e_hw *hw = &pf->hw;
3845 struct i40e_aq_desc desc;
3846 enum i40e_status_code status;
/* The command parameters live directly in the descriptor's raw bytes. */
3848 struct i40e_aqc_get_link_status *aq_link_status =
3849 (struct i40e_aqc_get_link_status *)&desc.params.raw;
3851 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
/* NOTE(review): this sets command_flags on the caller's out-buffer, not on
 * aq_link_status (the descriptor copy that is actually sent) — looks
 * suspicious; confirm against upstream. */
3852 link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3853 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3856 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3857 __func__, i40e_stat_str(hw, status),
3858 i40e_aq_str(hw, hw->aq.asq_last_status));
/* Copy the firmware's response back to the caller. */
3862 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
/* Translate a link-status PHY-type byte into a name: values at or above the
 * extended-type base are looked up in the extended table, rebased by 0x1F.
 * NOTE(review): the branch condition line is missing from this extract;
 * upstream tests `val >= 0x1F` — confirm. */
3867 ixl_phy_type_string_ls(u8 val)
3870 return ixl_phy_type_string(val - 0x1F, true);
3872 return ixl_phy_type_string(val, false);
/* Sysctl handler: dump the raw fields of a fresh Get Link Status AQ
 * response for debugging. */
3876 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3878 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3879 device_t dev = pf->dev;
3883 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3885 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3889 struct i40e_aqc_get_link_status link_status;
3890 error = ixl_aq_get_link_status(pf, &link_status);
3896 sbuf_printf(buf, "\n"
3897 "PHY Type : 0x%02x<%s>\n"
3899 "Link info: 0x%02x\n"
3900 "AN info : 0x%02x\n"
3901 "Ext info : 0x%02x\n"
3902 "Loopback : 0x%02x\n"
3906 link_status.phy_type,
3907 ixl_phy_type_string_ls(link_status.phy_type),
3908 link_status.link_speed,
3909 link_status.link_info,
3910 link_status.an_info,
3911 link_status.ext_info,
3912 link_status.loopback,
3913 link_status.max_frame_size,
3915 link_status.power_desc);
3917 error = sbuf_finish(buf);
3919 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Sysctl handler: query PHY capabilities via the admin queue and dump the
 * raw response, decoding the phy_type bitmasks into names. */
3926 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3928 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3929 struct i40e_hw *hw = &pf->hw;
3930 device_t dev = pf->dev;
3931 enum i40e_status_code status;
3932 struct i40e_aq_get_phy_abilities_resp abilities;
3936 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3938 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3942 status = i40e_aq_get_phy_capabilities(hw,
3943 FALSE, FALSE, &abilities, NULL);
3946 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3947 __func__, i40e_stat_str(hw, status),
3948 i40e_aq_str(hw, hw->aq.asq_last_status));
3953 sbuf_printf(buf, "\n"
3955 abilities.phy_type);
/* Decode each set bit of the 32-bit base PHY-type mask into a name. */
3957 if (abilities.phy_type != 0) {
3958 sbuf_printf(buf, "<");
3959 for (int i = 0; i < 32; i++)
3960 if ((1 << i) & abilities.phy_type)
3961 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3962 sbuf_printf(buf, ">\n");
3965 sbuf_printf(buf, "PHY Ext : %02x",
3966 abilities.phy_type_ext);
/* Only the low 4 bits of the extended mask are decoded here. */
3968 if (abilities.phy_type_ext != 0) {
3969 sbuf_printf(buf, "<");
3970 for (int i = 0; i < 4; i++)
3971 if ((1 << i) & abilities.phy_type_ext)
3972 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
3973 sbuf_printf(buf, ">");
3975 sbuf_printf(buf, "\n");
3983 "ID : %02x %02x %02x %02x\n"
3984 "ModType : %02x %02x %02x\n"
3988 abilities.link_speed,
3989 abilities.abilities, abilities.eee_capability,
3990 abilities.eeer_val, abilities.d3_lpan,
3991 abilities.phy_id[0], abilities.phy_id[1],
3992 abilities.phy_id[2], abilities.phy_id[3],
3993 abilities.module_type[0], abilities.module_type[1],
/* fec_cfg_curr_mod_ext_info: bits 7:5 are module-extension info, 4:0 FEC cfg. */
3994 abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3995 abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3996 abilities.ext_comp_code);
3998 error = sbuf_finish(buf);
4000 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Sysctl handler: list the PF's MAC/VLAN filters, and (with SR-IOV enabled)
 * per-VF filter sections. */
4007 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4009 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4010 struct ixl_vsi *vsi = &pf->vsi;
4011 struct ixl_mac_filter *f;
4012 device_t dev = pf->dev;
4013 int error = 0, ftl_len = 0, ftl_counter = 0;
4017 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4019 device_printf(dev, "Could not allocate sbuf for output.\n");
4023 sbuf_printf(buf, "\n");
4025 /* Print MAC filters */
4026 sbuf_printf(buf, "PF Filters:\n");
/* First pass counts entries (body elided in extract) so the second pass
 * can suppress the trailing newline on the last one. */
4027 SLIST_FOREACH(f, &vsi->ftl, next)
4031 sbuf_printf(buf, "(none)\n");
4033 SLIST_FOREACH(f, &vsi->ftl, next) {
4035 MAC_FORMAT ", vlan %4d, flags %#06x",
4036 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4037 /* don't print '\n' for last entry */
4038 if (++ftl_counter != ftl_len)
4039 sbuf_printf(buf, "\n");
4044 /* TODO: Give each VF its own filter list sysctl */
4046 if (pf->num_vfs > 0) {
4047 sbuf_printf(buf, "\n\n");
4048 for (int i = 0; i < pf->num_vfs; i++) {
4050 if (!(vf->vf_flags & VF_FLAG_ENABLED))
4054 ftl_len = 0, ftl_counter = 0;
4055 sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
/* NOTE(review): this VF section iterates the PF VSI's list (&vsi->ftl),
 * not the VF's own VSI filter list — looks like it would print the PF's
 * filters under each VF heading; confirm against upstream (which walks
 * vf->vsi.ftl). */
4056 SLIST_FOREACH(f, &vsi->ftl, next)
4060 sbuf_printf(buf, "(none)\n");
4062 SLIST_FOREACH(f, &vsi->ftl, next) {
4064 MAC_FORMAT ", vlan %4d, flags %#06x\n",
4065 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4072 error = sbuf_finish(buf);
4074 device_printf(dev, "Error finishing sbuf: %d\n", error);
4080 #define IXL_SW_RES_SIZE 0x14
4082 ixl_res_alloc_cmp(const void *a, const void *b)
4084 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4085 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4086 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4088 return ((int)one->resource_type - (int)two->resource_type);
4092 * Longest string length: 25
/* Map a switch-resource type code to a display name; unknown/out-of-range
 * types fall through to "(Reserved)".
 * NOTE(review): most table entries and the range check before the table
 * lookup are elided in this extract. */
4095 ixl_switch_res_type_string(u8 type)
4097 // TODO: This should be changed to static const
4098 char * ixl_switch_res_type_strings[0x14] = {
4101 "Perfect Match MAC address",
4104 "Multicast hash entry",
4105 "Unicast hash entry",
4109 "VLAN Statistic Pool",
4112 "Inner VLAN Forward filter",
4122 return ixl_switch_res_type_strings[type];
4124 return "(Reserved)";
/* Sysctl handler: query the switch's resource allocation table via the
 * admin queue and print it sorted by resource type. */
4128 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4130 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4131 struct i40e_hw *hw = &pf->hw;
4132 device_t dev = pf->dev;
4134 enum i40e_status_code status;
4138 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4140 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4142 device_printf(dev, "Could not allocate sbuf for output.\n");
4146 bzero(resp, sizeof(resp));
4147 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4153 "%s: get_switch_resource_alloc() error %s, aq error %s\n",
4154 __func__, i40e_stat_str(hw, status),
4155 i40e_aq_str(hw, hw->aq.asq_last_status));
4160 /* Sort entries by type for display */
4161 qsort(resp, num_entries,
4162 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
4163 &ixl_res_alloc_cmp);
4165 sbuf_cat(buf, "\n");
4166 sbuf_printf(buf, "# of entries: %d\n", num_entries);
4168 " Type | Guaranteed | Total | Used | Un-allocated\n"
4169 " | (this) | (all) | (this) | (all) \n");
4170 for (int i = 0; i < num_entries; i++) {
4172 "%25s | %10d %5d %6d %12d",
4173 ixl_switch_res_type_string(resp[i].resource_type),
4177 resp[i].total_unalloced);
/* Suppress the newline after the final row. */
4178 if (i < num_entries - 1)
4179 sbuf_cat(buf, "\n");
4182 error = sbuf_finish(buf);
4184 device_printf(dev, "Error finishing sbuf: %d\n", error);
4191 ** Caller must init and delete sbuf; this function will clear and
4192 ** finish it for caller.
/* Render a one-word label ("PF %d", "VEB", ...) for a switch configuration
 * element into the caller's sbuf and return the resulting string.
 * NOTE(review): break statements, the default case, and the sbuf
 * clear/finish calls are elided in this extract. */
4195 ixl_switch_element_string(struct sbuf *s,
4196 struct i40e_aqc_switch_config_element_resp *element)
4200 switch (element->element_type) {
4201 case I40E_AQ_SW_ELEM_TYPE_MAC:
4202 sbuf_printf(s, "MAC %3d", element->element_info);
4204 case I40E_AQ_SW_ELEM_TYPE_PF:
4205 sbuf_printf(s, "PF %3d", element->element_info);
4207 case I40E_AQ_SW_ELEM_TYPE_VF:
4208 sbuf_printf(s, "VF %3d", element->element_info);
4210 case I40E_AQ_SW_ELEM_TYPE_EMP:
4213 case I40E_AQ_SW_ELEM_TYPE_BMC:
4216 case I40E_AQ_SW_ELEM_TYPE_PV:
4219 case I40E_AQ_SW_ELEM_TYPE_VEB:
4222 case I40E_AQ_SW_ELEM_TYPE_PA:
4225 case I40E_AQ_SW_ELEM_TYPE_VSI:
4226 sbuf_printf(s, "VSI %3d", element->element_info);
4234 return sbuf_data(s);
/* Sysctl handler: fetch the device's internal switch configuration via the
 * admin queue and print one row per element (SEID, uplink, downlink,
 * connection type). */
4238 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
4240 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4241 struct i40e_hw *hw = &pf->hw;
4242 device_t dev = pf->dev;
4245 enum i40e_status_code status;
4248 u8 aq_buf[I40E_AQ_LARGE_BUF];
4250 struct i40e_aqc_get_switch_config_resp *sw_config;
4251 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
4253 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4255 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
4259 status = i40e_aq_get_switch_config(hw, sw_config,
4260 sizeof(aq_buf), &next, NULL);
4263 "%s: aq_get_switch_config() error %s, aq error %s\n",
4264 __func__, i40e_stat_str(hw, status),
4265 i40e_aq_str(hw, hw->aq.asq_last_status));
/* 'next' nonzero means more elements exist than fit in one AQ buffer;
 * fetching the continuation is not implemented (see TODO). */
4270 device_printf(dev, "%s: TODO: get more config with SEID %d\n",
/* Scratch sbuf used by ixl_switch_element_string() for the name column. */
4273 nmbuf = sbuf_new_auto();
4275 device_printf(dev, "Could not allocate sbuf for name output.\n");
4280 sbuf_cat(buf, "\n");
4281 /* Assuming <= 255 elements in switch */
4282 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
4283 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
4285 ** Revision -- all elements are revision 1 for now
4288 "SEID ( Name ) | Uplink | Downlink | Conn Type\n"
4289 " | | | (uplink)\n");
4290 for (int i = 0; i < sw_config->header.num_reported; i++) {
4291 // "%4d (%8s) | %8s %8s %#8x",
4292 sbuf_printf(buf, "%4d", sw_config->element[i].seid);
4294 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4295 &sw_config->element[i]));
4296 sbuf_cat(buf, " | ");
4297 sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
4299 sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
4301 sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
/* Suppress the newline after the final row. */
4302 if (i < sw_config->header.num_reported - 1)
4303 sbuf_cat(buf, "\n");
4307 error = sbuf_finish(buf);
4309 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Sysctl handler: dump the RSS hash key. X722 reads it through the admin
 * queue; other MACs read the PFQF_HKEY registers directly. */
4317 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
4319 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4320 struct i40e_hw *hw = &pf->hw;
4321 device_t dev = pf->dev;
4324 enum i40e_status_code status;
4327 struct i40e_aqc_get_set_rss_key_data key_data;
4329 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4331 device_printf(dev, "Could not allocate sbuf for output.\n");
4335 bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
4337 sbuf_cat(buf, "\n");
4338 if (hw->mac.type == I40E_MAC_X722) {
4339 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
4341 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
4342 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
/* Register path: copy each 32-bit HKEY word into key_data at byte
 * offset i*4. */
4344 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4345 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
4346 bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
4350 ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
4352 error = sbuf_finish(buf);
4354 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Hex-dump helper: print 'length' bytes of 'buf' into 'sb', 16 bytes per
 * row, each row prefixed with its byte offset (biased by label_offset).
 * When 'text' is true an ASCII column follows the hex, with non-printable
 * bytes shown as '.'.
 * NOTE(review): lines handling the final partial row (incrementing the row
 * count for a remainder, and the non-text early newline path) are missing
 * from this extract. */
4361 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
4366 if (length < 1 || buf == NULL) return;
4368 int byte_stride = 16;
4369 int lines = length / byte_stride;
4370 int rem = length % byte_stride;
4374 for (i = 0; i < lines; i++) {
/* Last row may be shorter than a full stride. */
4375 width = (rem > 0 && i == lines - 1)
4376 ? rem : byte_stride;
4378 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
4380 for (j = 0; j < width; j++)
4381 sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
/* Pad a short row so the ASCII column stays aligned. */
4383 if (width < byte_stride) {
4384 for (k = 0; k < (byte_stride - width); k++)
4385 sbuf_printf(sb, " ");
4389 sbuf_printf(sb, "\n");
/* ASCII rendering: printable range is 32..126 inclusive. */
4393 for (j = 0; j < width; j++) {
4394 c = (char)buf[i * byte_stride + j];
4395 if (c < 32 || c > 126)
4396 sbuf_printf(sb, ".");
4398 sbuf_printf(sb, "%c", c);
4401 sbuf_printf(sb, "\n");
/* Sysctl handler: dump the RSS lookup table (LUT). X722 reads it via the
 * admin queue; other MACs read the PFQF_HLUT registers directly. */
4407 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
4409 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4410 struct i40e_hw *hw = &pf->hw;
4411 device_t dev = pf->dev;
4414 enum i40e_status_code status;
4418 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4420 device_printf(dev, "Could not allocate sbuf for output.\n");
4424 bzero(hlut, sizeof(hlut));
4425 sbuf_cat(buf, "\n");
4426 if (hw->mac.type == I40E_MAC_X722) {
4427 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
4429 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
4430 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
/* Register path: rss_table_size is in bytes, each HLUT register holds 4. */
4432 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
4433 reg = rd32(hw, I40E_PFQF_HLUT(i));
4434 bcopy(&reg, &hlut[i << 2], 4);
/* NOTE(review): the dump length is hard-coded to 512 bytes regardless of
 * how many bytes were actually filled above — confirm this matches
 * hlut's declared size. */
4437 ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
4439 error = sbuf_finish(buf);
4441 device_printf(dev, "Error finishing sbuf: %d\n", error);
4448 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
4450 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4451 struct i40e_hw *hw = &pf->hw;
4454 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
4455 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
4457 return sysctl_handle_long(oidp, NULL, hena, req);
4461 * Sysctl to disable firmware's link management
4463 * 1 - Disable link management on this port
4464 * 0 - Re-enable link management
4466 * On normal NVMs, firmware manages link by default.
4469 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
4471 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4472 struct i40e_hw *hw = &pf->hw;
4473 device_t dev = pf->dev;
4474 int requested_mode = -1;
4475 enum i40e_status_code status = 0;
4478 /* Read in new mode */
4479 error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4480 if ((error) || (req->newptr == NULL))
4482 /* Check for sane value */
4483 if (requested_mode < 0 || requested_mode > 1) {
4484 device_printf(dev, "Valid modes are 0 or 1\n");
/* Bit 4 of the PHY debug argument disables FW link management;
 * !!() normalizes the mode to 0/1 before shifting. */
4489 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4492 "%s: Error setting new phy debug mode %s,"
4493 " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4494 i40e_aq_str(hw, hw->aq.asq_last_status));
4502 * Read some diagnostic data from an SFP module
4503 * Bytes 96-99, 102-105 from device address 0xA2
4506 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4508 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4509 device_t dev = pf->dev;
/* Byte 0 at address 0xA0 is the SFF identifier; 0x3 = SFP/SFP+/SFP28. */
4514 error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4516 device_printf(dev, "Error reading from i2c\n");
4519 if (output != 0x3) {
4520 device_printf(dev, "Module is not SFP/SFP+/SFP28 (%02X)\n", output);
/* Byte 92 advertises diagnostic monitoring support (bits 5-6).
 * NOTE(review): the read_i2c_byte return values below this point are not
 * checked — a failed read leaves 'output' stale. */
4524 pf->read_i2c_byte(pf, 92, 0xA0, &output);
4525 if (!(output & 0x60)) {
4526 device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4530 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4532 for (u8 offset = 96; offset < 100; offset++) {
4533 pf->read_i2c_byte(pf, offset, 0xA2, &output);
4534 sbuf_printf(sbuf, "%02X ", output);
4536 for (u8 offset = 102; offset < 106; offset++) {
4537 pf->read_i2c_byte(pf, offset, 0xA2, &output);
4538 sbuf_printf(sbuf, "%02X ", output);
4548 * Sysctl to read a byte from I2C bus.
4550 * Input: 32-bit value:
4551 * bits 0-7: device address (0xA0 or 0xA2)
4552 * bits 8-15: offset (0-255)
4553 * bits 16-31: unused
4554 * Output: 8-bit value read
4557 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4559 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4560 device_t dev = pf->dev;
4561 int input = -1, error = 0;
4562 u8 dev_addr, offset, output;
4564 /* Read in I2C read parameters */
4565 error = sysctl_handle_int(oidp, &input, 0, req);
4566 if ((error) || (req->newptr == NULL))
4568 /* Validate device address */
/* Only the two standard SFP EEPROM/diagnostic addresses are accepted. */
4569 dev_addr = input & 0xFF;
4570 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4573 offset = (input >> 8) & 0xFF;
4575 error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
/* Result is reported via the console rather than the sysctl itself. */
4579 device_printf(dev, "%02X\n", output);
4584 * Sysctl to write a byte to the I2C bus.
4586 * Input: 32-bit value:
4587 * bits 0-7: device address (0xA0 or 0xA2)
4588 * bits 8-15: offset (0-255)
4589 * bits 16-23: value to write
4590 * bits 24-31: unused
4591 * Output: 8-bit value written
4594 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4596 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4597 device_t dev = pf->dev;
4598 int input = -1, error = 0;
4599 u8 dev_addr, offset, value;
4601 /* Read in I2C write parameters */
4602 error = sysctl_handle_int(oidp, &input, 0, req);
4603 if ((error) || (req->newptr == NULL))
4605 /* Validate device address */
/* Only the two standard SFP EEPROM/diagnostic addresses are accepted. */
4606 dev_addr = input & 0xFF;
4607 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4610 offset = (input >> 8) & 0xFF;
4611 value = (input >> 16) & 0xFF;
4613 error = pf->write_i2c_byte(pf, offset, dev_addr, value);
/* Confirmation is reported via the console rather than the sysctl itself. */
4617 device_printf(dev, "%02X written\n", value);
/* Fetch current PHY abilities and report (via *is_set) whether the FEC
 * bit(s) in 'bit_pos' are set in fec_cfg_curr_mod_ext_info.
 * The filled-in 'abilities' is also used by callers for a follow-up
 * ixl_set_fec_config(). */
4622 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4623 u8 bit_pos, int *is_set)
4625 device_t dev = pf->dev;
4626 struct i40e_hw *hw = &pf->hw;
4627 enum i40e_status_code status;
4629 status = i40e_aq_get_phy_capabilities(hw,
4630 FALSE, FALSE, abilities, NULL);
4633 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4634 __func__, i40e_stat_str(hw, status),
4635 i40e_aq_str(hw, hw->aq.asq_last_status));
/* Normalize the masked test to 0/1. */
4639 *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
/* Set or clear the FEC bit(s) 'bit_pos' in the PHY configuration.
 * Only issues a Set PHY Config AQ command when the resulting fec_config
 * actually differs from the current value; all other PHY parameters are
 * copied unchanged from the supplied 'abilities'. */
4644 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4645 u8 bit_pos, int set)
4647 device_t dev = pf->dev;
4648 struct i40e_hw *hw = &pf->hw;
4649 struct i40e_aq_set_phy_config config;
4650 enum i40e_status_code status;
4652 /* Set new PHY config */
4653 memset(&config, 0, sizeof(config));
/* Start from the current FEC config with the target bit(s) cleared,
 * then OR them back in if 'set' is requested (line elided in extract). */
4654 config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4656 config.fec_config |= bit_pos;
4657 if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
/* Apply without dropping link. */
4658 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4659 config.phy_type = abilities->phy_type;
4660 config.phy_type_ext = abilities->phy_type_ext;
4661 config.link_speed = abilities->link_speed;
4662 config.eee_capability = abilities->eee_capability;
4663 config.eeer = abilities->eeer_val;
4664 config.low_power_ctrl = abilities->d3_lpan;
4665 status = i40e_aq_set_phy_config(hw, &config, NULL);
4669 "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4670 __func__, i40e_stat_str(hw, status),
4671 i40e_aq_str(hw, hw->aq.asq_last_status));
4680 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4682 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4683 int mode, error = 0;
4685 struct i40e_aq_get_phy_abilities_resp abilities;
4686 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4689 /* Read in new mode */
4690 error = sysctl_handle_int(oidp, &mode, 0, req);
4691 if ((error) || (req->newptr == NULL))
4694 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4698 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4700 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4701 int mode, error = 0;
4703 struct i40e_aq_get_phy_abilities_resp abilities;
4704 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4707 /* Read in new mode */
4708 error = sysctl_handle_int(oidp, &mode, 0, req);
4709 if ((error) || (req->newptr == NULL))
4712 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4716 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4718 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4719 int mode, error = 0;
4721 struct i40e_aq_get_phy_abilities_resp abilities;
4722 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4725 /* Read in new mode */
4726 error = sysctl_handle_int(oidp, &mode, 0, req);
4727 if ((error) || (req->newptr == NULL))
4730 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4734 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4736 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4737 int mode, error = 0;
4739 struct i40e_aq_get_phy_abilities_resp abilities;
4740 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4743 /* Read in new mode */
4744 error = sysctl_handle_int(oidp, &mode, 0, req);
4745 if ((error) || (req->newptr == NULL))
4748 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4752 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4754 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4755 int mode, error = 0;
4757 struct i40e_aq_get_phy_abilities_resp abilities;
4758 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4761 /* Read in new mode */
4762 error = sysctl_handle_int(oidp, &mode, 0, req);
4763 if ((error) || (req->newptr == NULL))
4766 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
/* Sysctl handler: read a firmware debug-dump cluster via repeated
 * i40e_aq_debug_dump() calls, accumulating each table into final_buff and
 * hex-printing a table's contents whenever the firmware advances to the
 * next table. */
4770 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4772 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4773 struct i40e_hw *hw = &pf->hw;
4774 device_t dev = pf->dev;
4777 enum i40e_status_code status;
4779 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4781 device_printf(dev, "Could not allocate sbuf for output.\n");
4786 /* This amount is only necessary if reading the entire cluster into memory */
4787 #define IXL_FINAL_BUFF_SIZE (1280 * 1024)
4788 final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK);
/* NOTE(review): with M_WAITOK, malloc(9) does not return NULL, so this
 * check is dead code. */
4789 if (final_buff == NULL) {
4790 device_printf(dev, "Could not allocate memory for output.\n");
4793 int final_buff_len = 0;
/* Each AQ call reads up to one 4KB chunk; cursor state (table/index) is
 * echoed back by firmware for the next iteration. */
4799 u16 curr_buff_size = 4096;
4800 u8 curr_next_table = 0;
4801 u32 curr_next_index = 0;
4807 sbuf_cat(buf, "\n");
4810 status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4811 dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4813 device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4814 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4818 /* copy info out of temp buffer */
4819 bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4820 final_buff_len += ret_buff_size;
4822 if (ret_next_table != curr_next_table) {
4823 /* We're done with the current table; we can dump out read data. */
4824 sbuf_printf(buf, "%d:", curr_next_table);
4825 int bytes_printed = 0;
/* NOTE(review): '<=' prints one extra 16-byte row beyond final_buff_len
 * when the length is a multiple of 16 — looks like an off-by-one
 * ('<' expected); the read stays within the 1280KB allocation but emits
 * stale/zero bytes. Confirm against upstream. */
4826 while (bytes_printed <= final_buff_len) {
4827 sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4828 bytes_printed += 16;
4830 sbuf_cat(buf, "\n");
4832 /* The entire cluster has been read; we're finished */
4833 if (ret_next_table == 0xFF)
4836 /* Otherwise clear the output buffer and continue reading */
4837 bzero(final_buff, IXL_FINAL_BUFF_SIZE);
/* Firmware signals end-of-table data with an all-ones index. */
4841 if (ret_next_index == 0xFFFFFFFF)
4844 bzero(dump_buf, sizeof(dump_buf));
4845 curr_next_table = ret_next_table;
4846 curr_next_index = ret_next_index;
4850 free(final_buff, M_DEVBUF);
4852 error = sbuf_finish(buf);
4854 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Sysctl handler: enable (1) or disable (0) the firmware LLDP agent.
 * Disabling is rejected on X722 and NPAR configurations, and requires
 * AQ API version >= 1.7. The driver-side state flag mirrors the agent
 * state (flag set == agent disabled). */
4861 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4863 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4864 struct i40e_hw *hw = &pf->hw;
4865 device_t dev = pf->dev;
4867 int state, new_state;
4868 enum i40e_status_code status;
/* state == 1 means the agent is currently running. */
4869 state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4871 /* Read in new mode */
4872 error = sysctl_handle_int(oidp, &new_state, 0, req);
4873 if ((error) || (req->newptr == NULL))
4876 /* Already in requested state */
4877 if (new_state == state)
4880 if (new_state == 0) {
4881 if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) {
4882 device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n");
4886 if (pf->hw.aq.api_maj_ver < 1 ||
4887 (pf->hw.aq.api_maj_ver == 1 &&
4888 pf->hw.aq.api_min_ver < 7)) {
4889 device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
/* Stop the agent persistently and let FW keep applying DCB settings. */
4893 i40e_aq_stop_lldp(&pf->hw, true, NULL);
4894 i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
4895 atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4897 status = i40e_aq_start_lldp(&pf->hw, NULL);
/* EEXIST from FW just means the agent was already running — benign. */
4898 if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST)
4899 device_printf(dev, "FW LLDP agent is already running\n");
4900 atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4907 * Get FW LLDP Agent status
4910 ixl_get_fw_lldp_status(struct ixl_pf *pf)
4912 enum i40e_status_code ret = I40E_SUCCESS;
4913 struct i40e_lldp_variables lldp_cfg;
4914 struct i40e_hw *hw = &pf->hw;
4917 ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
/* Each port's AdminStatus occupies a 4-bit nibble in the NVM word;
 * shift this port's nibble down (masking line elided in extract). */
4921 /* Get the LLDP AdminStatus for the current port */
4922 adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
4925 /* Check if LLDP agent is disabled */
4927 device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n");
4928 atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4930 atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
/* Attach-time helper: work around old firmware (< 4.33) needing an explicit
 * link restart, then query the current link state into pf->link_up. */
4936 ixl_attach_get_link_status(struct ixl_pf *pf)
4938 struct i40e_hw *hw = &pf->hw;
4939 device_t dev = pf->dev;
4942 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4943 (hw->aq.fw_maj_ver < 4)) {
/* Brief settle delay before kicking autonegotiation. */
4944 i40e_msec_delay(75);
4945 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4947 device_printf(dev, "link restart failed, aq_err=%d\n",
4948 pf->hw.aq.asq_last_status);
4953 /* Determine link state */
/* Force a fresh Get Link Status instead of using cached info. */
4954 hw->phy.get_link_info = TRUE;
4955 i40e_get_link_status(hw, &pf->link_up);
4960 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4962 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4963 int requested = 0, error = 0;
4965 /* Read in new mode */
4966 error = sysctl_handle_int(oidp, &requested, 0, req);
4967 if ((error) || (req->newptr == NULL))
4970 /* Initiate the PF reset later in the admin task */
4971 atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
4977 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4979 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4980 struct i40e_hw *hw = &pf->hw;
4981 int requested = 0, error = 0;
4983 /* Read in new mode */
4984 error = sysctl_handle_int(oidp, &requested, 0, req);
4985 if ((error) || (req->newptr == NULL))
4988 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4994 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4996 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4997 struct i40e_hw *hw = &pf->hw;
4998 int requested = 0, error = 0;
5000 /* Read in new mode */
5001 error = sysctl_handle_int(oidp, &requested, 0, req);
5002 if ((error) || (req->newptr == NULL))
5005 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
/* Sysctl handler: trigger an EMP firmware reset (EMPFWR), if the hardware
 * permits software-initiated EMP resets. */
5011 ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS)
5013 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5014 struct i40e_hw *hw = &pf->hw;
5015 int requested = 0, error = 0;
5017 /* Read in new mode */
5018 error = sysctl_handle_int(oidp, &requested, 0, req);
5019 if ((error) || (req->newptr == NULL))
5022 /* TODO: Find out how to bypass this */
/* 0x000B818C: undocumented register — bit 0 presumably gates whether SW
 * may initiate an EMP reset (no symbolic name available); confirm against
 * hardware documentation. */
5023 if (!(rd32(hw, 0x000B818C) & 0x1)) {
5024 device_printf(pf->dev, "SW not allowed to initiate EMPR\n");
5027 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_EMPFWR_MASK);
5033 * Print out mapping of TX queue indexes and Rx queue indexes
5037 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
5039 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5040 struct ixl_vsi *vsi = &pf->vsi;
5041 device_t dev = pf->dev;
5045 struct ixl_rx_queue *rx_que = vsi->rx_queues;
5046 struct ixl_tx_queue *tx_que = vsi->tx_queues;
5048 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5050 device_printf(dev, "Could not allocate sbuf for output.\n");
5054 sbuf_cat(buf, "\n");
5055 for (int i = 0; i < vsi->num_rx_queues; i++) {
5056 rx_que = &vsi->rx_queues[i];
5057 sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
5059 for (int i = 0; i < vsi->num_tx_queues; i++) {
5060 tx_que = &vsi->tx_queues[i];
5061 sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
5064 error = sbuf_finish(buf);
5066 device_printf(dev, "Error finishing sbuf: %d\n", error);