1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
39 #include "ixl_pf_iov.h"
44 #include "ixl_iw_int.h"
47 static u8 ixl_convert_sysctl_aq_link_speed(u8, bool);
48 static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
51 static int ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
52 static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
53 static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
54 static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
55 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
56 static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
57 static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
58 static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
61 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
62 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
63 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
64 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
65 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
66 static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
67 static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
68 static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
69 static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
70 static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
71 static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
72 static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
73 static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
74 static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
75 static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
76 static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
77 static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
78 static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
79 static int ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
80 static int ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
81 static int ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
82 static int ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS);
83 static int ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
84 static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
86 static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
87 static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
91 extern int ixl_enable_iwarp;
92 extern int ixl_limit_iwarp_msix;
95 const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
96 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
98 const char * const ixl_fc_string[6] = {
107 static char *ixl_fec_string[3] = {
109 "CL74 FC-FEC/BASE-R",
113 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
116 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
/*
 * Format the firmware, AQ API, NVM, EETrack ID and OEM version information
 * into the caller-supplied sbuf.  The packed hw->nvm.oem_ver word is decoded
 * as (ver:8 | build:16 | patch:8), most-significant byte first.
 * NOTE(review): some interior lines are not visible in this view.
 */
119 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
/* Unpack the OEM version word into its three components. */
121 u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
122 u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
123 u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
126 "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
127 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
128 hw->aq.api_maj_ver, hw->aq.api_min_ver,
/* NVM version is split into hi/lo bit fields within hw->nvm.version. */
129 (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
130 IXL_NVM_VERSION_HI_SHIFT,
131 (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
132 IXL_NVM_VERSION_LO_SHIFT,
134 oem_ver, oem_build, oem_patch);
/*
 * Build the NVM/firmware version string in an auto-sizing sbuf and log it
 * once via device_printf().  Used at attach time to report versions.
 * NOTE(review): the sbuf finish/delete calls are on lines not visible here.
 */
138 ixl_print_nvm_version(struct ixl_pf *pf)
140 struct i40e_hw *hw = &pf->hw;
141 device_t dev = pf->dev;
144 sbuf = sbuf_new_auto();
145 ixl_nvm_version_str(hw, sbuf);
147 device_printf(dev, "%s\n", sbuf_data(sbuf));
/*
 * Program the PF's TX interrupt throttling rate (ITR) into the hardware
 * ITR register for every TX queue, and mirror the value (plus a default
 * latency class) into each software tx_ring.
 */
152 ixl_configure_tx_itr(struct ixl_pf *pf)
154 struct i40e_hw *hw = &pf->hw;
155 struct ixl_vsi *vsi = &pf->vsi;
156 struct ixl_tx_queue *que = vsi->tx_queues;
/* Latch the sysctl-controlled pf->tx_itr into the VSI before programming. */
158 vsi->tx_itr_setting = pf->tx_itr;
160 for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
161 struct tx_ring *txr = &que->txr;
/* One ITRN register per (ITR index, queue) pair. */
163 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
164 vsi->tx_itr_setting);
165 txr->itr = vsi->tx_itr_setting;
166 txr->latency = IXL_AVE_LATENCY;
/*
 * RX counterpart of ixl_configure_tx_itr(): program the PF's RX interrupt
 * throttling rate into hardware for every RX queue and mirror it into each
 * software rx_ring.
 */
171 ixl_configure_rx_itr(struct ixl_pf *pf)
173 struct i40e_hw *hw = &pf->hw;
174 struct ixl_vsi *vsi = &pf->vsi;
175 struct ixl_rx_queue *que = vsi->rx_queues;
/* Latch the sysctl-controlled pf->rx_itr into the VSI before programming. */
177 vsi->rx_itr_setting = pf->rx_itr;
179 for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
180 struct rx_ring *rxr = &que->rxr;
182 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
183 vsi->rx_itr_setting);
184 rxr->itr = vsi->rx_itr_setting;
185 rxr->latency = IXL_AVE_LATENCY;
190 * Write PF ITR values to queue ITR registers.
/* Convenience wrapper: program both TX and RX ITR values for every queue. */
193 ixl_configure_itr(struct ixl_pf *pf)
195 ixl_configure_tx_itr(pf);
196 ixl_configure_rx_itr(pf);
199 /*********************************************************************
201 * Get the hardware capabilities
203 **********************************************************************/
/*
 * Query device/function capabilities from firmware via the admin queue,
 * detect an optional I2C interface, select the driver's I2C access method,
 * and log a one-line capability summary.
 * NOTE(review): error-return and retry-loop lines are not visible here.
 */
206 ixl_get_hw_capabilities(struct ixl_pf *pf)
208 struct i40e_aqc_list_capabilities_element_resp *buf;
209 struct i40e_hw *hw = &pf->hw;
210 device_t dev = pf->dev;
211 enum i40e_status_code status;
212 int len, i2c_intfc_num;
/* Initial guess at response size: room for 40 capability elements. */
216 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
218 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
219 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
220 device_printf(dev, "Unable to allocate cap memory\n")
224 /* This populates the hw struct */
225 status = i40e_aq_discover_capabilities(hw, buf, len,
226 &needed, i40e_aqc_opc_list_func_capabilities, NULL);
/* FW reports ENOMEM (with the size it needs) when our buffer was too small. */
228 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
230 /* retry once with a larger buffer */
234 } else if (status != I40E_SUCCESS) {
235 device_printf(dev, "capability discovery failed; status %s, error %s\n",
236 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
241 * Some devices have both MDIO and I2C; since this isn't reported
242 * by the FW, check registers to see if an I2C interface exists.
244 i2c_intfc_num = ixl_find_i2c_interface(pf);
245 if (i2c_intfc_num != -1)
248 /* Determine functions to use for driver I2C accesses */
249 switch (pf->i2c_access_method) {
/* XL710 with AQ API >= 1.7 supports I2C access via admin queue commands. */
251 if (hw->mac.type == I40E_MAC_XL710 &&
252 hw->aq.api_maj_ver == 1 &&
253 hw->aq.api_min_ver >= 7) {
254 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
255 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
/* Older firmware: fall back to direct register-based I2C access. */
257 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
258 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
263 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
264 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
267 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
268 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
/* Bit-bang method: slowest but always available. */
271 pf->read_i2c_byte = ixl_read_i2c_byte_bb;
272 pf->write_i2c_byte = ixl_write_i2c_byte_bb;
275 /* Should not happen */
276 device_printf(dev, "Error setting I2C access functions\n");
280 /* Print a subset of the capability information. */
281 device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n",
282 hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
283 hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
284 (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
285 (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
286 (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
292 /* For the set_advertise sysctl */
/*
 * At driver load, advertise the full set of speeds the PHY supports so that
 * an unload/reload cycle restores the default, then record the resulting
 * advertised-speed bitmap in sysctl format for the set_advertise handler.
 */
294 ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
296 device_t dev = pf->dev;
299 /* Make sure to initialize the device to the complete list of
300 * supported speeds on driver load, to ensure unloading and
301 * reloading the driver will restore this value.
303 err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
305 /* Non-fatal error */
306 device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
/* Cache the sysctl-format representation of the advertised speeds. */
311 pf->advertised_speed =
312 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
/*
 * Tear down the hardware-facing structures in reverse order of setup:
 * shut down the LAN HMC (if it was initialized), then disable the misc
 * interrupt and shut down the admin queue, finally releasing the queue
 * allocation held by this PF.  Failures are logged but not fatal.
 */
316 ixl_teardown_hw_structs(struct ixl_pf *pf)
318 enum i40e_status_code status = 0;
319 struct i40e_hw *hw = &pf->hw;
320 device_t dev = pf->dev;
322 /* Shutdown LAN HMC */
/* Only tear down HMC if it was actually set up (hmc_obj non-NULL). */
323 if (hw->hmc.hmc_obj) {
324 status = i40e_shutdown_lan_hmc(hw);
327 "init: LAN HMC shutdown failure; status %s\n",
328 i40e_stat_str(hw, status));
333 /* Shutdown admin queue */
334 ixl_disable_intr0(hw);
335 status = i40e_shutdown_adminq(hw);
338 "init: Admin Queue shutdown failure; status %s\n",
339 i40e_stat_str(hw, status));
/* Return this PF's queues to the queue manager. */
341 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
/*
 * Perform a PF reset and rebuild the hardware state from scratch:
 * PF reset -> admin queue init -> capability discovery -> LAN HMC
 * init/config -> switch config -> PHY interrupt mask -> flow control,
 * with a link-restart workaround for old firmware, then re-enable the
 * misc (vector 0) interrupt and clear the critical-error state bits.
 * NOTE(review): error-path lines between the visible calls are missing
 * from this view; each step appears to bail out on failure.
 */
347 ixl_reset(struct ixl_pf *pf)
349 struct i40e_hw *hw = &pf->hw;
350 device_t dev = pf->dev;
354 // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
356 error = i40e_pf_reset(hw);
358 device_printf(dev, "init: PF reset failure\n");
363 error = i40e_init_adminq(hw);
365 device_printf(dev, "init: Admin queue init failure;"
366 " status code %d\n", error);
371 i40e_clear_pxe_mode(hw);
374 error = ixl_get_hw_capabilities(pf);
376 device_printf(dev, "init: Error retrieving HW capabilities;"
377 " status code %d\n", error);
/* Re-create the Host Memory Cache contexts for all TX/RX queues. */
381 error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
382 hw->func_caps.num_rx_qp, 0, 0);
384 device_printf(dev, "init: LAN HMC init failed; status code %d\n",
390 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
392 device_printf(dev, "init: LAN HMC config failed; status code %d\n",
398 // XXX: possible fix for panic, but our failure recovery is still broken
399 error = ixl_switch_config(pf);
401 device_printf(dev, "init: ixl_switch_config() failed: %d\n",
/* Re-arm link event reporting from firmware. */
406 error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
409 device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
410 " aq_err %d\n", error, hw->aq.asq_last_status);
415 error = i40e_set_fc(hw, &set_fc_err_mask, true);
417 device_printf(dev, "init: setting link flow control failed; retcode %d,"
418 " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
422 // XXX: (Rebuild VSIs?)
424 /* Firmware delay workaround */
/* FW < 4.33 needs an explicit autoneg restart to bring the link back. */
425 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
426 (hw->aq.fw_maj_ver < 4)) {
428 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
430 device_printf(dev, "init: link restart failed, aq_err %d\n",
431 hw->aq.asq_last_status);
437 /* Re-enable admin queue interrupt */
439 ixl_configure_intr0_msix(pf);
440 ixl_enable_intr0(hw);
446 ixl_rebuild_hw_structs_after_reset(pf);
448 /* The PF reset should have cleared any critical errors */
449 atomic_clear_32(&pf->state, IXL_PF_STATE_PF_CRIT_ERR);
450 atomic_clear_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
/* Re-arm the critical-error causes in the ICR0 enable register. */
452 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
453 reg |= IXL_ICR0_CRIT_ERR_MASK;
454 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
461 * TODO: Make sure this properly handles admin queue / single rx queue intr
/*
 * Legacy/MSI (single-vector) interrupt handler.  Clears the PBA, reads
 * ICR0 to determine the cause, defers VFLR and admin queue work to iflib
 * tasks, re-enables the vector, and schedules the RX thread only if the
 * queue-0 cause bit was set.
 * NOTE(review): the function signature line is not visible in this view;
 * presumably this is the driver's ixl_intr() filter routine — confirm.
 */
466 struct ixl_pf *pf = arg;
467 struct i40e_hw *hw = &pf->hw;
468 struct ixl_vsi *vsi = &pf->vsi;
469 struct ixl_rx_queue *que = vsi->rx_queues;
475 // TODO: Check against proper field
477 /* Clear PBA at start of ISR if using legacy interrupts */
479 wr32(hw, I40E_PFINT_DYN_CTL0,
480 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
481 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
/* Read-to-clear the interrupt cause register. */
484 icr0 = rd32(hw, I40E_PFINT_ICR0);
/* VF level reset requested: hand off to the iflib IOV task. */
488 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
489 iflib_iov_intr_deferred(vsi->ctx);
492 // TODO!: Do the stuff that's done in ixl_msix_adminq here, too!
493 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
494 iflib_admin_intr_deferred(vsi->ctx);
496 // TODO: Is intr0 enabled somewhere else?
497 ixl_enable_intr0(hw);
499 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
500 return (FILTER_SCHEDULE_THREAD);
502 return (FILTER_HANDLED);
506 /*********************************************************************
508 * MSIX VSI Interrupt Service routine
510 **********************************************************************/
/*
 * Per-queue MSI-X interrupt filter: update the adaptive RX ITR for this
 * queue and always schedule the iflib thread to do the actual RX work.
 */
512 ixl_msix_que(void *arg)
514 struct ixl_rx_queue *rx_que = arg;
518 ixl_set_queue_rx_itr(rx_que);
519 // ixl_set_queue_tx_itr(que);
521 return (FILTER_SCHEDULE_THREAD);
525 /*********************************************************************
527 * MSIX Admin Queue Interrupt Service routine
529 **********************************************************************/
/*
 * MSI-X vector 0 (admin queue / "other causes") interrupt filter.
 * Reads ICR0, and for each cause that needs servicing before it may be
 * re-enabled, clears its bit in the local copy of the enable mask:
 * admin queue events, malicious-driver detection, global reset requests,
 * and critical errors (ECC / PCI exception / PE) which escalate to a PF
 * reset request.  HMC errors are logged and acknowledged; VFLR is
 * deferred to the iflib IOV task.  The pruned enable mask is written
 * back and the vector re-armed before returning.
 * NOTE(review): lines setting do_task and the reset-type switch bodies
 * are not visible in this view.
 */
531 ixl_msix_adminq(void *arg)
533 struct ixl_pf *pf = arg;
534 struct i40e_hw *hw = &pf->hw;
535 device_t dev = pf->dev;
536 u32 reg, mask, rstat_reg;
537 bool do_task = FALSE;
539 DDPRINTF(dev, "begin");
/* Read-to-clear the cause register. */
543 reg = rd32(hw, I40E_PFINT_ICR0);
545 * For masking off interrupt causes that need to be handled before
546 * they can be re-enabled
548 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
550 /* Check on the cause */
551 if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
552 mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
/* Malicious driver detection: flag for the admin task to handle. */
556 if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
557 mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
558 atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
562 if (reg & I40E_PFINT_ICR0_GRST_MASK) {
563 mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
564 device_printf(dev, "Reset Requested!\n");
565 rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
566 rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
567 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
568 device_printf(dev, "Reset type: ");
570 /* These others might be handled similarly to an EMPR reset */
571 case I40E_RESET_CORER:
574 case I40E_RESET_GLOBR:
577 case I40E_RESET_EMPR:
584 /* overload admin queue task to check reset progress */
585 atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
590 * PE / PCI / ECC exceptions are all handled in the same way:
591 * mask out these three causes, then request a PF reset
593 * TODO: I think at least ECC error requires a GLOBR, not PFR
595 if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
596 device_printf(dev, "ECC Error detected!\n");
597 if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
598 device_printf(dev, "PCI Exception detected!\n");
599 if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
600 device_printf(dev, "Critical Protocol Engine Error detected!\n");
601 /* Checks against the conditions above */
602 if (reg & IXL_ICR0_CRIT_ERR_MASK) {
603 mask &= ~IXL_ICR0_CRIT_ERR_MASK;
/* Escalate: ask the admin task to perform a PF reset. */
604 atomic_set_32(&pf->state,
605 IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR);
609 // TODO: Linux driver never re-enables this interrupt once it has been detected
610 // Then what is supposed to happen? A PF reset? Should it never happen?
611 // TODO: Parse out this error into something human readable
612 if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
613 reg = rd32(hw, I40E_PFHMC_ERRORINFO);
614 if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
615 device_printf(dev, "HMC Error detected!\n");
616 device_printf(dev, "INFO 0x%08x\n", reg);
617 reg = rd32(hw, I40E_PFHMC_ERRORDATA);
618 device_printf(dev, "DATA 0x%08x\n", reg);
/* Acknowledge the HMC error by clearing ERRORINFO. */
619 wr32(hw, I40E_PFHMC_ERRORINFO, 0);
624 if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
625 mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
626 iflib_iov_intr_deferred(pf->vsi.ctx);
/* Write back the pruned enable mask and re-arm vector 0. */
630 wr32(hw, I40E_PFINT_ICR0_ENA, mask);
631 ixl_enable_intr0(hw);
634 return (FILTER_SCHEDULE_THREAD);
636 return (FILTER_HANDLED);
639 /*********************************************************************
642 * Routines for multicast and vlan filter management.
644 *********************************************************************/
/*
 * Sync the interface's multicast address list into hardware filters.
 * First counts the AF_LINK multicast entries; if the count reaches the
 * filter limit, falls back to multicast-promiscuous mode (deleting the
 * existing MC filters), otherwise adds one HW filter per address.
 */
646 ixl_add_multi(struct ixl_vsi *vsi)
648 struct ifmultiaddr *ifma;
649 struct ifnet *ifp = vsi->ifp;
650 struct i40e_hw *hw = vsi->hw;
653 IOCTL_DEBUGOUT("ixl_add_multi: begin");
657 ** First just get a count, to decide if we
658 ** we simply use multicast promiscuous.
660 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
661 if (ifma->ifma_addr->sa_family != AF_LINK)
665 if_maddr_runlock(ifp);
/* Too many addresses for discrete filters: go promiscuous instead. */
667 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
668 /* delete existing MC filters */
669 ixl_del_hw_filters(vsi, mcnt);
670 i40e_aq_set_vsi_multicast_promiscuous(hw,
671 vsi->seid, TRUE, NULL);
/* Second pass: queue a software filter for each multicast address. */
677 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
678 if (ifma->ifma_addr->sa_family != AF_LINK)
680 ixl_add_mc_filter(vsi,
681 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
684 if_maddr_runlock(ifp);
/* Push all queued MC filters to hardware in one batch. */
686 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
687 ixl_add_hw_filters(vsi, flags, mcnt);
690 IOCTL_DEBUGOUT("ixl_add_multi: end");
/*
 * Remove hardware multicast filters for addresses no longer present on
 * the interface.  Walks the driver's filter list; any MC filter whose
 * address is not found in the interface's current multicast list is
 * marked IXL_FILTER_DEL, then the marked set is deleted from hardware.
 */
694 ixl_del_multi(struct ixl_vsi *vsi)
696 struct ifnet *ifp = vsi->ifp;
697 struct ifmultiaddr *ifma;
698 struct ixl_mac_filter *f;
702 IOCTL_DEBUGOUT("ixl_del_multi: begin");
704 /* Search for removed multicast addresses */
706 SLIST_FOREACH(f, &vsi->ftl, next) {
707 if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
/* Inner loop: is this filter's MAC still in the OS multicast list? */
709 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
710 if (ifma->ifma_addr->sa_family != AF_LINK)
712 u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
713 if (cmp_etheraddr(f->macaddr, mc_addr)) {
718 if (match == FALSE) {
719 f->flags |= IXL_FILTER_DEL;
724 if_maddr_runlock(ifp);
727 ixl_del_hw_filters(vsi, mcnt);
/*
 * Log a LOG_NOTICE "Link is up" message describing speed, requested and
 * negotiated FEC modes, autoneg completion, and flow control state, all
 * derived from hw->phy.link_info.  ixl_fec_string[] indexes: 0 = RS,
 * 1 = FC/BASE-R (KR), 2 = none.
 */
733 ixl_link_up_msg(struct ixl_pf *pf)
735 struct i40e_hw *hw = &pf->hw;
736 struct ifnet *ifp = pf->vsi.ifp;
737 char *req_fec_string, *neg_fec_string;
740 fec_abilities = hw->phy.link_info.req_fec_info;
741 /* If both RS and KR are requested, only show RS */
742 if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
743 req_fec_string = ixl_fec_string[0];
744 else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
745 req_fec_string = ixl_fec_string[1];
747 req_fec_string = ixl_fec_string[2];
/* Negotiated FEC: again prefer reporting RS over KR. */
749 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
750 neg_fec_string = ixl_fec_string[0];
751 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
752 neg_fec_string = ixl_fec_string[1];
754 neg_fec_string = ixl_fec_string[2];
756 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
758 ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
759 req_fec_string, neg_fec_string,
760 (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
/* Flow control string: 3 = full (TX+RX), 2 = TX only, 1 = RX only, 0 = none. */
761 (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
762 hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
763 ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
764 ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
765 ixl_fc_string[1] : ixl_fc_string[0]);
769 * Configure admin queue/misc interrupt cause registers in hardware.
/*
 * Configure MSI-X vector 0 for admin queue and "other cause" interrupts:
 * clear any stale causes, enable the causes ixl_msix_adminq() services,
 * detach vector 0 from all queue interrupts (LNKLST0 = 0x7FF end marker),
 * set a fixed ITR, and arm the vector.
 */
772 ixl_configure_intr0_msix(struct ixl_pf *pf)
774 struct i40e_hw *hw = &pf->hw;
777 /* First set up the adminq - vector 0 */
778 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
779 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
781 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
782 I40E_PFINT_ICR0_ENA_GRST_MASK |
783 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
784 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
785 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
786 I40E_PFINT_ICR0_ENA_VFLR_MASK |
787 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
788 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
789 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
792 * 0x7FF is the end of the queue list.
793 * This means we won't use MSI-X vector 0 for a queue interrupt
796 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
797 /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
798 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
/* Arm vector 0 using the software ITR index so queue ITRs are untouched. */
800 wr32(hw, I40E_PFINT_DYN_CTL0,
801 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
802 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
804 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
808 * Configure queue interrupt cause registers in hardware.
810 * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
/*
 * Program per-queue MSI-X interrupt cause registers.  For each queue pair
 * the hardware linked list is: LNKLSTN(i) -> RX queue i -> TX queue i ->
 * end-of-list, all bound to MSI-X vector (i + 1) since vector 0 is the
 * admin/misc vector.
 */
813 ixl_configure_queue_intr_msix(struct ixl_pf *pf)
815 struct i40e_hw *hw = &pf->hw;
816 struct ixl_vsi *vsi = &pf->vsi;
820 // TODO: See if max is really necessary
821 for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
822 /* Make sure interrupt is disabled */
823 wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
824 /* Set linked list head to point to corresponding RX queue
825 * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */
826 reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
827 & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
828 ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
829 & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
830 wr32(hw, I40E_PFINT_LNKLSTN(i), reg);
/* RX cause: enable, bind to this vector, chain next to TX queue i. */
832 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
833 (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
834 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
835 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
836 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
837 wr32(hw, I40E_QINT_RQCTL(i), reg);
/* TX cause: enable, bind to this vector, terminate the list (EOL). */
839 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
840 (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
841 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
842 (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
843 (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
844 wr32(hw, I40E_QINT_TQCTL(i), reg);
849 * Configure for single interrupt vector operation
/*
 * Configure the device for single-vector (legacy/MSI) operation: program
 * ITR0 for TX and RX, enable the "other cause" interrupts, and chain the
 * single queue pair (RX queue 0 -> TX queue 0 -> EOL) onto vector 0.
 * Assumes exactly one queue pair in this mode.
 */
852 ixl_configure_legacy(struct ixl_pf *pf)
854 struct i40e_hw *hw = &pf->hw;
855 struct ixl_vsi *vsi = &pf->vsi;
861 vsi->tx_itr_setting = pf->tx_itr;
862 wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
863 vsi->tx_itr_setting);
864 txr->itr = vsi->tx_itr_setting;
866 vsi->rx_itr_setting = pf->rx_itr;
867 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
868 vsi->rx_itr_setting);
869 rxr->itr = vsi->rx_itr_setting;
870 /* XXX: Assuming only 1 queue in single interrupt mode */
872 vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;
874 /* Setup "other" causes */
875 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
876 | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
877 | I40E_PFINT_ICR0_ENA_GRST_MASK
878 | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
879 | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
880 | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
881 | I40E_PFINT_ICR0_ENA_VFLR_MASK
882 | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
884 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
886 /* No ITR for non-queue interrupts */
887 wr32(hw, I40E_PFINT_STAT_CTL0,
888 IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
890 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
891 wr32(hw, I40E_PFINT_LNKLST0, 0);
893 /* Associate the queue pair to the vector and enable the q int */
894 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
895 | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
896 | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
897 wr32(hw, I40E_QINT_RQCTL(0), reg);
/* TX queue 0 terminates the cause linked list. */
899 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
900 | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
901 | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
902 wr32(hw, I40E_QINT_TQCTL(0), reg);
/*
 * Release PCI/interrupt resources on detach: the admin/misc IRQ, each
 * per-RX-queue IRQ, and finally the BAR0 memory mapping.  May be called
 * before queue structures are fully set up, hence the early-exit guard
 * (on a line not visible in this view).
 */
906 ixl_free_pci_resources(struct ixl_pf *pf)
908 struct ixl_vsi *vsi = &pf->vsi;
909 device_t dev = iflib_get_dev(vsi->ctx);
910 struct ixl_rx_queue *rx_que = vsi->rx_queues;
912 /* We may get here before stations are setup */
917 ** Release all msix VSI resources:
919 iflib_irq_free(vsi->ctx, &vsi->irq);
921 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
922 iflib_irq_free(vsi->ctx, &rx_que->que_irq);
/* Release the BAR0 register mapping last. */
924 if (pf->pci_mem != NULL)
925 bus_release_resource(dev, SYS_RES_MEMORY,
926 PCIR_BAR(0), pf->pci_mem);
/*
 * Register ifmedia entries for every media type the PHY supports, mapping
 * each bit in the i40e PHY-type capability bitmap to the corresponding
 * IFM_* media word (100M through 40G, plus backplane KX/KR/KR2/KR4 and
 * 25G variants).
 */
930 ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
932 /* Display supported media types */
933 if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
934 ifmedia_add(vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
936 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
937 ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
938 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
939 ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
940 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
941 ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
/* Several 10G attachment types are all presented as TWINAX. */
943 if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
944 phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
945 phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
946 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
948 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
949 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
950 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
951 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
952 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
953 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
/* Likewise, several 40G copper/AOC/backplane types map to 40G CR4. */
955 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
956 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
957 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
958 phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
959 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
960 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
961 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
962 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
963 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
964 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
966 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
967 ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
969 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
970 || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
971 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
972 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
973 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
974 if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
975 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
976 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
977 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
978 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
979 ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
981 if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
982 ifmedia_add(vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
984 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
985 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
986 if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
987 ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
989 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
990 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
991 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
992 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
993 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
994 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
995 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
996 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
997 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
998 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
999 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
1000 ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
1003 /*********************************************************************
1005 * Setup networking device structure and register an interface.
1007 **********************************************************************/
/*
 * Finish network-interface setup: compute the max frame size from the MTU,
 * query PHY capabilities (retrying once after a delay since fiber modules
 * can be slow to report), record supported speeds, set the interface
 * baudrate, and populate the ifmedia list with autoselect as the default.
 */
1009 ixl_setup_interface(device_t dev, struct ixl_pf *pf)
1011 struct ixl_vsi *vsi = &pf->vsi;
1012 if_ctx_t ctx = vsi->ctx;
1013 struct i40e_hw *hw = &pf->hw;
1014 struct ifnet *ifp = iflib_get_ifp(ctx);
1015 struct i40e_aq_get_phy_abilities_resp abilities;
1016 enum i40e_status_code aq_error = 0;
1018 INIT_DBG_DEV(dev, "begin");
/* Max frame = MTU + Ethernet header + CRC + one VLAN tag. */
1020 vsi->shared->isc_max_frame_size =
1021 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1022 + ETHER_VLAN_ENCAP_LEN;
1024 aq_error = i40e_aq_get_phy_capabilities(hw,
1025 FALSE, TRUE, &abilities, NULL);
1026 /* May need delay to detect fiber correctly */
1027 if (aq_error == I40E_ERR_UNKNOWN_PHY) {
1028 /* TODO: Maybe just retry this in a task... */
1029 i40e_msec_delay(200);
1030 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
1031 TRUE, &abilities, NULL);
/* Still unknown after retry: warn but continue with what we have. */
1034 if (aq_error == I40E_ERR_UNKNOWN_PHY)
1035 device_printf(dev, "Unknown PHY type detected!\n");
1038 "Error getting supported media types, err %d,"
1039 " AQ error %d\n", aq_error, hw->aq.asq_last_status);
1041 pf->supported_speeds = abilities.link_speed;
1042 #if __FreeBSD_version >= 1100000
1043 if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
1045 if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
1048 ixl_add_ifmedia(vsi, hw->phy.phy_types);
1051 /* Use autoselect media by default */
1052 ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1053 ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
1059 * Input: bitmap of enum i40e_aq_link_speed
/*
 * Convert a bitmap of enum i40e_aq_link_speed values into the baudrate of
 * the fastest set speed, checking from highest to lowest.  Falls back to
 * 100 Mb/s (the minimum supported speed) if no bit matches.
 * NOTE(review): the per-speed return statements are on lines not visible
 * in this view.
 */
1062 ixl_max_aq_speed_to_value(u8 link_speeds)
1064 if (link_speeds & I40E_LINK_SPEED_40GB)
1066 if (link_speeds & I40E_LINK_SPEED_25GB)
1068 if (link_speeds & I40E_LINK_SPEED_20GB)
1070 if (link_speeds & I40E_LINK_SPEED_10GB)
1072 if (link_speeds & I40E_LINK_SPEED_1GB)
1074 if (link_speeds & I40E_LINK_SPEED_100MB)
1075 return IF_Mbps(100);
1077 /* Minimum supported link speed */
1078 return IF_Mbps(100);
1082 ** Run when the Admin Queue gets a link state change interrupt.
/*
 * Handle a link-state-change event received on the Admin Receive Queue:
 * refresh pf->link_up from firmware and warn if the link failed because
 * an unqualified (unsupported) module was detected.  OS-visible link
 * state is updated elsewhere (by iflib).
 */
1085 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1087 struct i40e_hw *hw = &pf->hw;
1088 device_t dev = iflib_get_dev(pf->vsi.ctx);
1089 struct i40e_aqc_get_link_status *status =
1090 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
1092 /* Request link status from adapter */
1093 hw->phy.get_link_info = TRUE;
1094 i40e_get_link_status(hw, &pf->link_up);
1096 /* Print out message if an unqualified module is found */
/* Warn only when media is present, speeds are advertised, the module is
 * not qualified, and the link is down — i.e. the module is the cause. */
1097 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
1098 (pf->advertised_speed) &&
1099 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
1100 (!(status->link_info & I40E_AQ_LINK_UP)))
1101 device_printf(dev, "Link failed because "
1102 "an unqualified module was detected!\n");
1104 /* OS link info is updated elsewhere */
1107 /*********************************************************************
1109 * Get Firmware Switch configuration
1110 * - this will need to be more robust when more complex
1111 * switch configurations are enabled.
1113 **********************************************************************/
/*
 * Retrieve the firmware switch configuration and record the uplink,
 * downlink and VSI SEIDs from its first element.  The simplified
 * single-element handling assumes one VSI; this will need to be more
 * robust once complex switch configurations are enabled.
 */
1115 ixl_switch_config(struct ixl_pf *pf)
1117 struct i40e_hw *hw = &pf->hw;
1118 struct ixl_vsi *vsi = &pf->vsi;
1119 device_t dev = iflib_get_dev(vsi->ctx);
1120 struct i40e_aqc_get_switch_config_resp *sw_config;
1121 u8 aq_buf[I40E_AQ_LARGE_BUF];
/* The AQ response is parsed in place out of the stack buffer. */
1125 memset(&aq_buf, 0, sizeof(aq_buf));
1126 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
1127 ret = i40e_aq_get_switch_config(hw, sw_config,
1128 sizeof(aq_buf), &next, NULL);
1130 device_printf(dev, "aq_get_switch_config() failed, error %d,"
1131 " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
/* Optional debug dump of every reported switch element. */
1134 if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
1136 "Switch config: header reported: %d in structure, %d total\n",
1137 sw_config->header.num_reported, sw_config->header.num_total);
1138 for (int i = 0; i < sw_config->header.num_reported; i++) {
1140 "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
1141 sw_config->element[i].element_type,
1142 sw_config->element[i].seid,
1143 sw_config->element[i].uplink_seid,
1144 sw_config->element[i].downlink_seid);
1147 /* Simplified due to a single VSI */
1148 vsi->uplink_seid = sw_config->element[0].uplink_seid;
1149 vsi->downlink_seid = sw_config->element[0].downlink_seid;
1150 vsi->seid = sw_config->element[0].seid;
1154 /*********************************************************************
1156 * Initialize the VSI: this handles contexts, which means things
1157 * like the number of descriptors, buffer size,
1158 * plus we init the rings thru this function.
1160 **********************************************************************/
/*
 * Full VSI bring-up: fetch the VSI parameters from firmware, program the
 * queue/TC mapping, VLAN stripping mode and (optionally) iWARP TCP enable
 * via i40e_aq_update_vsi_params(), then write an HMC TX and RX context for
 * every queue and reset the rings.
 * NOTE(review): several error-check/return lines and local declarations
 * (err, tc_queues, txctl) appear elided in this excerpt.
 */
1162 ixl_initialize_vsi(struct ixl_vsi *vsi)
1164 struct ixl_pf *pf = vsi->back;
1165 if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx);
1166 struct ixl_tx_queue *tx_que = vsi->tx_queues;
1167 struct ixl_rx_queue *rx_que = vsi->rx_queues;
1168 device_t dev = iflib_get_dev(vsi->ctx);
1169 struct i40e_hw *hw = vsi->hw;
1170 struct i40e_vsi_context ctxt;
/* Start from the firmware's current view of this VSI. */
1174 memset(&ctxt, 0, sizeof(ctxt));
1175 ctxt.seid = vsi->seid;
1176 if (pf->veb_seid != 0)
1177 ctxt.uplink_seid = pf->veb_seid;
1178 ctxt.pf_num = hw->pf_id;
1179 err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
1181 device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
1182 " aq_error %d\n", err, hw->aq.asq_last_status);
1185 ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
1186 "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
1187 "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
1188 "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
1189 ctxt.uplink_seid, ctxt.vsi_number,
1190 ctxt.vsis_allocated, ctxt.vsis_unallocated,
1191 ctxt.flags, ctxt.pf_num, ctxt.vf_num,
1192 ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
1194 ** Set the queue and traffic class bits
1195 ** - when multiple traffic classes are supported
1196 ** this will need to be more robust.
1198 ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1199 ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
1200 /* In contig mode, que_mapping[0] is first queue index used by this VSI */
1201 ctxt.info.queue_mapping[0] = 0;
1203 * This VSI will only use traffic class 0; start traffic class 0's
1204 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
1205 * the driver may not use all of them).
/* fls() yields log2 of the allocated queue count (power of two). */
1207 tc_queues = fls(pf->qtag.num_allocated) - 1;
1208 ctxt.info.tc_mapping[0] = ((pf->qtag.first_qidx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
1209 & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
1210 ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
1211 & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
1213 /* Set VLAN receive stripping mode */
1214 ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
1215 ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
1216 if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
1217 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1219 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1222 /* Set TCP Enable for iWARP capable VSI */
1223 if (ixl_enable_iwarp && pf->iw_enabled) {
1224 ctxt.info.valid_sections |=
1225 htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
1226 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
1229 /* Save VSI number and info for use later */
1230 vsi->vsi_num = ctxt.vsi_number;
1231 bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1233 /* Reset VSI statistics */
1234 ixl_vsi_reset_stats(vsi);
1235 vsi->hw_filters_add = 0;
1236 vsi->hw_filters_del = 0;
1238 ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
/* Push the modified context back to firmware. */
1240 err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
1242 device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
1243 " aq_error %d\n", err, hw->aq.asq_last_status);
/* Program one HMC TX queue context per TX queue. */
1247 for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
1248 struct tx_ring *txr = &tx_que->txr;
1249 struct i40e_hmc_obj_txq tctx;
1252 /* Setup the HMC TX Context */
1253 bzero(&tctx, sizeof(tctx));
1254 tctx.new_context = 1;
1255 tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
1256 tctx.qlen = scctx->isc_ntxd[0];
1257 tctx.fc_ena = 0; /* Disable FCoE */
1259 * This value needs to pulled from the VSI that this queue
1260 * is assigned to. Index into array is traffic class.
1262 tctx.rdylist = vsi->info.qs_handle[0];
1264 * Set these to enable Head Writeback
1265 * - Address is last entry in TX ring (reserved for HWB index)
1266 * Leave these as 0 for Descriptor Writeback
1268 if (vsi->enable_head_writeback) {
1269 tctx.head_wb_ena = 1;
1270 tctx.head_wb_addr = txr->tx_paddr +
1271 (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
1273 tctx.head_wb_ena = 0;
1274 tctx.head_wb_addr = 0;
1276 tctx.rdylist_act = 0;
1277 err = i40e_clear_lan_tx_queue_context(hw, i);
1279 device_printf(dev, "Unable to clear TX context\n");
1282 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
1284 device_printf(dev, "Unable to set TX context\n");
1287 /* Associate the ring with this PF */
1288 txctl = I40E_QTX_CTL_PF_QUEUE;
1289 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
1290 I40E_QTX_CTL_PF_INDX_MASK);
1291 wr32(hw, I40E_QTX_CTL(i), txctl);
1294 /* Do ring (re)init */
1295 ixl_init_tx_ring(vsi, tx_que);
/* Program one HMC RX queue context per RX queue. */
1297 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
1298 struct rx_ring *rxr = &rx_que->rxr;
1299 struct i40e_hmc_obj_rxq rctx;
1301 /* Next setup the HMC RX Context */
/* mbuf size follows the configured max frame size: cluster or jumbo page. */
1302 if (scctx->isc_max_frame_size <= MCLBYTES)
1303 rxr->mbuf_sz = MCLBYTES;
1305 rxr->mbuf_sz = MJUMPAGESIZE;
/* Largest frame the HW can chain across rx_buf_chain_len buffers. */
1307 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
1309 /* Set up an RX context for the HMC */
1310 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
1311 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
1312 /* ignore header split for now */
1313 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
1314 rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
1315 scctx->isc_max_frame_size : max_rxmax;
1317 rctx.dsize = 1; /* do 32byte descriptors */
1318 rctx.hsplit_0 = 0; /* no header split */
1319 rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
1320 rctx.qlen = scctx->isc_nrxd[0];
1321 rctx.tphrdesc_ena = 1;
1322 rctx.tphwdesc_ena = 1;
1323 rctx.tphdata_ena = 0; /* Header Split related */
1324 rctx.tphhead_ena = 0; /* Header Split related */
1325 rctx.lrxqthresh = 1; /* Interrupt at <64 desc avail */
1328 rctx.showiv = 1; /* Strip inner VLAN header */
1329 rctx.fc_ena = 0; /* Disable FCoE */
1330 rctx.prefena = 1; /* Prefetch descriptors */
1332 err = i40e_clear_lan_rx_queue_context(hw, i);
1335 "Unable to clear RX context %d\n", i);
1338 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
1340 device_printf(dev, "Unable to set RX context %d\n", i);
/* Reset the RX tail pointer; the ring starts empty. */
1343 wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
/*
 * Drain the VSI's software MAC filter list, unlinking each entry from the
 * head of the SLIST. NOTE(review): the per-entry free() call appears elided
 * in this excerpt — verify entries are released in the full source.
 */
1349 ixl_free_mac_filters(struct ixl_vsi *vsi)
1351 struct ixl_mac_filter *f;
1353 while (!SLIST_EMPTY(&vsi->ftl)) {
1354 f = SLIST_FIRST(&vsi->ftl);
1355 SLIST_REMOVE_HEAD(&vsi->ftl, next);
1361 ** Provide an update to the queue RX
1362 ** interrupt moderation value.
/*
 * Dynamic RX interrupt throttling: classify the queue's recent byte rate
 * into a latency band (LOW/AVE/BULK), pick a matching ITR value, smooth it
 * exponentially against the previous value, and write it to the per-queue
 * ITR register. When dynamic ITR is off, simply sync the register with the
 * VSI's static setting.
 */
1365 ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
1367 struct ixl_vsi *vsi = que->vsi;
1368 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1369 struct i40e_hw *hw = vsi->hw;
1370 struct rx_ring *rxr = &que->rxr;
1375 /* Idle, do nothing */
1376 if (rxr->bytes == 0)
1379 if (pf->dynamic_rx_itr) {
/* Normalize traffic by the current ITR to get a rate estimate. */
1380 rx_bytes = rxr->bytes/rxr->itr;
1383 /* Adjust latency range */
1384 switch (rxr->latency) {
1385 case IXL_LOW_LATENCY:
1386 if (rx_bytes > 10) {
1387 rx_latency = IXL_AVE_LATENCY;
1388 rx_itr = IXL_ITR_20K;
1391 case IXL_AVE_LATENCY:
1392 if (rx_bytes > 20) {
1393 rx_latency = IXL_BULK_LATENCY;
1394 rx_itr = IXL_ITR_8K;
1395 } else if (rx_bytes <= 10) {
1396 rx_latency = IXL_LOW_LATENCY;
1397 rx_itr = IXL_ITR_100K;
1400 case IXL_BULK_LATENCY:
1401 if (rx_bytes <= 20) {
1402 rx_latency = IXL_AVE_LATENCY;
1403 rx_itr = IXL_ITR_20K;
1408 rxr->latency = rx_latency;
1410 if (rx_itr != rxr->itr) {
1411 /* do an exponential smoothing */
1412 rx_itr = (10 * rx_itr * rxr->itr) /
1413 ((9 * rx_itr) + rxr->itr);
1414 rxr->itr = min(rx_itr, IXL_MAX_ITR);
1415 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
1416 rxr->me), rxr->itr);
1418 } else { /* We may have toggled to non-dynamic */
1419 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
1420 vsi->rx_itr_setting = pf->rx_itr;
1421 /* Update the hardware if needed */
1422 if (rxr->itr != vsi->rx_itr_setting) {
1423 rxr->itr = vsi->rx_itr_setting;
1424 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
1425 rxr->me), rxr->itr);
1434 ** Provide a update to the queue TX
1435 ** interrupt moderation value.
/*
 * TX counterpart of ixl_set_queue_rx_itr(): classify the TX byte rate into
 * a latency band, smooth the corresponding ITR value, and write it to the
 * per-queue TX ITR register; fall back to the static VSI setting when
 * dynamic ITR is disabled.
 */
1438 ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
1440 struct ixl_vsi *vsi = que->vsi;
1441 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1442 struct i40e_hw *hw = vsi->hw;
1443 struct tx_ring *txr = &que->txr;
1449 /* Idle, do nothing */
1450 if (txr->bytes == 0)
1453 if (pf->dynamic_tx_itr) {
/* Normalize traffic by the current ITR to get a rate estimate. */
1454 tx_bytes = txr->bytes/txr->itr;
1457 switch (txr->latency) {
1458 case IXL_LOW_LATENCY:
1459 if (tx_bytes > 10) {
1460 tx_latency = IXL_AVE_LATENCY;
1461 tx_itr = IXL_ITR_20K;
1464 case IXL_AVE_LATENCY:
1465 if (tx_bytes > 20) {
1466 tx_latency = IXL_BULK_LATENCY;
1467 tx_itr = IXL_ITR_8K;
1468 } else if (tx_bytes <= 10) {
1469 tx_latency = IXL_LOW_LATENCY;
1470 tx_itr = IXL_ITR_100K;
1473 case IXL_BULK_LATENCY:
1474 if (tx_bytes <= 20) {
1475 tx_latency = IXL_AVE_LATENCY;
1476 tx_itr = IXL_ITR_20K;
1481 txr->latency = tx_latency;
1483 if (tx_itr != txr->itr) {
1484 /* do an exponential smoothing */
1485 tx_itr = (10 * tx_itr * txr->itr) /
1486 ((9 * tx_itr) + txr->itr);
1487 txr->itr = min(tx_itr, IXL_MAX_ITR);
1488 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1489 txr->me), txr->itr);
1492 } else { /* We may have toggled to non-dynamic */
1493 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
1494 vsi->tx_itr_setting = pf->tx_itr;
1495 /* Update the hardware if needed */
1496 if (txr->itr != vsi->tx_itr_setting) {
1497 txr->itr = vsi->tx_itr_setting;
1498 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
1499 txr->me), txr->itr);
1509 * ixl_sysctl_qtx_tail_handler
1510 * Retrieves I40E_QTX_TAIL value from hardware
/*
 * Read-only sysctl handler: reads the TX queue tail register for the queue
 * passed via oid_arg1 and reports it through sysctl_handle_int().
 */
1514 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
1516 struct ixl_tx_queue *tx_que;
/* Queue pointer is stashed in the sysctl oid's arg1 at creation time. */
1520 tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
1521 if (!tx_que) return 0;
1523 val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
1524 error = sysctl_handle_int(oidp, &val, 0, req);
1525 if (error || !req->newptr)
1531 * ixl_sysctl_qrx_tail_handler
1532 * Retrieves I40E_QRX_TAIL value from hardware
/*
 * Read-only sysctl handler: reads the RX queue tail register for the queue
 * passed via oid_arg1 and reports it through sysctl_handle_int().
 */
1536 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
1538 struct ixl_rx_queue *rx_que;
/* Queue pointer is stashed in the sysctl oid's arg1 at creation time. */
1542 rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
1543 if (!rx_que) return 0;
1545 val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
1546 error = sysctl_handle_int(oidp, &val, 0, req);
1547 if (error || !req->newptr)
1554 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
1555 * Writes to the ITR registers immediately.
/*
 * Sysctl handler for the PF-wide TX ITR. Rejects the write when dynamic TX
 * ITR is active or the value is outside [0, IXL_MAX_ITR]; otherwise stores
 * it and reprograms the hardware via ixl_configure_tx_itr().
 */
1558 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
1560 struct ixl_pf *pf = (struct ixl_pf *)arg1;
1561 device_t dev = pf->dev;
1563 int requested_tx_itr;
1565 requested_tx_itr = pf->tx_itr;
1566 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
/* Read-only access (no new value) or copy error: bail out here. */
1567 if ((error) || (req->newptr == NULL))
1569 if (pf->dynamic_tx_itr) {
1571 "Cannot set TX itr value while dynamic TX itr is enabled\n");
1574 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
1576 "Invalid TX itr value; value must be between 0 and %d\n",
1581 pf->tx_itr = requested_tx_itr;
1582 ixl_configure_tx_itr(pf);
1588 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
1589 * Writes to the ITR registers immediately.
/*
 * Sysctl handler for the PF-wide RX ITR; mirrors ixl_sysctl_pf_tx_itr()
 * (reject when dynamic RX ITR is on or value out of range, else store and
 * reprogram via ixl_configure_rx_itr()).
 */
1592 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
1594 struct ixl_pf *pf = (struct ixl_pf *)arg1;
1595 device_t dev = pf->dev;
1597 int requested_rx_itr;
1599 requested_rx_itr = pf->rx_itr;
1600 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
/* Read-only access (no new value) or copy error: bail out here. */
1601 if ((error) || (req->newptr == NULL))
1603 if (pf->dynamic_rx_itr) {
1605 "Cannot set RX itr value while dynamic RX itr is enabled\n");
1608 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
1610 "Invalid RX itr value; value must be between 0 and %d\n",
1615 pf->rx_itr = requested_rx_itr;
1616 ixl_configure_rx_itr(pf);
/*
 * Register the PF's sysctl statistics tree under the device node: driver
 * counters (admin IRQs), per-VSI stats, per-queue stats, and MAC stats.
 */
1622 ixl_add_hw_stats(struct ixl_pf *pf)
1624 struct ixl_vsi *vsi = &pf->vsi;
1625 device_t dev = iflib_get_dev(vsi->ctx);
1626 struct i40e_hw_port_stats *pf_stats = &pf->stats;
1628 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1629 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1630 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1632 /* Driver statistics */
1633 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
1634 CTLFLAG_RD, &pf->admin_irq,
1635 "Admin Queue IRQs received");
1637 ixl_add_vsi_sysctls(dev, vsi, ctx, "pf");
1639 ixl_add_queues_sysctls(dev, vsi);
1641 ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
/*
 * Create the "mac" sysctl node and attach one read-only UQUAD per hardware
 * port statistic, driven by a sentinel-terminated table of
 * {counter pointer, sysctl name, description} entries.
 */
1645 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
1646 struct sysctl_oid_list *child,
1647 struct i40e_hw_port_stats *stats)
1649 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1650 CTLFLAG_RD, NULL, "Mac Statistics");
1651 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
1653 struct i40e_eth_stats *eth_stats = &stats->eth;
1654 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
1656 struct ixl_sysctl_info ctls[] =
1658 {&stats->crc_errors, "crc_errors", "CRC Errors"},
1659 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
1660 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
1661 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
1662 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
1663 /* Packet Reception Stats */
1664 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
1665 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
1666 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
1667 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
1668 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
1669 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
1670 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
1671 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
1672 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
1673 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
1674 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
1675 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
1676 /* Packet Transmission Stats */
1677 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
1678 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
1679 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
1680 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
1681 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
1682 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
1683 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
1685 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
1686 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
1687 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
1688 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
/* Table is terminated by an entry with a NULL stat pointer. */
1693 struct ixl_sysctl_info *entry = ctls;
1694 while (entry->stat != 0)
1696 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
1697 CTLFLAG_RD, entry->stat,
1698 entry->description);
/*
 * Program the RSS hash key. The key comes from the kernel RSS subsystem
 * when available (rss_getkey), otherwise from ixl_get_default_rss_key().
 * X722 parts take the key via the admin queue; other MACs get it written
 * directly into the PFQF_HKEY registers.
 */
1704 ixl_set_rss_key(struct ixl_pf *pf)
1706 struct i40e_hw *hw = &pf->hw;
1707 struct ixl_vsi *vsi = &pf->vsi;
1708 device_t dev = pf->dev;
1709 u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
1710 enum i40e_status_code status;
1713 /* Fetch the configured RSS key */
1714 rss_getkey((uint8_t *) &rss_seed);
1716 ixl_get_default_rss_key(rss_seed);
1718 /* Fill out hash function seed */
1719 if (hw->mac.type == I40E_MAC_X722) {
1720 struct i40e_aqc_get_set_rss_key_data key_data;
/* 52 bytes == IXL_RSS_KEY_SIZE_REG (13) u32 words of seed material. */
1721 bcopy(rss_seed, &key_data, 52);
1722 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
1725 "i40e_aq_set_rss_key status %s, error %s\n",
1726 i40e_stat_str(hw, status),
1727 i40e_aq_str(hw, hw->aq.asq_last_status));
1729 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1730 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
1735 * Configure enabled PCTYPES for RSS.
/*
 * Build the hash-enable (HENA) bitmask of packet classification types and
 * write it to the PFQF_HENA register pair. With kernel RSS, each enabled
 * hash type maps to its i40e PCTYPE bit; otherwise a per-MAC default mask
 * is used. The new bits are OR'd on top of the current register contents.
 */
1738 ixl_set_rss_pctypes(struct ixl_pf *pf)
1740 struct i40e_hw *hw = &pf->hw;
1741 u64 set_hena = 0, hena;
1744 u32 rss_hash_config;
1746 rss_hash_config = rss_gethashconfig();
1747 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1748 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1749 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1750 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1751 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1752 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1753 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1754 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1755 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1756 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1757 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1758 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1759 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1760 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
/* No kernel RSS: fall back to the per-MAC default HENA mask. */
1762 if (hw->mac.type == I40E_MAC_X722)
1763 set_hena = IXL_DEFAULT_RSS_HENA_X722;
1765 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
/* HENA is a 64-bit mask split across two 32-bit registers. */
1767 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1768 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1770 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1771 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
/*
 * Fill the RSS lookup table (LUT), spreading entries across the VSI's RX
 * queues round-robin (or per the kernel RSS indirection table when built
 * with RSS). X722 programs the LUT via the admin queue; other MACs write
 * the PFQF_HLUT registers directly, four 8-bit entries per 32-bit word.
 */
1776 ixl_set_rss_hlut(struct ixl_pf *pf)
1778 struct i40e_hw *hw = &pf->hw;
1779 struct ixl_vsi *vsi = &pf->vsi;
1780 device_t dev = iflib_get_dev(vsi->ctx);
1782 int lut_entry_width;
1784 enum i40e_status_code status;
1786 lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
1788 /* Populate the LUT with max no. of queues in round robin fashion */
1790 for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
1793 * Fetch the RSS bucket id for the given indirection entry.
1794 * Cap it at the number of configured buckets (which is
1797 que_id = rss_get_indirection_to_bucket(i);
1798 que_id = que_id % vsi->num_rx_queues;
1800 que_id = i % vsi->num_rx_queues;
/* Mask the queue id down to the LUT entry width supported by the HW. */
1802 lut = (que_id & ((0x1 << lut_entry_width) - 1));
1806 if (hw->mac.type == I40E_MAC_X722) {
1807 status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
1809 device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1810 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1812 for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
1813 wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
1819 ** Setup the PF's RSS parameters.
/*
 * One-stop RSS setup: program the hash key, the enabled packet
 * classification types (HENA), and the queue lookup table, in that order.
 */
1822 ixl_config_rss(struct ixl_pf *pf)
1824 ixl_set_rss_key(pf);
1825 ixl_set_rss_pctypes(pf);
1826 ixl_set_rss_hlut(pf);
1830 ** This routine updates vlan filters, called by init
1831 ** it scans the filter table and then updates the hw
1832 ** after a soft reset.
/*
 * Re-arm the VLAN entries in the software filter list (marking each for
 * addition — marking lines appear elided in this excerpt) and push them
 * to the hardware in one ixl_add_hw_filters() call. No-op when the VSI
 * has no VLANs registered.
 */
1835 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
1837 struct ixl_mac_filter *f;
1840 if (vsi->num_vlans == 0)
1843 ** Scan the filter list for vlan entries,
1844 ** mark them for addition and then call
1845 ** for the AQ update.
1847 SLIST_FOREACH(f, &vsi->ftl, next) {
1848 if (f->flags & IXL_FILTER_VLAN) {
1856 printf("setup vlan: no filters found!\n");
1859 flags = IXL_FILTER_VLAN;
1860 flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
1861 ixl_add_hw_filters(vsi, flags, cnt);
1865 * In some firmware versions there is default MAC/VLAN filter
1866 * configured which interferes with filters managed by driver.
1867 * Make sure it's removed.
/*
 * Issue two remove-macvlan AQ commands for the permanent MAC address: one
 * exact match and one ignoring the VLAN, to cover both forms of the
 * firmware's default filter. Failures are intentionally not checked — the
 * filter may legitimately not exist.
 */
1870 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1872 struct i40e_aqc_remove_macvlan_element_data e;
1874 bzero(&e, sizeof(e));
1875 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1877 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1878 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1880 bzero(&e, sizeof(e));
1881 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1883 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1884 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1885 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1889 ** Initialize filter list and add filters that the hardware
1890 ** needs to know about.
1892 ** Requires VSI's filter list & seid to be set before calling.
/*
 * Bootstrap the VSI filter state: empty SLIST, broadcast reception on,
 * firmware default filter removed, the device MAC added as the baseline
 * filter, and (optionally) a PF-wide drop filter for outgoing Tx flow
 * control frames.
 */
1895 ixl_init_filters(struct ixl_vsi *vsi)
1897 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1899 /* Initialize mac filter list for VSI */
1900 SLIST_INIT(&vsi->ftl);
1902 /* Receive broadcast Ethernet frames */
1903 i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1905 ixl_del_default_hw_filters(vsi);
1907 ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1909 * Prevent Tx flow control frames from being sent out by
1910 * non-firmware transmitters.
1911 * This affects every VSI in the PF.
1913 if (pf->enable_tx_fc_filter)
1914 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1918 ** This routine adds multicast filters
/*
 * Add a multicast MAC filter (VLAN-agnostic). Skips duplicates by checking
 * the software list first; tags the new entry with IXL_FILTER_MC.
 */
1921 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
1923 struct ixl_mac_filter *f;
1925 /* Does one already exist */
1926 f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1930 f = ixl_new_filter(vsi, macaddr, IXL_VLAN_ANY);
1932 f->flags |= IXL_FILTER_MC;
1934 printf("WARNING: no filter available!!\n");
/* Re-push every in-use software filter to the hardware (e.g. after reset). */
1938 ixl_reconfigure_filters(struct ixl_vsi *vsi)
1940 ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
1944 * This routine adds a MAC/VLAN filter to the software filter
1945 * list, then adds that new filter to the HW if it doesn't already
1946 * exist in the SW filter list.
1949 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1951 struct ixl_mac_filter *f, *tmp;
1955 DEBUGOUT("ixl_add_filter: begin");
1960 /* Does one already exist */
1961 f = ixl_find_filter(vsi, macaddr, vlan);
1965 ** Is this the first vlan being registered, if so we
1966 ** need to remove the ANY filter that indicates we are
1967 ** not in a vlan, and replace that with a 0 filter.
/* Recursive swap: drop the VLAN_ANY entry, re-add as VLAN 0. */
1969 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1970 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1972 ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
1973 ixl_add_filter(vsi, macaddr, 0);
1977 f = ixl_new_filter(vsi, macaddr, vlan);
1979 device_printf(dev, "WARNING: no filter available!!\n");
1982 if (f->vlan != IXL_VLAN_ANY)
1983 f->flags |= IXL_FILTER_VLAN;
/* Push exactly this one new filter to the hardware. */
1987 f->flags |= IXL_FILTER_USED;
1988 ixl_add_hw_filters(vsi, f->flags, 1);
/*
 * Remove a MAC/VLAN filter: mark the software entry IXL_FILTER_DEL and
 * push one delete to the hardware. When the last VLAN goes away, swap the
 * VLAN-0 filter back to a VLAN_ANY filter (the inverse of ixl_add_filter's
 * first-VLAN transition).
 */
1992 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1994 struct ixl_mac_filter *f;
1996 f = ixl_find_filter(vsi, macaddr, vlan);
2000 f->flags |= IXL_FILTER_DEL;
2001 ixl_del_hw_filters(vsi, 1);
2002 if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
2005 /* Check if this is the last vlan removal */
2006 if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
2007 /* Switch back to a non-vlan filter */
2008 ixl_del_filter(vsi, macaddr, 0);
2009 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
2015 ** Find the filter with both matching mac addr and vlan id
/*
 * Linear scan of the VSI's software filter list; returns the matching
 * entry (cmp_etheraddr() non-zero means the MACs match) or falls through
 * when none is found. NOTE(review): the return statements appear elided
 * in this excerpt.
 */
2017 struct ixl_mac_filter *
2018 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
2020 struct ixl_mac_filter *f;
2022 SLIST_FOREACH(f, &vsi->ftl, next) {
2023 if ((cmp_etheraddr(f->macaddr, macaddr) != 0)
2024 && (f->vlan == vlan)) {
2033 ** This routine takes additions to the vsi filter
2034 ** table and creates an Admin Queue call to create
2035 ** the filters in the hardware.
/*
 * Collect up to 'cnt' software filters whose flags match 'flags' into an
 * AQ add-macvlan array (clearing each entry's IXL_FILTER_ADD mark) and
 * submit them in a single i40e_aq_add_macvlan() call. Counts successful
 * adds into vsi->hw_filters_add.
 */
2038 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
2040 struct i40e_aqc_add_macvlan_element_data *a, *b;
2041 struct ixl_mac_filter *f;
2045 enum i40e_status_code status;
2053 ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
2057 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
2058 M_DEVBUF, M_NOWAIT | M_ZERO);
2060 device_printf(dev, "add_hw_filters failed to get memory\n");
2065 ** Scan the filter list, each time we find one
2066 ** we add it to the admin queue array and turn off
2069 SLIST_FOREACH(f, &vsi->ftl, next) {
2070 if ((f->flags & flags) == flags) {
2071 b = &a[j]; // a pox on fvl long names :)
2072 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
/* VLAN_ANY entries match any VLAN; others carry an explicit tag. */
2073 if (f->vlan == IXL_VLAN_ANY) {
2075 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2077 b->vlan_tag = f->vlan;
2080 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2081 f->flags &= ~IXL_FILTER_ADD;
2084 ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
2085 MAC_FORMAT_ARGS(f->macaddr));
2091 status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
2093 device_printf(dev, "i40e_aq_add_macvlan status %s, "
2094 "error %s\n", i40e_stat_str(hw, status),
2095 i40e_aq_str(hw, hw->aq.asq_last_status));
2097 vsi->hw_filters_add += j;
2104 ** This routine takes removals in the vsi filter
2105 ** table and creates an Admin Queue call to delete
2106 ** the filters in the hardware.
/*
 * Gather up to 'cnt' software filters marked IXL_FILTER_DEL into an AQ
 * remove-macvlan array, unlink them from the software list, and submit a
 * single i40e_aq_remove_macvlan(). On AQ failure, count the per-element
 * successes (error_code == 0) so vsi->hw_filters_del stays accurate.
 */
2109 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
2111 struct i40e_aqc_remove_macvlan_element_data *d, *e;
2115 struct ixl_mac_filter *f, *f_temp;
2116 enum i40e_status_code status;
2123 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
2124 M_DEVBUF, M_NOWAIT | M_ZERO);
2126 device_printf(dev, "%s: failed to get memory\n", __func__);
/* SAFE variant: entries are removed from the list while iterating. */
2130 SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
2131 if (f->flags & IXL_FILTER_DEL) {
2132 e = &d[j]; // a pox on fvl long names :)
2133 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
2134 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2135 if (f->vlan == IXL_VLAN_ANY) {
2137 e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2139 e->vlan_tag = f->vlan;
2142 ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
2143 MAC_FORMAT_ARGS(f->macaddr));
2145 /* delete entry from vsi list */
2146 SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
2154 status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
2157 for (int i = 0; i < j; i++)
2158 sc += (!d[i].error_code);
2159 vsi->hw_filters_del += sc;
2161 "Failed to remove %d/%d filters, error %s\n",
2162 j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status));
2164 vsi->hw_filters_del += j;
/*
 * Enable one TX queue: map the VSI-relative index to the PF queue index,
 * pre-configure the queue, set QENA_REQ, then poll QENA_STAT (up to 10 x
 * 10us) until the hardware reports the queue enabled. Logs a warning if
 * the enable never takes.
 */
2171 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2173 struct i40e_hw *hw = &pf->hw;
2178 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2180 ixl_dbg(pf, IXL_DBG_EN_DIS,
2181 "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
2184 i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
2186 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2187 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
2188 I40E_QTX_ENA_QENA_STAT_MASK;
2189 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2190 /* Verify the enable took */
2191 for (int j = 0; j < 10; j++) {
2192 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2193 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2195 i40e_usec_delay(10);
2197 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
2198 device_printf(pf->dev, "TX queue %d still disabled!\n",
/*
 * Enable one RX queue: map the VSI-relative index to the PF queue index,
 * set QENA_REQ, then poll QENA_STAT (up to 10 x 10us) until the hardware
 * reports the queue enabled. Logs a warning if the enable never takes.
 */
2207 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2209 struct i40e_hw *hw = &pf->hw;
2214 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2216 ixl_dbg(pf, IXL_DBG_EN_DIS,
2217 "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
2220 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2221 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
2222 I40E_QRX_ENA_QENA_STAT_MASK;
2223 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2224 /* Verify the enable took */
2225 for (int j = 0; j < 10; j++) {
2226 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2227 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2229 i40e_usec_delay(10);
2231 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
2232 device_printf(pf->dev, "RX queue %d still disabled!\n",
/* Enable one queue pair: TX first, then RX for the same VSI index. */
2241 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2245 error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
2246 /* Called function already prints error message */
2249 error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
2253 /* For PF VSI only */
/*
 * Enable all TX rings, then all RX rings, of the PF VSI using the PF's
 * queue tag for index translation.
 */
2255 ixl_enable_rings(struct ixl_vsi *vsi)
2257 struct ixl_pf *pf = vsi->back;
2260 for (int i = 0; i < vsi->num_tx_queues; i++)
2261 error = ixl_enable_tx_ring(pf, &pf->qtag, i);
2263 for (int i = 0; i < vsi->num_rx_queues; i++)
2264 error = ixl_enable_rx_ring(pf, &pf->qtag, i);
2270 * Returns error on first ring that is detected hung.
/*
 * Disable one TX queue: pre-configure for disable, wait 500us, clear
 * QENA_REQ, then poll QENA_STAT (up to 10 x 10ms) until the hardware
 * reports the queue disabled. Logs a warning if it stays enabled.
 */
2273 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2275 struct i40e_hw *hw = &pf->hw;
2280 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2282 i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
2283 i40e_usec_delay(500);
2285 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2286 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2287 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
2288 /* Verify the disable took */
2289 for (int j = 0; j < 10; j++) {
2290 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
2291 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2293 i40e_msec_delay(10);
2295 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
2296 device_printf(pf->dev, "TX queue %d still enabled!\n",
2305 * Returns error on first ring that is detected hung.
/*
 * Disable one RX queue: clear QENA_REQ, then poll QENA_STAT (up to
 * 10 x 10ms) until the hardware reports the queue disabled. Logs a
 * warning if it stays enabled.
 */
2308 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2310 struct i40e_hw *hw = &pf->hw;
2315 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
2317 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2318 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2319 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
2320 /* Verify the disable took */
2321 for (int j = 0; j < 10; j++) {
2322 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
2323 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2325 i40e_msec_delay(10);
2327 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
2328 device_printf(pf->dev, "RX queue %d still enabled!\n",
/* Disable one queue pair: TX first, then RX for the same VSI index. */
2337 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
2341 error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
2342 /* Called function already prints error message */
2345 error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
/*
 * Disable all TX rings, then all RX rings, of the given VSI using the
 * supplied queue tag for index translation.
 */
2350 ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag)
2354 for (int i = 0; i < vsi->num_tx_queues; i++)
2355 error = ixl_disable_tx_ring(pf, qtag, i);
2357 for (int i = 0; i < vsi->num_rx_queues; i++)
2358 error = ixl_disable_rx_ring(pf, qtag, i);
/*
 * Handle a TX Malicious Driver Detection event: decode the global
 * GL_MDET_TX register (PF/VF number, event type, queue), clear it, then
 * check the PF- and per-VF-specific MDET_TX registers to attribute the
 * event, and print a diagnostic naming the offender(s).
 */
2364 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
2366 struct i40e_hw *hw = &pf->hw;
2367 device_t dev = pf->dev;
2369 bool mdd_detected = false;
2370 bool pf_mdd_detected = false;
2371 bool vf_mdd_detected = false;
2374 u8 pf_mdet_num, vp_mdet_num;
2377 /* find what triggered the MDD event */
2378 reg = rd32(hw, I40E_GL_MDET_TX);
2379 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
2380 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
2381 I40E_GL_MDET_TX_PF_NUM_SHIFT;
2382 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
2383 I40E_GL_MDET_TX_VF_NUM_SHIFT;
2384 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
2385 I40E_GL_MDET_TX_EVENT_SHIFT;
2386 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
2387 I40E_GL_MDET_TX_QUEUE_SHIFT;
/* Write-one-to-clear: acknowledge the global event. */
2388 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
2389 mdd_detected = true;
2395 reg = rd32(hw, I40E_PF_MDET_TX);
2396 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
2397 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
2398 pf_mdet_num = hw->pf_id;
2399 pf_mdd_detected = true;
2402 /* Check if MDD was caused by a VF */
2403 for (int i = 0; i < pf->num_vfs; i++) {
2405 reg = rd32(hw, I40E_VP_MDET_TX(i));
2406 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
2407 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
2409 vf->num_mdd_events++;
2410 vf_mdd_detected = true;
2414 /* Print out an error message */
2415 if (vf_mdd_detected && pf_mdd_detected)
2417 "Malicious Driver Detection event %d"
2418 " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
2419 event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
2420 else if (vf_mdd_detected && !pf_mdd_detected)
2422 "Malicious Driver Detection event %d"
2423 " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
2424 event, queue, pf_num, vf_num, vp_mdet_num);
2425 else if (!vf_mdd_detected && pf_mdd_detected)
2427 "Malicious Driver Detection event %d"
2428 " on TX queue %d, pf number %d (PF-%d)\n",
2429 event, queue, pf_num, pf_mdet_num);
2430 /* Theoretically shouldn't happen */
2433 "TX Malicious Driver Detection event (unknown)\n");
/*
 * Service an RX Malicious Driver Detection (MDD) event. Mirrors
 * ixl_handle_tx_mdd_event(), except that (per the comment below)
 * GL_MDET_RX reports only the triggering function, not a VF number,
 * so the log messages omit the vf_num field.
 *
 * NOTE(review): locals (reg, pf_num, event, queue) and the vp_mdet_num
 * assignment are not visible in this excerpt — confirm against the
 * full source.
 */
2437 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
2439 struct i40e_hw *hw = &pf->hw;
2440 device_t dev = pf->dev;
2442 bool mdd_detected = false;
2443 bool pf_mdd_detected = false;
2444 bool vf_mdd_detected = false;
2447 u8 pf_mdet_num, vp_mdet_num;
2451 * GL_MDET_RX doesn't contain VF number information, unlike
2454 reg = rd32(hw, I40E_GL_MDET_RX);
2455 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
2456 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
2457 I40E_GL_MDET_RX_FUNCTION_SHIFT;
2458 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
2459 I40E_GL_MDET_RX_EVENT_SHIFT;
2460 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
2461 I40E_GL_MDET_RX_QUEUE_SHIFT;
/* Write all-ones to clear the latched global MDD RX status */
2462 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
2463 mdd_detected = true;
/* Was this PF itself the source? */
2469 reg = rd32(hw, I40E_PF_MDET_RX);
2470 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
2471 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
2472 pf_mdet_num = hw->pf_id;
2473 pf_mdd_detected = true;
2476 /* Check if MDD was caused by a VF */
2477 for (int i = 0; i < pf->num_vfs; i++) {
2479 reg = rd32(hw, I40E_VP_MDET_RX(i));
2480 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
2481 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
2483 vf->num_mdd_events++;
2484 vf_mdd_detected = true;
2488 /* Print out an error message */
2489 if (vf_mdd_detected && pf_mdd_detected)
2491 "Malicious Driver Detection event %d"
2492 " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
2493 event, queue, pf_num, pf_mdet_num, vp_mdet_num);
2494 else if (vf_mdd_detected && !pf_mdd_detected)
2496 "Malicious Driver Detection event %d"
2497 " on RX queue %d, pf number %d, (VF-%d)\n",
2498 event, queue, pf_num, vp_mdet_num);
2499 else if (!vf_mdd_detected && pf_mdd_detected)
2501 "Malicious Driver Detection event %d"
2502 " on RX queue %d, pf number %d (PF-%d)\n",
2503 event, queue, pf_num, pf_mdet_num);
2504 /* Theoretically shouldn't happen */
2507 "RX Malicious Driver Detection event (unknown)\n");
2511 * ixl_handle_mdd_event
2513 * Called from interrupt handler to identify possibly malicious vfs
2514 * (But also detects events from the PF, as well)
2517 ixl_handle_mdd_event(struct ixl_pf *pf)
2519 struct i40e_hw *hw = &pf->hw;
2523 * Handle both TX/RX because it's possible they could
2524 * both trigger in the same interrupt.
2526 ixl_handle_tx_mdd_event(pf);
2527 ixl_handle_rx_mdd_event(pf);
/* Both directions serviced; clear the driver's pending-MDD state bit */
2529 atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
2531 /* re-enable mdd interrupt cause */
2532 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2533 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2534 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/*
 * Enable interrupts for the VSI: when running MSI-X, enable each RX
 * queue's vector individually; otherwise (MSI / legacy) enable the
 * single shared "other"/admin cause via intr0.
 */
2539 ixl_enable_intr(struct ixl_vsi *vsi)
2541 struct i40e_hw *hw = vsi->hw;
2542 struct ixl_rx_queue *que = vsi->rx_queues;
2544 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2545 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2546 ixl_enable_queue(hw, que->rxr.me);
2548 ixl_enable_intr0(hw);
/*
 * Disable the per-RX-queue interrupt vectors for every queue on the
 * VSI (does not touch the admin/intr0 cause).
 */
2552 ixl_disable_rings_intr(struct ixl_vsi *vsi)
2554 struct i40e_hw *hw = vsi->hw;
2555 struct ixl_rx_queue *que = vsi->rx_queues;
2557 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
2558 ixl_disable_queue(hw, que->rxr.me);
/*
 * Enable the "other"/admin-queue interrupt cause (vector 0) by setting
 * INTENA and CLEARPBA in PFINT_DYN_CTL0.
 */
2562 ixl_enable_intr0(struct i40e_hw *hw)
2566 /* Use IXL_ITR_NONE so ITR isn't updated here */
2567 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2568 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2569 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2570 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/*
 * Disable the "other"/admin-queue interrupt cause: write PFINT_DYN_CTL0
 * with INTENA clear, again using IXL_ITR_NONE so ITR is left untouched.
 */
2574 ixl_disable_intr0(struct i40e_hw *hw)
2578 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
2579 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/*
 * Enable the MSI-X interrupt for a single queue vector 'id' via its
 * PFINT_DYN_CTLN register; IXL_ITR_NONE leaves the ITR setting alone.
 */
2584 ixl_enable_queue(struct i40e_hw *hw, int id)
2588 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2589 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2590 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2591 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/*
 * Disable the MSI-X interrupt for queue vector 'id' (INTENA left clear;
 * IXL_ITR_NONE keeps the ITR setting unchanged).
 */
2595 ixl_disable_queue(struct i40e_hw *hw, int id)
2599 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2600 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/*
 * Pull the full set of hardware port statistics into pf->stats, using
 * pf->stats_offsets as the baseline snapshot so counters report values
 * since driver load (the hardware counters are not cleared by PF reset
 * — see ixl_stat_update48). Afterwards, refresh the PF VSI stats and
 * the eth stats of every enabled VF VSI.
 */
2604 ixl_update_stats_counters(struct ixl_pf *pf)
2606 struct i40e_hw *hw = &pf->hw;
2607 struct ixl_vsi *vsi = &pf->vsi;
2610 struct i40e_hw_port_stats *nsd = &pf->stats;
2611 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
2613 /* Update hw stats */
2614 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
2615 pf->stat_offsets_loaded,
2616 &osd->crc_errors, &nsd->crc_errors);
2617 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
2618 pf->stat_offsets_loaded,
2619 &osd->illegal_bytes, &nsd->illegal_bytes);
2620 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
2621 I40E_GLPRT_GORCL(hw->port),
2622 pf->stat_offsets_loaded,
2623 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
2624 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
2625 I40E_GLPRT_GOTCL(hw->port),
2626 pf->stat_offsets_loaded,
2627 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
2628 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
2629 pf->stat_offsets_loaded,
2630 &osd->eth.rx_discards,
2631 &nsd->eth.rx_discards);
2632 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
2633 I40E_GLPRT_UPRCL(hw->port),
2634 pf->stat_offsets_loaded,
2635 &osd->eth.rx_unicast,
2636 &nsd->eth.rx_unicast);
2637 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
2638 I40E_GLPRT_UPTCL(hw->port),
2639 pf->stat_offsets_loaded,
2640 &osd->eth.tx_unicast,
2641 &nsd->eth.tx_unicast);
2642 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
2643 I40E_GLPRT_MPRCL(hw->port),
2644 pf->stat_offsets_loaded,
2645 &osd->eth.rx_multicast,
2646 &nsd->eth.rx_multicast);
2647 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
2648 I40E_GLPRT_MPTCL(hw->port),
2649 pf->stat_offsets_loaded,
2650 &osd->eth.tx_multicast,
2651 &nsd->eth.tx_multicast);
2652 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
2653 I40E_GLPRT_BPRCL(hw->port),
2654 pf->stat_offsets_loaded,
2655 &osd->eth.rx_broadcast,
2656 &nsd->eth.rx_broadcast);
2657 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
2658 I40E_GLPRT_BPTCL(hw->port),
2659 pf->stat_offsets_loaded,
2660 &osd->eth.tx_broadcast,
2661 &nsd->eth.tx_broadcast);
/* Link-down drops and MAC fault counters */
2663 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2664 pf->stat_offsets_loaded,
2665 &osd->tx_dropped_link_down,
2666 &nsd->tx_dropped_link_down);
2667 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2668 pf->stat_offsets_loaded,
2669 &osd->mac_local_faults,
2670 &nsd->mac_local_faults);
2671 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2672 pf->stat_offsets_loaded,
2673 &osd->mac_remote_faults,
2674 &nsd->mac_remote_faults);
2675 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2676 pf->stat_offsets_loaded,
2677 &osd->rx_length_errors,
2678 &nsd->rx_length_errors);
2680 /* Flow control (LFC) stats */
2681 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2682 pf->stat_offsets_loaded,
2683 &osd->link_xon_rx, &nsd->link_xon_rx);
2684 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2685 pf->stat_offsets_loaded,
2686 &osd->link_xon_tx, &nsd->link_xon_tx);
2687 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2688 pf->stat_offsets_loaded,
2689 &osd->link_xoff_rx, &nsd->link_xoff_rx);
2690 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2691 pf->stat_offsets_loaded,
2692 &osd->link_xoff_tx, &nsd->link_xoff_tx);
2694 /* Packet size stats rx */
2695 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2696 I40E_GLPRT_PRC64L(hw->port),
2697 pf->stat_offsets_loaded,
2698 &osd->rx_size_64, &nsd->rx_size_64);
2699 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2700 I40E_GLPRT_PRC127L(hw->port),
2701 pf->stat_offsets_loaded,
2702 &osd->rx_size_127, &nsd->rx_size_127);
2703 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2704 I40E_GLPRT_PRC255L(hw->port),
2705 pf->stat_offsets_loaded,
2706 &osd->rx_size_255, &nsd->rx_size_255);
2707 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2708 I40E_GLPRT_PRC511L(hw->port),
2709 pf->stat_offsets_loaded,
2710 &osd->rx_size_511, &nsd->rx_size_511);
2711 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2712 I40E_GLPRT_PRC1023L(hw->port),
2713 pf->stat_offsets_loaded,
2714 &osd->rx_size_1023, &nsd->rx_size_1023);
2715 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2716 I40E_GLPRT_PRC1522L(hw->port),
2717 pf->stat_offsets_loaded,
2718 &osd->rx_size_1522, &nsd->rx_size_1522);
2719 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2720 I40E_GLPRT_PRC9522L(hw->port),
2721 pf->stat_offsets_loaded,
2722 &osd->rx_size_big, &nsd->rx_size_big);
2724 /* Packet size stats tx */
2725 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2726 I40E_GLPRT_PTC64L(hw->port),
2727 pf->stat_offsets_loaded,
2728 &osd->tx_size_64, &nsd->tx_size_64);
2729 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2730 I40E_GLPRT_PTC127L(hw->port),
2731 pf->stat_offsets_loaded,
2732 &osd->tx_size_127, &nsd->tx_size_127);
2733 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2734 I40E_GLPRT_PTC255L(hw->port),
2735 pf->stat_offsets_loaded,
2736 &osd->tx_size_255, &nsd->tx_size_255);
2737 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2738 I40E_GLPRT_PTC511L(hw->port),
2739 pf->stat_offsets_loaded,
2740 &osd->tx_size_511, &nsd->tx_size_511);
2741 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2742 I40E_GLPRT_PTC1023L(hw->port),
2743 pf->stat_offsets_loaded,
2744 &osd->tx_size_1023, &nsd->tx_size_1023);
2745 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2746 I40E_GLPRT_PTC1522L(hw->port),
2747 pf->stat_offsets_loaded,
2748 &osd->tx_size_1522, &nsd->tx_size_1522);
2749 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2750 I40E_GLPRT_PTC9522L(hw->port),
2751 pf->stat_offsets_loaded,
2752 &osd->tx_size_big, &nsd->tx_size_big);
/* Undersize/fragment/oversize/jabber error counters */
2754 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2755 pf->stat_offsets_loaded,
2756 &osd->rx_undersize, &nsd->rx_undersize);
2757 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2758 pf->stat_offsets_loaded,
2759 &osd->rx_fragments, &nsd->rx_fragments);
2760 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2761 pf->stat_offsets_loaded,
2762 &osd->rx_oversize, &nsd->rx_oversize);
2763 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2764 pf->stat_offsets_loaded,
2765 &osd->rx_jabber, &nsd->rx_jabber);
/* First pass complete: subsequent calls subtract the saved offsets */
2766 pf->stat_offsets_loaded = true;
2769 /* Update vsi stats */
2770 ixl_update_vsi_stats(vsi);
/* Refresh eth stats for each enabled VF VSI as well */
2772 for (int i = 0; i < pf->num_vfs; i++) {
2774 if (vf->vf_flags & VF_FLAG_ENABLED)
2775 ixl_update_eth_stats(&pf->vfs[i].vsi);
/*
 * Quiesce the device ahead of a reset: shut down the LAN HMC, mask the
 * admin interrupt, shut down the admin queue, and release the PF's
 * queue allocation back to the queue manager. Failures are logged but
 * do not abort the teardown sequence.
 *
 * NOTE(review): the 'is_up' parameter is unused in the lines visible
 * here — confirm against the full source.
 */
2780 ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
2782 struct i40e_hw *hw = &pf->hw;
2783 device_t dev = pf->dev;
2786 error = i40e_shutdown_lan_hmc(hw);
2789 "Shutdown LAN HMC failed with code %d\n", error);
2791 ixl_disable_intr0(hw);
2793 error = i40e_shutdown_adminq(hw);
2796 "Shutdown Admin queue failed with code %d\n", error);
2798 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
/*
 * Re-initialize driver/hardware state after a reset, in dependency
 * order: PF reset, admin queue, capability discovery, LAN HMC
 * init/config, queue reservation, switch config, PHY interrupt mask,
 * flow control, default-filter cleanup, link status, DCB parameters,
 * and finally the admin (intr0) interrupt when running MSI-X.
 * On any hard failure, jumps to the error label and asks the user to
 * reload the driver.
 */
2803 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
2805 struct i40e_hw *hw = &pf->hw;
2806 struct ixl_vsi *vsi = &pf->vsi;
2807 device_t dev = pf->dev;
2810 device_printf(dev, "Rebuilding driver state...\n");
2812 error = i40e_pf_reset(hw);
2814 device_printf(dev, "PF reset failure %s\n",
2815 i40e_stat_str(hw, error));
2816 goto ixl_rebuild_hw_structs_after_reset_err;
/* A firmware API version mismatch is tolerated here */
2820 error = i40e_init_adminq(hw);
2821 if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
2822 device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
2824 goto ixl_rebuild_hw_structs_after_reset_err;
2827 i40e_clear_pxe_mode(hw);
2829 error = ixl_get_hw_capabilities(pf);
2831 device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
2832 goto ixl_rebuild_hw_structs_after_reset_err;
2835 error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
2836 hw->func_caps.num_rx_qp, 0, 0);
2838 device_printf(dev, "init_lan_hmc failed: %d\n", error);
2839 goto ixl_rebuild_hw_structs_after_reset_err;
2842 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
2844 device_printf(dev, "configure_lan_hmc failed: %d\n", error);
2845 goto ixl_rebuild_hw_structs_after_reset_err;
2848 /* reserve a contiguous allocation for the PF's VSI */
2849 error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
2851 device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
2853 /* TODO: error handling */
2856 error = ixl_switch_config(pf);
2858 device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
2861 goto ixl_rebuild_hw_structs_after_reset_err;
2864 error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
2867 device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
2868 " aq_err %d\n", error, hw->aq.asq_last_status);
2870 goto ixl_rebuild_hw_structs_after_reset_err;
2874 error = i40e_set_fc(hw, &set_fc_err_mask, true);
2876 device_printf(dev, "init: setting link flow control failed; retcode %d,"
2877 " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
2879 goto ixl_rebuild_hw_structs_after_reset_err;
2882 /* Remove default filters reinstalled by FW on reset */
2883 ixl_del_default_hw_filters(vsi);
2885 /* Determine link state */
2886 if (ixl_attach_get_link_status(pf)) {
2888 /* TODO: error handling */
2891 i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
2892 ixl_get_fw_lldp_status(pf);
2894 /* Keep admin queue interrupts active while driver is loaded */
2895 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
2896 ixl_configure_intr0_msix(pf);
2897 ixl_enable_intr0(hw);
2900 device_printf(dev, "Rebuilding driver state done.\n");
2903 ixl_rebuild_hw_structs_after_reset_err:
2904 device_printf(dev, "Reload the driver to recover\n");
/*
 * Handle an EMP (firmware) reset: quiesce the driver, poll
 * GLGEN_RSTAT's DEVSTATE field until the device comes back (100ms per
 * iteration, up to 100 iterations — typically 3-4 seconds total), then
 * rebuild driver/hardware state and clear the resetting flag.
 */
2909 ixl_handle_empr_reset(struct ixl_pf *pf)
2911 struct ixl_vsi *vsi = &pf->vsi;
2912 struct i40e_hw *hw = &pf->hw;
/* Remember whether the interface was running before the reset */
2913 bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
2917 ixl_prepare_for_reset(pf, is_up);
2919 /* Typically finishes within 3-4 seconds */
2920 while (count++ < 100) {
2921 reg = rd32(hw, I40E_GLGEN_RSTAT)
2922 & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
2924 i40e_msec_delay(100);
2928 ixl_dbg(pf, IXL_DBG_INFO,
2929 "Reset wait count: %d\n", count);
2931 ixl_rebuild_hw_structs_after_reset(pf);
2933 atomic_clear_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
2937 * Update VSI-specific ethernet statistics counters.
2940 ixl_update_eth_stats(struct ixl_vsi *vsi)
2942 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2943 struct i40e_hw *hw = &pf->hw;
2944 struct i40e_eth_stats *es;
2945 struct i40e_eth_stats *oes;
2946 struct i40e_hw_port_stats *nsd;
/* Hardware stat index assigned to this VSI */
2947 u16 stat_idx = vsi->info.stat_counter_idx;
2949 es = &vsi->eth_stats;
2950 oes = &vsi->eth_stats_offsets;
2953 /* Gather up the stats that the hw collects */
2954 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2955 vsi->stat_offsets_loaded,
2956 &oes->tx_errors, &es->tx_errors);
2957 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2958 vsi->stat_offsets_loaded,
2959 &oes->rx_discards, &es->rx_discards);
/* RX byte and packet-class counters (48-bit pairs) */
2961 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2962 I40E_GLV_GORCL(stat_idx),
2963 vsi->stat_offsets_loaded,
2964 &oes->rx_bytes, &es->rx_bytes);
2965 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2966 I40E_GLV_UPRCL(stat_idx),
2967 vsi->stat_offsets_loaded,
2968 &oes->rx_unicast, &es->rx_unicast);
2969 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2970 I40E_GLV_MPRCL(stat_idx),
2971 vsi->stat_offsets_loaded,
2972 &oes->rx_multicast, &es->rx_multicast);
2973 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2974 I40E_GLV_BPRCL(stat_idx),
2975 vsi->stat_offsets_loaded,
2976 &oes->rx_broadcast, &es->rx_broadcast);
/* TX byte and packet-class counters (48-bit pairs) */
2978 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2979 I40E_GLV_GOTCL(stat_idx),
2980 vsi->stat_offsets_loaded,
2981 &oes->tx_bytes, &es->tx_bytes);
2982 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2983 I40E_GLV_UPTCL(stat_idx),
2984 vsi->stat_offsets_loaded,
2985 &oes->tx_unicast, &es->tx_unicast);
2986 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2987 I40E_GLV_MPTCL(stat_idx),
2988 vsi->stat_offsets_loaded,
2989 &oes->tx_multicast, &es->tx_multicast);
2990 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2991 I40E_GLV_BPTCL(stat_idx),
2992 vsi->stat_offsets_loaded,
2993 &oes->tx_broadcast, &es->tx_broadcast);
/* First pass done; later calls subtract the saved offsets */
2994 vsi->stat_offsets_loaded = true;
/*
 * Refresh the VSI's eth stats from hardware, then fold them (together
 * with selected port-level counters) into the ifnet statistics via the
 * IXL_SET_* accessor macros.
 *
 * NOTE(review): the assignment of 'nsd' (port stats pointer) and the
 * full argument lists of the IPACKETS/OPACKETS macros are not visible
 * in this excerpt — confirm against the full source.
 */
2998 ixl_update_vsi_stats(struct ixl_vsi *vsi)
3002 struct i40e_eth_stats *es;
3005 struct i40e_hw_port_stats *nsd;
3009 es = &vsi->eth_stats;
3012 ixl_update_eth_stats(vsi);
/* Combine VSI TX discards with port-level link-down drops */
3014 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
3016 /* Update ifnet stats */
3017 IXL_SET_IPACKETS(vsi, es->rx_unicast +
3020 IXL_SET_OPACKETS(vsi, es->tx_unicast +
3023 IXL_SET_IBYTES(vsi, es->rx_bytes);
3024 IXL_SET_OBYTES(vsi, es->tx_bytes);
3025 IXL_SET_IMCASTS(vsi, es->rx_multicast);
3026 IXL_SET_OMCASTS(vsi, es->tx_multicast);
/* Input errors aggregate several port-level error counters */
3028 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
3029 nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
3031 IXL_SET_OERRORS(vsi, es->tx_errors);
3032 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
3033 IXL_SET_OQDROPS(vsi, tx_discards);
3034 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
3035 IXL_SET_COLLISIONS(vsi, 0);
3039 * Reset all of the stats for the given pf
3042 ixl_pf_reset_stats(struct ixl_pf *pf)
3044 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
3045 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
/* Force the next stats read to re-capture baseline offsets */
3046 pf->stat_offsets_loaded = false;
3050 * Resets all stats of the given vsi
3053 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
3055 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
3056 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
/* Force the next stats read to re-capture baseline offsets */
3057 vsi->stat_offsets_loaded = false;
3061 * Read and update a 48 bit stat from the hw
3063 * Since the device stats are not reset at PFReset, they likely will not
3064 * be zeroed when the driver starts. We'll save the first values read
3065 * and use them as offsets to be subtracted from the raw values in order
3066 * to report stats that count from zero.
3069 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
3070 bool offset_loaded, u64 *offset, u64 *stat)
/* On amd64/FreeBSD 10+ a single 64-bit read covers both halves */
3074 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
3075 new_data = rd64(hw, loreg);
3078 * Use two rd32's instead of one rd64; FreeBSD versions before
3079 * 10 don't support 64-bit bus reads/writes.
3081 new_data = rd32(hw, loreg);
/* Only the low 16 bits of the high register are counter bits */
3082 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
3087 if (new_data >= *offset)
3088 *stat = new_data - *offset;
/* Counter wrapped past 2^48 since the offset was captured */
3090 *stat = (new_data + ((u64)1 << 48)) - *offset;
3091 *stat &= 0xFFFFFFFFFFFFULL;
3095 * Read and update a 32 bit stat from the hw
3098 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
3099 bool offset_loaded, u64 *offset, u64 *stat)
3103 new_data = rd32(hw, reg);
3106 if (new_data >= *offset)
3107 *stat = (u32)(new_data - *offset);
/* Counter wrapped past 2^32 since the offset was captured */
3109 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
/*
 * Register all of the device's sysctl nodes: user-facing tunables
 * (flow control, advertised speed, ITR settings), an optional "fec"
 * subtree for 25G parts, and a hidden "debug" subtree (CTLFLAG_SKIP)
 * of diagnostic handlers. Called once at attach time.
 */
3113 ixl_add_device_sysctls(struct ixl_pf *pf)
3115 device_t dev = pf->dev;
3116 struct i40e_hw *hw = &pf->hw;
3118 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3119 struct sysctl_oid_list *ctx_list =
3120 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3122 struct sysctl_oid *debug_node;
3123 struct sysctl_oid_list *debug_list;
3125 struct sysctl_oid *fec_node;
3126 struct sysctl_oid_list *fec_list;
3128 /* Set up sysctls */
3129 SYSCTL_ADD_PROC(ctx, ctx_list,
3130 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
3131 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
3133 SYSCTL_ADD_PROC(ctx, ctx_list,
3134 OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
3135 pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
3137 SYSCTL_ADD_PROC(ctx, ctx_list,
3138 OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD,
3139 pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
3141 SYSCTL_ADD_PROC(ctx, ctx_list,
3142 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
3143 pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed");
3145 SYSCTL_ADD_PROC(ctx, ctx_list,
3146 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
3147 pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
3149 SYSCTL_ADD_PROC(ctx, ctx_list,
3150 OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
3151 pf, 0, ixl_sysctl_unallocated_queues, "I",
3152 "Queues not allocated to a PF or VF");
3154 SYSCTL_ADD_PROC(ctx, ctx_list,
3155 OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
3156 pf, 0, ixl_sysctl_pf_tx_itr, "I",
3157 "Immediately set TX ITR value for all queues");
3159 SYSCTL_ADD_PROC(ctx, ctx_list,
3160 OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
3161 pf, 0, ixl_sysctl_pf_rx_itr, "I",
3162 "Immediately set RX ITR value for all queues");
3164 SYSCTL_ADD_INT(ctx, ctx_list,
3165 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
3166 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
3168 SYSCTL_ADD_INT(ctx, ctx_list,
3169 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
3170 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
3172 /* Add FEC sysctls for 25G adapters */
3173 if (i40e_is_25G_device(hw->device_id)) {
3174 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3175 OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
3176 fec_list = SYSCTL_CHILDREN(fec_node);
3178 SYSCTL_ADD_PROC(ctx, fec_list,
3179 OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW,
3180 pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
3182 SYSCTL_ADD_PROC(ctx, fec_list,
3183 OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW,
3184 pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
3186 SYSCTL_ADD_PROC(ctx, fec_list,
3187 OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW,
3188 pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
3190 SYSCTL_ADD_PROC(ctx, fec_list,
3191 OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW,
3192 pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
3194 SYSCTL_ADD_PROC(ctx, fec_list,
3195 OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW,
3196 pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
3199 SYSCTL_ADD_PROC(ctx, ctx_list,
3200 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW,
3201 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
3203 /* Add sysctls meant to print debug information, but don't list them
3204 * in "sysctl -a" output. */
3205 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
3206 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
3207 debug_list = SYSCTL_CHILDREN(debug_node);
3209 SYSCTL_ADD_UINT(ctx, debug_list,
3210 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
3211 &pf->hw.debug_mask, 0, "Shared code debug message level");
3213 SYSCTL_ADD_UINT(ctx, debug_list,
3214 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
3215 &pf->dbg_mask, 0, "Non-shared code debug message level");
3217 SYSCTL_ADD_PROC(ctx, debug_list,
3218 OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
3219 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
3221 SYSCTL_ADD_PROC(ctx, debug_list,
3222 OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
3223 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
3225 SYSCTL_ADD_PROC(ctx, debug_list,
3226 OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
3227 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
3229 SYSCTL_ADD_PROC(ctx, debug_list,
3230 OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
3231 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
3233 SYSCTL_ADD_PROC(ctx, debug_list,
3234 OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
3235 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
3237 SYSCTL_ADD_PROC(ctx, debug_list,
3238 OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
3239 pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
3241 SYSCTL_ADD_PROC(ctx, debug_list,
3242 OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
3243 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
3245 SYSCTL_ADD_PROC(ctx, debug_list,
3246 OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
3247 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
3249 SYSCTL_ADD_PROC(ctx, debug_list,
3250 OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
3251 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
3253 SYSCTL_ADD_PROC(ctx, debug_list,
3254 OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD,
3255 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
3257 SYSCTL_ADD_PROC(ctx, debug_list,
3258 OID_AUTO, "do_pf_reset", CTLTYPE_INT | CTLFLAG_WR,
3259 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
3261 SYSCTL_ADD_PROC(ctx, debug_list,
3262 OID_AUTO, "do_core_reset", CTLTYPE_INT | CTLFLAG_WR,
3263 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
3265 SYSCTL_ADD_PROC(ctx, debug_list,
3266 OID_AUTO, "do_global_reset", CTLTYPE_INT | CTLFLAG_WR,
3267 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
3269 SYSCTL_ADD_PROC(ctx, debug_list,
3270 OID_AUTO, "do_emp_reset", CTLTYPE_INT | CTLFLAG_WR,
3271 pf, 0, ixl_sysctl_do_emp_reset, "I",
3272 "(This doesn't work) Tell HW to initiate a EMP (entire firmware) reset");
3274 SYSCTL_ADD_PROC(ctx, debug_list,
3275 OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
3276 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
/* I2C access handlers (module diagnostics) */
3279 SYSCTL_ADD_PROC(ctx, debug_list,
3280 OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3281 pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
3283 SYSCTL_ADD_PROC(ctx, debug_list,
3284 OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
3285 pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
3287 SYSCTL_ADD_PROC(ctx, debug_list,
3288 OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD,
3289 pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
3294 * Primarily for finding out how many queues can be assigned to VFs,
/* Sysctl handler: report the number of queues the queue manager has free */
3298 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
3300 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3303 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
3305 return sysctl_handle_int(oidp, NULL, queues, req);
3309 ** Set flow control using sysctl:
/*
 * Sysctl handler: read/write the port's flow-control mode (0-3).
 * On a write, validates the range, programs the new mode through
 * i40e_set_fc(), and caches it in pf->fc on success.
 */
3316 ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
3318 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3319 struct i40e_hw *hw = &pf->hw;
3320 device_t dev = pf->dev;
3321 int requested_fc, error = 0;
3322 enum i40e_status_code aq_error = 0;
/* Seed the handler with the current mode; return early on read */
3326 requested_fc = pf->fc;
3327 error = sysctl_handle_int(oidp, &requested_fc, 0, req);
3328 if ((error) || (req->newptr == NULL))
3330 if (requested_fc < 0 || requested_fc > 3) {
3332 "Invalid fc mode; valid modes are 0 through 3\n");
3336 /* Set fc ability for port */
3337 hw->fc.requested_mode = requested_fc;
3338 aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
3341 "%s: Error setting new fc mode %d; fc_err %#x\n",
3342 __func__, aq_error, fc_aq_err);
3345 pf->fc = requested_fc;
/*
 * Map an Admin Queue link-speed enum to a human-readable string.
 *
 * NOTE(review): the 'speeds' string table, the 'index' assignments in
 * each case, and the break statements are not visible in this excerpt —
 * confirm against the full source.
 */
3351 ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
3365 switch (link_speed) {
3366 case I40E_LINK_SPEED_100MB:
3369 case I40E_LINK_SPEED_1GB:
3372 case I40E_LINK_SPEED_10GB:
3375 case I40E_LINK_SPEED_40GB:
3378 case I40E_LINK_SPEED_20GB:
3381 case I40E_LINK_SPEED_25GB:
3384 case I40E_LINK_SPEED_UNKNOWN:
3390 return speeds[index];
/*
 * Sysctl handler: refresh the link status and report the current port
 * speed as a string (read-only).
 */
3394 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
3396 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3397 struct i40e_hw *hw = &pf->hw;
3400 ixl_update_link_status(pf);
3402 error = sysctl_handle_string(oidp,
3403 ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
3409 * Converts 8-bit speeds value to and from sysctl flags and
3410 * Admin Queue flags.
3413 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
/* Each entry packs the AQ flag (low byte) with the sysctl flag (high byte) */
3415 static u16 speedmap[6] = {
3416 (I40E_LINK_SPEED_100MB | (0x1 << 8)),
3417 (I40E_LINK_SPEED_1GB | (0x2 << 8)),
3418 (I40E_LINK_SPEED_10GB | (0x4 << 8)),
3419 (I40E_LINK_SPEED_20GB | (0x8 << 8)),
3420 (I40E_LINK_SPEED_25GB | (0x10 << 8)),
3421 (I40E_LINK_SPEED_40GB | (0x20 << 8))
/* Translate each set bit in the requested direction */
3425 for (int i = 0; i < 6; i++) {
3427 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
3429 retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
/*
 * Program the PHY's advertised link speeds. Reads the current PHY
 * abilities, builds a new config preserving the existing abilities
 * (plus atomic-link restart), sets link_speed from 'speeds' — taken
 * raw when from_aq is true, otherwise converted from sysctl flag
 * format — and applies it via the Set PHY Config AQ command.
 */
3436 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
3438 struct i40e_hw *hw = &pf->hw;
3439 device_t dev = pf->dev;
3440 struct i40e_aq_get_phy_abilities_resp abilities;
3441 struct i40e_aq_set_phy_config config;
3442 enum i40e_status_code aq_error = 0;
3444 /* Get current capability information */
3445 aq_error = i40e_aq_get_phy_capabilities(hw,
3446 FALSE, FALSE, &abilities, NULL);
3449 "%s: Error getting phy capabilities %d,"
3450 " aq error: %d\n", __func__, aq_error,
3451 hw->aq.asq_last_status);
3455 /* Prepare new config */
3456 bzero(&config, sizeof(config));
3458 config.link_speed = speeds;
3460 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
/* Carry over the current abilities, adding an atomic link restart */
3461 config.phy_type = abilities.phy_type;
3462 config.phy_type_ext = abilities.phy_type_ext;
3463 config.abilities = abilities.abilities
3464 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
3465 config.eee_capability = abilities.eee_capability;
3466 config.eeer = abilities.eeer_val;
3467 config.low_power_ctrl = abilities.d3_lpan;
3468 config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e);
3470 /* Do aq command & restart link */
3471 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3474 "%s: Error setting new phy config %d,"
3475 " aq error: %d\n", __func__, aq_error,
3476 hw->aq.asq_last_status);
3484 ** Supported link speeds:
/* Sysctl handler: report supported link speeds as sysctl flag bits */
3494 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
3496 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3497 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
3499 return sysctl_handle_int(oidp, NULL, supported, req);
3503 ** Control link advertise speed:
3505 ** 0x1 - advertise 100 Mb
3506 ** 0x2 - advertise 1G
3507 ** 0x4 - advertise 10G
3508 ** 0x8 - advertise 20G
3509 ** 0x10 - advertise 25G
3510 ** 0x20 - advertise 40G
3512 ** Set to 0 to disable link
/*
 * Sysctl handler: read/write the advertised-speed bitmask (flags
 * documented above). Writes are range-checked, validated against the
 * adapter's supported speeds, then applied via
 * ixl_set_advertised_speeds(); on success the new value is cached and
 * the link status refreshed.
 */
3515 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
3517 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3518 device_t dev = pf->dev;
3519 u8 converted_speeds;
3520 int requested_ls = 0;
3523 /* Read in new mode */
3524 requested_ls = pf->advertised_speed;
3525 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
3526 if ((error) || (req->newptr == NULL))
3529 /* Error out if bits outside of possible flag range are set */
3530 if ((requested_ls & ~((u8)0x3F)) != 0) {
3531 device_printf(dev, "Input advertised speed out of range; "
3532 "valid flags are: 0x%02x\n",
3533 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3537 /* Check if adapter supports input value */
3538 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
3539 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
3540 device_printf(dev, "Invalid advertised speed; "
3541 "valid flags are: 0x%02x\n",
3542 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3546 error = ixl_set_advertised_speeds(pf, requested_ls, false);
3550 pf->advertised_speed = requested_ls;
3551 ixl_update_link_status(pf);
3556 ** Get the width and transaction speed of
3557 ** the bus this adapter is plugged into.
3560 ixl_get_bus_info(struct ixl_pf *pf)
3562 struct i40e_hw *hw = &pf->hw;
3563 device_t dev = pf->dev;
3565 u32 offset, num_ports;
3568 /* Some devices don't use PCIE */
3569 if (hw->mac.type == I40E_MAC_X722)
3572 /* Read PCI Express Capabilities Link Status Register */
3573 pci_find_cap(dev, PCIY_EXPRESS, &offset);
3574 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3576 /* Fill out hw struct with PCIE info */
3577 i40e_set_pci_config_data(hw, link);
3579 /* Use info to print out bandwidth messages */
3580 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3581 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
3582 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
3583 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3584 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
3585 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
3586 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
3587 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
3591 * If adapter is in slot with maximum supported speed,
3592 * no warning message needs to be printed out.
3594 if (hw->bus.speed >= i40e_bus_speed_8000
3595 && hw->bus.width >= i40e_bus_width_pcie_x8)
/* Warn if the aggregate port bandwidth may exceed the slot's capacity */
3598 num_ports = bitcount32(hw->func_caps.valid_functions);
3599 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
3601 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
3602 device_printf(dev, "PCI-Express bandwidth available"
3603 " for this device may be insufficient for"
3604 " optimal performance.\n");
3605 device_printf(dev, "Please move the device to a different"
3606 " PCI-e link with more lanes and/or higher"
3607 " transfer rate.\n");
/*
 * Sysctl handler: report the NVM/firmware version string.
 * NOTE(review): sbuf finish/delete lines are elided in this view.
 */
3612 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
3614 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3615 	struct i40e_hw *hw = &pf->hw;
3618 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3619 	ixl_nvm_version_str(hw, sbuf);
/*
 * Debug helper: pretty-print an NVM update admin-queue request.
 * The specific READ with config ptr/flags == 0xF/0xF, offset 0, size 1
 * is matched first and deliberately not logged (the printf is commented
 * out) — presumably because it is a high-frequency status poll; verify.
 */
3627 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
3629 	if ((nvma->command == I40E_NVM_READ) &&
3630 	    ((nvma->config & 0xFF) == 0xF) &&
3631 	    (((nvma->config & 0xF00) >> 8) == 0xF) &&
3632 	    (nvma->offset == 0) &&
3633 	    (nvma->data_size == 1)) {
3634 		// device_printf(dev, "- Get Driver Status Command\n");
3636 	else if (nvma->command == I40E_NVM_READ) {
3640 	switch (nvma->command) {
3642 		device_printf(dev, "- command: I40E_NVM_READ\n");
3645 		device_printf(dev, "- command: I40E_NVM_WRITE\n");
3648 		device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
/* config packs a module pointer in the low byte and flags in bits 8-11. */
3652 	device_printf(dev, "- config (ptr)  : 0x%02x\n", nvma->config & 0xFF);
3653 	device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
3654 	device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
3655 	device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
/*
 * Handle an NVM update ioctl (SIOCGDRVSPEC/SIOCSDRVSPEC carrying an
 * i40e_nvm_access request).  Validates the ifdrv payload, optionally
 * dumps the command when IXL_DBG_NVMUPD debugging is on, waits up to
 * ~10s for an in-progress adapter reset to clear, then forwards the
 * request to the shared i40e_nvmupd_command().
 */
3660 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3662 	struct i40e_hw *hw = &pf->hw;
3663 	struct i40e_nvm_access *nvma;
3664 	device_t dev = pf->dev;
3665 	enum i40e_status_code status = 0;
3668 	DEBUGFUNC("ixl_handle_nvmupd_cmd");
/* Reject a payload too small for the request header or with no data. */
3671 	if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
3672 	    ifd->ifd_data == NULL) {
3673 		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3675 		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3676 		    __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access));
3677 		device_printf(dev, "%s: data pointer: %p\n", __func__,
3682 	nvma = (struct i40e_nvm_access *)ifd->ifd_data;
3684 	if (pf->dbg_mask & IXL_DBG_NVMUPD)
3685 		ixl_print_nvm_cmd(dev, nvma);
/* Poll (100 x 100ms) for a pending adapter reset to finish first. */
3687 	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
3689 		while (count++ < 100) {
3690 			i40e_msec_delay(100);
3691 			if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING))
3696 	if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING)) {
3697 		// TODO: Might need a different lock here
3699 		status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3700 		// IXL_PF_UNLOCK(pf);
3705 	/* Let the nvmupdate report errors, show them only when debug is enabled */
3706 	if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3707 		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3708 		    i40e_stat_str(hw, status), perrno);
3711 	 * -EPERM is actually ERESTART, which the kernel interprets as it needing
3712 	 * to run this ioctl again. So use -EACCES for -EPERM instead.
3714 	if (perrno == -EPERM)
/*
 * Scan the four GLGEN_MDIO_I2C_SEL registers for the entry that both has
 * I2C enabled and whose PHY port number matches; the matching index is
 * the I2C interface for this port.
 */
3721 ixl_find_i2c_interface(struct ixl_pf *pf)
3723 	struct i40e_hw *hw = &pf->hw;
3724 	bool i2c_en, port_matched;
3727 	for (int i = 0; i < 4; i++) {
3728 		reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3729 		i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
/* Extract the PHY port number field and compare against this port. */
3730 		port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3731 		    >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3733 		if (i2c_en && port_matched)
/*
 * Map a PHY-type bit position to its human-readable name.
 * ext selects the 8-entry extended PHY type table; bounds are checked
 * before indexing so out-of-range positions return "Invalid"/"Invalid_Ext".
 */
3741 ixl_phy_type_string(u32 bit_pos, bool ext)
3743 	static char * phy_types_str[32] = {
3773 		"1000BASE-T Optical",
3777 	static char * ext_phy_types_str[8] = {
3788 	if (ext && bit_pos > 7) return "Invalid_Ext";
3789 	if (bit_pos > 31) return "Invalid";
3791 	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3794 /* TODO: ERJ: I don't this is necessary anymore. */
/*
 * Issue a Get Link Status admin-queue command (with LSE enabled) and
 * copy the raw response into the caller-supplied link_status struct.
 * Logs the AQ error string on failure.
 */
3796 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3798 	device_t dev = pf->dev;
3799 	struct i40e_hw *hw = &pf->hw;
3800 	struct i40e_aq_desc desc;
3801 	enum i40e_status_code status;
/* The response payload lives in the descriptor's raw parameter bytes. */
3803 	struct i40e_aqc_get_link_status *aq_link_status =
3804 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
3806 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3807 	link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3808 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3811 		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3812 		    __func__, i40e_stat_str(hw, status),
3813 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3817 	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
/*
 * Translate a link-status PHY type value to a name string; values past
 * the base table are offset by 0x1F and looked up in the extended table.
 */
3822 ixl_phy_type_string_ls(u8 val)
3825 		return ixl_phy_type_string(val - 0x1F, true);
3827 		return ixl_phy_type_string(val, false);
/*
 * Sysctl handler: fetch the current AQ link status and dump its fields
 * (PHY type, speed, link/AN/ext info, loopback, max frame size, power).
 */
3831 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3833 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3834 	device_t dev = pf->dev;
3838 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3840 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3844 	struct i40e_aqc_get_link_status link_status;
3845 	error = ixl_aq_get_link_status(pf, &link_status);
3851 	sbuf_printf(buf, "\n"
3852 	    "PHY Type : 0x%02x<%s>\n"
3854 	    "Link info: 0x%02x\n"
3855 	    "AN info  : 0x%02x\n"
3856 	    "Ext info : 0x%02x\n"
3857 	    "Loopback : 0x%02x\n"
3861 	    link_status.phy_type,
3862 	    ixl_phy_type_string_ls(link_status.phy_type),
3863 	    link_status.link_speed,
3864 	    link_status.link_info,
3865 	    link_status.an_info,
3866 	    link_status.ext_info,
3867 	    link_status.loopback,
3868 	    link_status.max_frame_size,
3870 	    link_status.power_desc);
3872 	error = sbuf_finish(buf);
3874 		device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler: query PHY capabilities via the admin queue and print
 * the PHY type bitmaps (decoded to names), abilities, EEE/EEER values,
 * PHY/module IDs, and FEC configuration fields.
 */
3881 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3883 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3884 	struct i40e_hw *hw = &pf->hw;
3885 	device_t dev = pf->dev;
3886 	enum i40e_status_code status;
3887 	struct i40e_aq_get_phy_abilities_resp abilities;
3891 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3893 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3897 	status = i40e_aq_get_phy_capabilities(hw,
3898 	    FALSE, FALSE, &abilities, NULL);
3901 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3902 		    __func__, i40e_stat_str(hw, status),
3903 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3908 	sbuf_printf(buf, "\n"
3910 	    abilities.phy_type);
/* Decode each set bit of the 32-bit PHY type mask into its name. */
3912 	if (abilities.phy_type != 0) {
3913 		sbuf_printf(buf, "<");
3914 		for (int i = 0; i < 32; i++)
3915 			if ((1 << i) & abilities.phy_type)
3916 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3917 		sbuf_printf(buf, ">\n");
3920 	sbuf_printf(buf, "PHY Ext  : %02x",
3921 	    abilities.phy_type_ext);
/* Only the low 4 bits of the extended PHY type byte are decoded here. */
3923 	if (abilities.phy_type_ext != 0) {
3924 		sbuf_printf(buf, "<");
3925 		for (int i = 0; i < 4; i++)
3926 			if ((1 << i) & abilities.phy_type_ext)
3927 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
3928 		sbuf_printf(buf, ">");
3930 	sbuf_printf(buf, "\n");
3938 	    "ID       : %02x %02x %02x %02x\n"
3939 	    "ModType  : %02x %02x %02x\n"
3943 	    abilities.link_speed,
3944 	    abilities.abilities, abilities.eee_capability,
3945 	    abilities.eeer_val, abilities.d3_lpan,
3946 	    abilities.phy_id[0], abilities.phy_id[1],
3947 	    abilities.phy_id[2], abilities.phy_id[3],
3948 	    abilities.module_type[0], abilities.module_type[1],
/* fec_cfg_curr_mod_ext_info: bits 7-5 = module ext info, bits 4-0 = FEC cfg. */
3949 	    abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3950 	    abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3951 	    abilities.ext_comp_code);
3953 	error = sbuf_finish(buf);
3955 		device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler: list the PF's MAC/VLAN filters and, when SR-IOV VFs
 * are enabled, each enabled VF's filters.
 * NOTE(review): both VF loops iterate the PF's vsi->ftl list here —
 * per the TODO, each VF should get its own filter list; verify intent.
 */
3962 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
3964 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3965 	struct ixl_vsi *vsi = &pf->vsi;
3966 	struct ixl_mac_filter *f;
3967 	device_t dev = pf->dev;
3968 	int error = 0, ftl_len = 0, ftl_counter = 0;
3972 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3974 		device_printf(dev, "Could not allocate sbuf for output.\n");
3978 	sbuf_printf(buf, "\n");
3980 	/* Print MAC filters */
3981 	sbuf_printf(buf, "PF Filters:\n");
/* First pass counts entries so the last line can omit its newline. */
3982 	SLIST_FOREACH(f, &vsi->ftl, next)
3986 		sbuf_printf(buf, "(none)\n");
3988 	SLIST_FOREACH(f, &vsi->ftl, next) {
3990 		    MAC_FORMAT ", vlan %4d, flags %#06x",
3991 		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3992 		/* don't print '\n' for last entry */
3993 		if (++ftl_counter != ftl_len)
3994 			sbuf_printf(buf, "\n");
3999 	/* TODO: Give each VF its own filter list sysctl */
4001 	if (pf->num_vfs > 0) {
4002 		sbuf_printf(buf, "\n\n");
4003 		for (int i = 0; i < pf->num_vfs; i++) {
4005 			if (!(vf->vf_flags & VF_FLAG_ENABLED))
4009 			ftl_len = 0, ftl_counter = 0;
4010 			sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
4011 			SLIST_FOREACH(f, &vsi->ftl, next)
4015 				sbuf_printf(buf, "(none)\n");
4017 			SLIST_FOREACH(f, &vsi->ftl, next) {
4019 				    MAC_FORMAT ", vlan %4d, flags %#06x\n",
4020 				    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4027 	error = sbuf_finish(buf);
4029 		device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Number of switch resource types reported by the AQ (table size below). */
4035 #define IXL_SW_RES_SIZE 0x14
/*
 * qsort(3) comparator: order switch resource allocation entries by
 * ascending resource_type so the sysctl output is grouped by type.
 */
4037 ixl_res_alloc_cmp(const void *a, const void *b)
4039 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4040 	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4041 	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4043 	return ((int)one->resource_type - (int)two->resource_type);
4047  * Longest string length: 25
/*
 * Map an AQ switch resource type code to a descriptive name; codes at or
 * beyond the table size fall through to "(Reserved)".
 */
4050 ixl_switch_res_type_string(u8 type)
4052 	// TODO: This should be changed to static const
4053 	char * ixl_switch_res_type_strings[0x14] = {
4056 		"Perfect Match MAC address",
4059 		"Multicast hash entry",
4060 		"Unicast hash entry",
4064 		"VLAN Statistic Pool",
4067 		"Inner VLAN Forward filter",
4077 		return ixl_switch_res_type_strings[type];
4079 	return "(Reserved)";
/*
 * Sysctl handler: query per-type switch resource allocation from the AQ,
 * sort the entries by type, and print a guaranteed/total/used/unalloced
 * table.
 */
4083 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4085 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4086 	struct i40e_hw *hw = &pf->hw;
4087 	device_t dev = pf->dev;
4089 	enum i40e_status_code status;
4093 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4095 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4097 		device_printf(dev, "Could not allocate sbuf for output.\n");
4101 	bzero(resp, sizeof(resp));
4102 	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4108 		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
4109 		    __func__, i40e_stat_str(hw, status),
4110 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4115 	/* Sort entries by type for display */
4116 	qsort(resp, num_entries,
4117 	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
4118 	    &ixl_res_alloc_cmp);
4120 	sbuf_cat(buf, "\n");
4121 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
4123 	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
4124 	    "                          | (this)     | (all) | (this) | (all)       \n");
4125 	for (int i = 0; i < num_entries; i++) {
4127 		    "%25s | %10d   %5d   %6d   %12d",
4128 		    ixl_switch_res_type_string(resp[i].resource_type),
4132 		    resp[i].total_unalloced);
/* Suppress the trailing newline on the final row. */
4133 		if (i < num_entries - 1)
4134 			sbuf_cat(buf, "\n");
4137 	error = sbuf_finish(buf);
4139 		device_printf(dev, "Error finishing sbuf: %d\n", error);
4146 ** Caller must init and delete sbuf; this function will clear and
4147 ** finish it for caller.
/*
 * Format a switch configuration element as "<TYPE> <info>" into the
 * caller's sbuf and return the resulting string.
 */
4150 ixl_switch_element_string(struct sbuf *s,
4151     struct i40e_aqc_switch_config_element_resp *element)
4155 	switch (element->element_type) {
4156 	case I40E_AQ_SW_ELEM_TYPE_MAC:
4157 		sbuf_printf(s, "MAC %3d", element->element_info);
4159 	case I40E_AQ_SW_ELEM_TYPE_PF:
4160 		sbuf_printf(s, "PF  %3d", element->element_info);
4162 	case I40E_AQ_SW_ELEM_TYPE_VF:
4163 		sbuf_printf(s, "VF  %3d", element->element_info);
4165 	case I40E_AQ_SW_ELEM_TYPE_EMP:
4168 	case I40E_AQ_SW_ELEM_TYPE_BMC:
4171 	case I40E_AQ_SW_ELEM_TYPE_PV:
4174 	case I40E_AQ_SW_ELEM_TYPE_VEB:
4177 	case I40E_AQ_SW_ELEM_TYPE_PA:
4180 	case I40E_AQ_SW_ELEM_TYPE_VSI:
4181 		sbuf_printf(s, "VSI %3d", element->element_info);
4189 	return sbuf_data(s);
/*
 * Sysctl handler: dump the internal switch topology reported by the
 * Get Switch Configuration AQ command — one row per element with its
 * SEID, name, uplink/downlink SEIDs, and connection type.
 */
4193 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
4195 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4196 	struct i40e_hw *hw = &pf->hw;
4197 	device_t dev = pf->dev;
4200 	enum i40e_status_code status;
4203 	u8 aq_buf[I40E_AQ_LARGE_BUF];
4205 	struct i40e_aqc_get_switch_config_resp *sw_config;
4206 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
4208 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4210 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
4214 	status = i40e_aq_get_switch_config(hw, sw_config,
4215 	    sizeof(aq_buf), &next, NULL);
4218 		    "%s: aq_get_switch_config() error %s, aq error %s\n",
4219 		    __func__, i40e_stat_str(hw, status),
4220 		    i40e_aq_str(hw, hw->aq.asq_last_status));
/* A non-zero 'next' SEID means more elements exist than were fetched. */
4225 		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
/* Scratch sbuf reused to render each element's name. */
4228 	nmbuf = sbuf_new_auto();
4230 		device_printf(dev, "Could not allocate sbuf for name output.\n");
4235 	sbuf_cat(buf, "\n");
4236 	/* Assuming <= 255 elements in switch */
4237 	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
4238 	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
4240 	** Revision -- all elements are revision 1 for now
4243 	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
4244 	    "                |          |          | (uplink)\n");
4245 	for (int i = 0; i < sw_config->header.num_reported; i++) {
4246 		// "%4d (%8s) | %8s   %8s   %#8x",
4247 		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
4249 		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4250 		    &sw_config->element[i]));
4251 		sbuf_cat(buf, " | ");
4252 		sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
4254 		sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
4256 		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
4257 		if (i < sw_config->header.num_reported - 1)
4258 			sbuf_cat(buf, "\n");
4262 	error = sbuf_finish(buf);
4264 		device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler: dump the RSS hash key.  X722 reads it via the AQ;
 * other MACs read it from the PFQF_HKEY registers four bytes at a time.
 */
4272 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
4274 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4275 	struct i40e_hw *hw = &pf->hw;
4276 	device_t dev = pf->dev;
4279 	enum i40e_status_code status;
4282 	struct i40e_aqc_get_set_rss_key_data key_data;
4284 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4286 		device_printf(dev, "Could not allocate sbuf for output.\n");
4290 	bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
4292 	sbuf_cat(buf, "\n");
4293 	if (hw->mac.type == I40E_MAC_X722) {
4294 		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
4296 			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
4297 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
/* Register path: each HKEY register holds 4 bytes of the key (i << 2). */
4299 		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4300 			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
4301 			bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
4305 	ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
4307 	error = sbuf_finish(buf);
4309 		device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Hex-dump 'length' bytes from 'buf' into the sbuf, 16 bytes per row,
 * each row prefixed with its offset (label_offset + row start).  When
 * 'text' is set, an ASCII column is appended with non-printable bytes
 * shown as '.'.  Short final rows are space-padded to keep columns
 * aligned.
 */
4316 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
4321 	if (length < 1 || buf == NULL) return;
4323 	int byte_stride = 16;
4324 	int lines = length / byte_stride;
4325 	int rem = length % byte_stride;
4329 	for (i = 0; i < lines; i++) {
/* Last row may be partial when length is not a multiple of 16. */
4330 		width = (rem > 0 && i == lines - 1)
4331 		    ? rem : byte_stride;
4333 		sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
4335 		for (j = 0; j < width; j++)
4336 			sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
4338 		if (width < byte_stride) {
4339 			for (k = 0; k < (byte_stride - width); k++)
4340 				sbuf_printf(sb, "   ");
4344 			sbuf_printf(sb, "\n");
/* ASCII sidebar: printable range is 32..126 inclusive. */
4348 		for (j = 0; j < width; j++) {
4349 			c = (char)buf[i * byte_stride + j];
4350 			if (c < 32 || c > 126)
4351 				sbuf_printf(sb, ".");
4353 				sbuf_printf(sb, "%c", c);
4356 		sbuf_printf(sb, "\n");
/*
 * Sysctl handler: dump the RSS lookup table (LUT).  X722 fetches it via
 * the AQ; other MACs read the PFQF_HLUT registers four bytes at a time.
 */
4362 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
4364 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4365 	struct i40e_hw *hw = &pf->hw;
4366 	device_t dev = pf->dev;
4369 	enum i40e_status_code status;
4373 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4375 		device_printf(dev, "Could not allocate sbuf for output.\n");
4379 	bzero(hlut, sizeof(hlut));
4380 	sbuf_cat(buf, "\n");
4381 	if (hw->mac.type == I40E_MAC_X722) {
4382 		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
4384 			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
4385 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
/* rss_table_size is in 4-byte units here; each HLUT reg holds 4 entries. */
4387 		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
4388 			reg = rd32(hw, I40E_PFQF_HLUT(i));
4389 			bcopy(&reg, &hlut[i << 2], 4);
4392 	ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
4394 	error = sbuf_finish(buf);
4396 		device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler: report the 64-bit RSS hash-enable (HENA) mask,
 * assembled from the two 32-bit PFQF_HENA registers.
 */
4403 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
4405 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4406 	struct i40e_hw *hw = &pf->hw;
4409 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
4410 	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
4412 	return sysctl_handle_long(oidp, NULL, hena, req);
4416  * Sysctl to disable firmware's link management
4418  * 1 - Disable link management on this port
4419  * 0 - Re-enable link management
4421  * On normal NVMs, firmware manages link by default.
/*
 * Writes the requested mode into the PHY debug register via the AQ;
 * bit 4 of the debug value toggles FW link management off/on.
 */
4424 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
4426 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4427 	struct i40e_hw *hw = &pf->hw;
4428 	device_t dev = pf->dev;
4429 	int requested_mode = -1;
4430 	enum i40e_status_code status = 0;
4433 	/* Read in new mode */
4434 	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
/* Read-only access (no new value) returns here without touching hardware. */
4435 	if ((error) || (req->newptr == NULL))
4437 	/* Check for sane value */
4438 	if (requested_mode < 0 || requested_mode > 1) {
4439 		device_printf(dev, "Valid modes are 0 or 1\n");
4444 	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4447 		    "%s: Error setting new phy debug mode %s,"
4448 		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4449 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4457  * Read some diagnostic data from an SFP module
4458  * Bytes 96-99, 102-105 from device address 0xA2
/*
 * Verifies the module identifier (byte 0 at 0xA0 must be 0x03 = SFP
 * family) and that diagnostics are implemented (byte 92 bits 5/6),
 * then dumps the temperature/Vcc diagnostic bytes from address 0xA2.
 * NOTE(review): read_i2c_byte return values after the first check are
 * ignored here; output may be stale on I2C errors.
 */
4461 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4463 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4464 	device_t dev = pf->dev;
4469 	error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4471 		device_printf(dev, "Error reading from i2c\n");
4474 	if (output != 0x3) {
4475 		device_printf(dev, "Module is not SFP/SFP+/SFP28 (%02X)\n", output);
4479 	pf->read_i2c_byte(pf, 92, 0xA0, &output);
4480 	if (!(output & 0x60)) {
4481 		device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4485 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4487 	for (u8 offset = 96; offset < 100; offset++) {
4488 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4489 		sbuf_printf(sbuf, "%02X ", output);
4491 	for (u8 offset = 102; offset < 106; offset++) {
4492 		pf->read_i2c_byte(pf, offset, 0xA2, &output);
4493 		sbuf_printf(sbuf, "%02X ", output);
4503  * Sysctl to read a byte from I2C bus.
4505  * Input: 32-bit value:
4506  *	bits 0-7:   device address (0xA0 or 0xA2)
4507  *	bits 8-15:  offset (0-255)
4508  *	bits 16-31: unused
4509  * Output: 8-bit value read
/*
 * Unpacks device address and offset from the written integer, rejects
 * addresses other than the two SFP EEPROM/diagnostic addresses, performs
 * the read, and logs the byte to the console.
 */
4512 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4514 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4515 	device_t dev = pf->dev;
4516 	int input = -1, error = 0;
4517 	u8 dev_addr, offset, output;
4519 	/* Read in I2C read parameters */
4520 	error = sysctl_handle_int(oidp, &input, 0, req);
/* Read-only sysctl access: nothing to do without a new value. */
4521 	if ((error) || (req->newptr == NULL))
4523 	/* Validate device address */
4524 	dev_addr = input & 0xFF;
4525 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4528 	offset = (input >> 8) & 0xFF;
4530 	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4534 	device_printf(dev, "%02X\n", output);
4539  * Sysctl to write a byte to the I2C bus.
4541  * Input: 32-bit value:
4542  *	bits 0-7:   device address (0xA0 or 0xA2)
4543  *	bits 8-15:  offset (0-255)
4544  *	bits 16-23: value to write
4545  *	bits 24-31: unused
4546  * Output: 8-bit value written
/*
 * Mirror of ixl_sysctl_read_i2c_byte for writes: unpack address, offset,
 * and value, validate the address, write the byte, and log it.
 */
4549 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4551 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4552 	device_t dev = pf->dev;
4553 	int input = -1, error = 0;
4554 	u8 dev_addr, offset, value;
4556 	/* Read in I2C write parameters */
4557 	error = sysctl_handle_int(oidp, &input, 0, req);
/* Read-only sysctl access: nothing to do without a new value. */
4558 	if ((error) || (req->newptr == NULL))
4560 	/* Validate device address */
4561 	dev_addr = input & 0xFF;
4562 	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4565 	offset = (input >> 8) & 0xFF;
4566 	value = (input >> 16) & 0xFF;
4568 	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4572 	device_printf(dev, "%02X written\n", value);
/*
 * Fetch current PHY abilities and report via *is_set whether the FEC
 * flag at 'bit_pos' is set in fec_cfg_curr_mod_ext_info.  The abilities
 * struct is filled in for the caller's later use by ixl_set_fec_config().
 */
4577 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4578     u8 bit_pos, int *is_set)
4580 	device_t dev = pf->dev;
4581 	struct i40e_hw *hw = &pf->hw;
4582 	enum i40e_status_code status;
4584 	status = i40e_aq_get_phy_capabilities(hw,
4585 	    FALSE, FALSE, abilities, NULL);
4588 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4589 		    __func__, i40e_stat_str(hw, status),
4590 		    i40e_aq_str(hw, hw->aq.asq_last_status));
4594 	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
/*
 * Set or clear one FEC configuration bit.  Builds a Set PHY Config
 * request from the abilities previously read by ixl_get_fec_config(),
 * and only issues the AQ command when the FEC value actually changes
 * (avoiding an unnecessary link flap).
 */
4599 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4600     u8 bit_pos, int set)
4602 	device_t dev = pf->dev;
4603 	struct i40e_hw *hw = &pf->hw;
4604 	struct i40e_aq_set_phy_config config;
4605 	enum i40e_status_code status;
4607 	/* Set new PHY config */
4608 	memset(&config, 0, sizeof(config));
/* Start from the current FEC bits with the target bit cleared ... */
4609 	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
/* ... then OR it back in when the caller asked for 'set'. */
4611 		config.fec_config |= bit_pos;
4612 	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4613 		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
/* Carry forward all other PHY settings unchanged. */
4614 		config.phy_type = abilities->phy_type;
4615 		config.phy_type_ext = abilities->phy_type_ext;
4616 		config.link_speed = abilities->link_speed;
4617 		config.eee_capability = abilities->eee_capability;
4618 		config.eeer = abilities->eeer_val;
4619 		config.low_power_ctrl = abilities->d3_lpan;
4620 		status = i40e_aq_set_phy_config(hw, &config, NULL);
4624 			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4625 			    __func__, i40e_stat_str(hw, status),
4626 			    i40e_aq_str(hw, hw->aq.asq_last_status));
/*
 * Sysctl handler for the KR-FEC (FC/BASE-R) ability flag: read reports
 * whether I40E_AQ_ENABLE_FEC_KR is currently set; write toggles the
 * I40E_AQ_SET_FEC_ABILITY_KR bit via ixl_set_fec_config().
 */
4635 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4637 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4638 	int mode, error = 0;
4640 	struct i40e_aq_get_phy_abilities_resp abilities;
4641 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4644 	/* Read in new mode */
4645 	error = sysctl_handle_int(oidp, &mode, 0, req);
4646 	if ((error) || (req->newptr == NULL))
4649 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
/*
 * Sysctl handler for the RS-FEC ability flag: read reports whether
 * I40E_AQ_ENABLE_FEC_RS is set; write toggles the
 * I40E_AQ_SET_FEC_ABILITY_RS bit via ixl_set_fec_config().
 */
4653 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4655 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4656 	int mode, error = 0;
4658 	struct i40e_aq_get_phy_abilities_resp abilities;
4659 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4662 	/* Read in new mode */
4663 	error = sysctl_handle_int(oidp, &mode, 0, req);
4664 	if ((error) || (req->newptr == NULL))
4667 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
/*
 * Sysctl handler for requesting KR-FEC from the link partner: read
 * reports I40E_AQ_REQUEST_FEC_KR; write toggles
 * I40E_AQ_SET_FEC_REQUEST_KR via ixl_set_fec_config().
 */
4671 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4673 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4674 	int mode, error = 0;
4676 	struct i40e_aq_get_phy_abilities_resp abilities;
4677 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4680 	/* Read in new mode */
4681 	error = sysctl_handle_int(oidp, &mode, 0, req);
4682 	if ((error) || (req->newptr == NULL))
4685 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
/*
 * Sysctl handler for requesting RS-FEC from the link partner: read
 * reports I40E_AQ_REQUEST_FEC_RS; write toggles
 * I40E_AQ_SET_FEC_REQUEST_RS via ixl_set_fec_config().
 */
4689 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4691 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4692 	int mode, error = 0;
4694 	struct i40e_aq_get_phy_abilities_resp abilities;
4695 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4698 	/* Read in new mode */
4699 	error = sysctl_handle_int(oidp, &mode, 0, req);
4700 	if ((error) || (req->newptr == NULL))
4703 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
/*
 * Sysctl handler for automatic FEC selection: read reports
 * I40E_AQ_ENABLE_FEC_AUTO; write toggles I40E_AQ_SET_FEC_AUTO via
 * ixl_set_fec_config().
 */
4707 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4709 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4710 	int mode, error = 0;
4712 	struct i40e_aq_get_phy_abilities_resp abilities;
4713 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4716 	/* Read in new mode */
4717 	error = sysctl_handle_int(oidp, &mode, 0, req);
4718 	if ((error) || (req->newptr == NULL))
4721 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
/*
 * Sysctl handler: read an internal hardware debug cluster via repeated
 * i40e_aq_debug_dump() calls (4KB chunks), accumulate each table's data
 * into a 1.25MB bounce buffer, then hex-print a table's contents once
 * the firmware moves on to the next table.  Stops when the returned
 * next_table is 0xFF (cluster complete) or next_index is 0xFFFFFFFF.
 */
4725 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4727 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4728 	struct i40e_hw *hw = &pf->hw;
4729 	device_t dev = pf->dev;
4732 	enum i40e_status_code status;
4734 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4736 		device_printf(dev, "Could not allocate sbuf for output.\n");
4741 /* This amount is only necessary if reading the entire cluster into memory */
4742 #define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
4743 	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK);
/* NOTE(review): M_WAITOK malloc cannot return NULL; this check is dead. */
4744 	if (final_buff == NULL) {
4745 		device_printf(dev, "Could not allocate memory for output.\n");
4748 	int final_buff_len = 0;
4754 	u16 curr_buff_size = 4096;
4755 	u8 curr_next_table = 0;
4756 	u32 curr_next_index = 0;
4762 	sbuf_cat(buf, "\n");
4765 		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4766 		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4768 			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4769 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4773 		/* copy info out of temp buffer */
4774 		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4775 		final_buff_len += ret_buff_size;
4777 		if (ret_next_table != curr_next_table) {
4778 			/* We're done with the current table; we can dump out read data. */
4779 			sbuf_printf(buf, "%d:", curr_next_table);
4780 			int bytes_printed = 0;
/*
 * NOTE(review): the "<=" bound makes the last iteration print 16 bytes
 * starting at final_buff_len, reading past the valid data (and, when
 * final_buff_len is a multiple of 16, one full stride of stale bytes).
 * Looks like an off-by-one; verify against upstream before changing.
 */
4781 			while (bytes_printed <= final_buff_len) {
4782 				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4783 				bytes_printed += 16;
4785 			sbuf_cat(buf, "\n");
4787 			/* The entire cluster has been read; we're finished */
4788 			if (ret_next_table == 0xFF)
4791 			/* Otherwise clear the output buffer and continue reading */
4792 			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4796 		if (ret_next_index == 0xFFFFFFFF)
4799 		bzero(dump_buf, sizeof(dump_buf));
4800 		curr_next_table = ret_next_table;
4801 		curr_next_index = ret_next_index;
4805 	free(final_buff, M_DEVBUF);
4807 	error = sbuf_finish(buf);
4809 		device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler: enable (1) or disable (0) the firmware LLDP agent.
 * Disabling is rejected on X722 and NPAR configurations and requires
 * AQ API >= 1.7; state is tracked via IXL_PF_STATE_FW_LLDP_DISABLED.
 */
4816 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4818 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4819 	struct i40e_hw *hw = &pf->hw;
4820 	device_t dev = pf->dev;
4822 	int state, new_state;
4823 	enum i40e_status_code status;
/* 1 = agent running (DISABLED flag clear), 0 = agent stopped. */
4824 	state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4826 	/* Read in new mode */
4827 	error = sysctl_handle_int(oidp, &new_state, 0, req);
4828 	if ((error) || (req->newptr == NULL))
4831 	/* Already in requested state */
4832 	if (new_state == state)
4835 	if (new_state == 0) {
4836 		if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) {
4837 			device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n");
4841 		if (pf->hw.aq.api_maj_ver < 1 ||
4842 		    (pf->hw.aq.api_maj_ver == 1 &&
4843 		    pf->hw.aq.api_min_ver < 7)) {
4844 			device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
/* Stop the agent persistently and hand DCB parameters to the driver. */
4848 		i40e_aq_stop_lldp(&pf->hw, true, NULL);
4849 		i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
4850 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4852 		status = i40e_aq_start_lldp(&pf->hw, NULL);
4853 		if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST)
4854 			device_printf(dev, "FW LLDP agent is already running\n");
4855 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4862  * Get FW LLDP Agent status
/*
 * Read the LLDP config from NVM and derive this port's AdminStatus
 * (4 bits per port); update IXL_PF_STATE_FW_LLDP_DISABLED accordingly.
 */
4865 ixl_get_fw_lldp_status(struct ixl_pf *pf)
4867 	enum i40e_status_code ret = I40E_SUCCESS;
4868 	struct i40e_lldp_variables lldp_cfg;
4869 	struct i40e_hw *hw = &pf->hw;
4872 	ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
4876 	/* Get the LLDP AdminStatus for the current port */
4877 	adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
4880 	/* Check if LLDP agent is disabled */
4882 		device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n");
4883 		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4885 		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
/*
 * Attach-time link setup: old firmware (< 4.33) needs autonegotiation
 * restarted (after a short delay) before the initial link state can be
 * read; then force a fresh Get Link Info and record link_up.
 */
4891 ixl_attach_get_link_status(struct ixl_pf *pf)
4893 	struct i40e_hw *hw = &pf->hw;
4894 	device_t dev = pf->dev;
4897 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4898 	    (hw->aq.fw_maj_ver < 4)) {
4899 		i40e_msec_delay(75);
4900 		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4902 			device_printf(dev, "link restart failed, aq_err=%d\n",
4903 			    pf->hw.aq.asq_last_status);
4908 	/* Determine link state */
4909 	hw->phy.get_link_info = TRUE;
4910 	i40e_get_link_status(hw, &pf->link_up);
/*
 * Sysctl handler: request a PF reset.  The reset itself is deferred —
 * only the request flag is set; the admin task performs the reset.
 */
4915 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4917 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4918 	int requested = 0, error = 0;
4920 	/* Read in new mode */
4921 	error = sysctl_handle_int(oidp, &requested, 0, req);
4922 	if ((error) || (req->newptr == NULL))
4925 	/* Initiate the PF reset later in the admin task */
4926 	atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
/*
 * Sysctl handler: trigger a core reset (CORER) immediately by writing
 * the reset trigger register.
 */
4932 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4934 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4935 	struct i40e_hw *hw = &pf->hw;
4936 	int requested = 0, error = 0;
4938 	/* Read in new mode */
4939 	error = sysctl_handle_int(oidp, &requested, 0, req);
4940 	if ((error) || (req->newptr == NULL))
4943 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
/*
 * Sysctl handler: trigger a global reset (GLOBR) immediately by writing
 * the reset trigger register.
 */
4949 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4951 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4952 	struct i40e_hw *hw = &pf->hw;
4953 	int requested = 0, error = 0;
4955 	/* Read in new mode */
4956 	error = sysctl_handle_int(oidp, &requested, 0, req);
4957 	if ((error) || (req->newptr == NULL))
4960 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
/*
 * Sysctl handler: trigger an EMP firmware reset (EMPFWR), but only when
 * the hardware indicates software is allowed to initiate it (bit 0 of
 * the undocumented register at 0x000B818C).
 */
4966 ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS)
4968 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4969 	struct i40e_hw *hw = &pf->hw;
4970 	int requested = 0, error = 0;
4972 	/* Read in new mode */
4973 	error = sysctl_handle_int(oidp, &requested, 0, req);
4974 	if ((error) || (req->newptr == NULL))
4977 	/* TODO: Find out how to bypass this */
4978 	if (!(rd32(hw, 0x000B818C) & 0x1)) {
4979 		device_printf(pf->dev, "SW not allowed to initiate EMPR\n");
4982 	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_EMPFWR_MASK);
4988 * Print out mapping of TX queue indexes and Rx queue indexes
4992 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
4994 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4995 struct ixl_vsi *vsi = &pf->vsi;
4996 device_t dev = pf->dev;
5000 struct ixl_rx_queue *rx_que = vsi->rx_queues;
5001 struct ixl_tx_queue *tx_que = vsi->tx_queues;
5003 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5005 device_printf(dev, "Could not allocate sbuf for output.\n");
5009 sbuf_cat(buf, "\n");
5010 for (int i = 0; i < vsi->num_rx_queues; i++) {
5011 rx_que = &vsi->rx_queues[i];
5012 sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
5014 for (int i = 0; i < vsi->num_tx_queues; i++) {
5015 tx_que = &vsi->tx_queues[i];
5016 sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
5019 error = sbuf_finish(buf);
5021 device_printf(dev, "Error finishing sbuf: %d\n", error);