1 /******************************************************************************
3 Copyright (c) 2013-2019, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
39 #include "ixl_pf_iov.h"
44 #include "ixl_iw_int.h"
48 #include <dev/netmap/if_ixl_netmap.h>
49 #endif /* DEV_NETMAP */
51 static int ixl_vsi_setup_queue(struct ixl_vsi *, struct ixl_queue *, int);
52 static u64 ixl_max_aq_speed_to_value(u8);
53 static u8 ixl_convert_sysctl_aq_link_speed(u8, bool);
54 static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
55 static enum i40e_status_code ixl_set_lla(struct ixl_vsi *);
56 static const char * ixl_link_speed_string(u8 link_speed);
60 static int ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
61 static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
62 static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
63 static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
64 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
65 static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
66 static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
67 static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
68 static int ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
71 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
72 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
73 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
74 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
75 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
76 static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
77 static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
78 static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
79 static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
80 static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
81 static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
82 static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
83 static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
84 static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
85 static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
86 static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
87 static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
88 static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
89 static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
93 extern int ixl_enable_iwarp;
94 extern int ixl_limit_iwarp_msix;
97 const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
98 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
100 const char * const ixl_fc_string[6] = {
109 static char *ixl_fec_string[3] = {
111 "CL74 FC-FEC/BASE-R",
115 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
/*
 * Mask-gated debug logger: emits nothing unless at least one bit of
 * 'mask' is also set in pf->dbg_mask.  Variadic tail of the body is
 * not visible in this extract.
 */
118 ixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)
122 if (!(mask & pf->dbg_mask))
125 /* Re-implement device_printf() */
126 device_print_prettyname(pf->dev);
/*
 * Render FW, AQ API, NVM, EEtrackID and OEM version info into 'buf'.
 * The 32-bit hw->nvm.oem_ver word packs version/build/patch as
 * [31:24]=ver, [23:8]=build, [7:0]=patch (decoded below).
 */
133 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
136 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
138 u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
139 u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
140 u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
143 "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
144 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
145 hw->aq.api_maj_ver, hw->aq.api_min_ver,
146 (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
147 IXL_NVM_VERSION_HI_SHIFT,
148 (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
149 IXL_NVM_VERSION_LO_SHIFT,
151 oem_ver, oem_build, oem_patch);
/*
 * Print the NVM/FW version string (built by ixl_nvm_version_str())
 * to the console via device_printf().
 */
155 ixl_print_nvm_version(struct ixl_pf *pf)
157 struct i40e_hw *hw = &pf->hw;
158 device_t dev = pf->dev;
161 sbuf = sbuf_new_auto();
162 ixl_nvm_version_str(hw, sbuf);
164 device_printf(dev, "%s\n", sbuf_data(sbuf));
/*
 * Returns non-zero when the firmware status register (GL_FWSTS FWS1B
 * field) indicates the device is in firmware recovery mode.
 */
169 ixl_fw_recovery_mode(struct ixl_pf *pf)
171 return (rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK);
/*
 * Program the PF-wide TX interrupt throttle rate (pf->tx_itr) into the
 * per-queue ITR registers and cache it in each TX ring.
 */
175 ixl_configure_tx_itr(struct ixl_pf *pf)
177 struct i40e_hw *hw = &pf->hw;
178 struct ixl_vsi *vsi = &pf->vsi;
179 struct ixl_queue *que = vsi->queues;
181 vsi->tx_itr_setting = pf->tx_itr;
183 for (int i = 0; i < vsi->num_queues; i++, que++) {
184 struct tx_ring *txr = &que->txr;
186 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
187 vsi->tx_itr_setting);
188 txr->itr = vsi->tx_itr_setting;
189 txr->latency = IXL_AVE_LATENCY;
/*
 * RX counterpart of ixl_configure_tx_itr(): program pf->rx_itr into
 * every queue's RX ITR register and cache it in each RX ring.
 */
194 ixl_configure_rx_itr(struct ixl_pf *pf)
196 struct i40e_hw *hw = &pf->hw;
197 struct ixl_vsi *vsi = &pf->vsi;
198 struct ixl_queue *que = vsi->queues;
200 vsi->rx_itr_setting = pf->rx_itr;
202 for (int i = 0; i < vsi->num_queues; i++, que++) {
203 struct rx_ring *rxr = &que->rxr;
205 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
206 vsi->rx_itr_setting);
207 rxr->itr = vsi->rx_itr_setting;
208 rxr->latency = IXL_AVE_LATENCY;
/*
 * Convenience wrapper: push both TX and RX PF ITR settings to the
 * queue ITR registers.
 */
213 * Write PF ITR values to queue ITR registers.
216 ixl_configure_itr(struct ixl_pf *pf)
218 ixl_configure_tx_itr(pf);
219 ixl_configure_rx_itr(pf);
222 /*********************************************************************
225 * This routine is used in two ways. It is used by the stack as
226 * init entry point in network interface structure. It is also used
227 * by the driver as a hw/sw initialization routine to get to a
230 * return 0 on success, positive on failure
231 **********************************************************************/
/* Caller must hold the PF lock (asserted below). */
233 ixl_init_locked(struct ixl_pf *pf)
235 struct i40e_hw *hw = &pf->hw;
236 struct ixl_vsi *vsi = &pf->vsi;
237 struct ifnet *ifp = vsi->ifp;
238 device_t dev = pf->dev;
239 struct i40e_filter_control_settings filter;
241 INIT_DEBUGOUT("ixl_init_locked: begin");
242 IXL_PF_LOCK_ASSERT(pf);
/* In FW recovery mode only firmware update is usable; bail out early. */
244 if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) {
245 device_printf(dev, "Running in recovery mode, only firmware update available\n");
252 * If the aq is dead here, it probably means something outside of the driver
253 * did something to the adapter, like a PF reset.
254 * So rebuild the driver's state here if that occurs.
256 if (!i40e_check_asq_alive(&pf->hw)) {
257 device_printf(dev, "Admin Queue is down; resetting...\n");
258 ixl_teardown_hw_structs(pf);
262 /* Get the latest mac address... User might use a LAA */
263 if (ixl_set_lla(vsi)) {
264 device_printf(dev, "LLA address change failed!\n");
268 /* Set the various hardware offload abilities */
269 ifp->if_hwassist = 0;
270 if (ifp->if_capenable & IFCAP_TSO)
271 ifp->if_hwassist |= CSUM_TSO;
272 if (ifp->if_capenable & IFCAP_TXCSUM)
273 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
274 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
275 ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
277 /* Set up the device filtering */
278 bzero(&filter, sizeof(filter));
279 filter.enable_ethtype = TRUE;
280 filter.enable_macvlan = TRUE;
281 filter.enable_fdir = FALSE;
282 filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
/* Failure here is logged but not fatal to init. */
283 if (i40e_set_filter_control(hw, &filter))
284 device_printf(dev, "i40e_set_filter_control() failed\n");
286 /* Prepare the VSI: rings, hmc contexts, etc... */
287 if (ixl_initialize_vsi(vsi)) {
288 device_printf(dev, "initialize vsi failed!!\n");
295 /* Set up MSI/X routing and the ITR settings */
297 ixl_configure_queue_intr_msix(pf);
298 ixl_configure_itr(pf);
/* Legacy/MSI fallback path (branch structure elided in this extract). */
300 ixl_configure_legacy(pf);
302 ixl_enable_rings(vsi);
304 i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
306 ixl_reconfigure_filters(vsi);
308 /* Check if PROMISC or ALLMULTI flags have been set
309 * by user before bringing interface up */
310 ixl_set_promisc(vsi);
312 /* And now turn on interrupts */
313 ixl_enable_intr(vsi);
/* Force a fresh link query and push state to the stack. */
316 hw->phy.get_link_info = TRUE;
317 i40e_get_link_status(hw, &pf->link_up);
318 ixl_update_link_status(pf);
320 /* Now inform the stack we're ready */
321 ifp->if_drv_flags |= IFF_DRV_RUNNING;
/* Optionally bring up the iWARP client (non-fatal on failure). */
324 if (ixl_enable_iwarp && pf->iw_enabled) {
325 int ret = ixl_iw_pf_init(pf);
328 "initialize iwarp failed, code %d\n", ret);
334 /*********************************************************************
336 * Get the hardware capabilities
338 **********************************************************************/
/*
 * Discover device/function capabilities via the AQ, detect the I2C
 * interface, and select the driver's I2C byte read/write accessors.
 * Returns 0 on success, an errno/status code otherwise.
 */
341 ixl_get_hw_capabilities(struct ixl_pf *pf)
343 struct i40e_aqc_list_capabilities_element_resp *buf;
344 struct i40e_hw *hw = &pf->hw;
345 device_t dev = pf->dev;
346 int error, len, i2c_intfc_num;
/* Recovery mode: capability discovery unavailable; iWARP forced off. */
350 if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) {
351 hw->func_caps.iwarp = 0;
/* Initial guess: room for 40 capability elements. */
355 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
357 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
358 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
359 device_printf(dev, "Unable to allocate cap memory\n");
363 /* This populates the hw struct */
364 error = i40e_aq_discover_capabilities(hw, buf, len,
365 &needed, i40e_aqc_opc_list_func_capabilities, NULL);
/* ENOMEM from FW means our buffer was too small; retry loop elided here. */
367 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
369 /* retry once with a larger buffer */
373 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
374 device_printf(dev, "capability discovery failed: %d\n",
375 pf->hw.aq.asq_last_status);
380 device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
381 "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
382 hw->pf_id, hw->func_caps.num_vfs,
383 hw->func_caps.num_msix_vectors,
384 hw->func_caps.num_msix_vectors_vf,
385 hw->func_caps.fd_filters_guaranteed,
386 hw->func_caps.fd_filters_best_effort,
387 hw->func_caps.num_tx_qp,
388 hw->func_caps.num_rx_qp,
389 hw->func_caps.base_queue);
392 * Some devices have both MDIO and I2C; since this isn't reported
393 * by the FW, check registers to see if an I2C interface exists.
395 i2c_intfc_num = ixl_find_i2c_interface(pf);
396 if (i2c_intfc_num != -1)
399 /* Determine functions to use for driver I2C accesses */
400 switch (pf->i2c_access_method) {
401 case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
/* AQ-based I2C access needs XL710 with AQ API >= 1.7; else registers. */
402 if (hw->mac.type == I40E_MAC_XL710 &&
403 hw->aq.api_maj_ver == 1 &&
404 hw->aq.api_min_ver >= 7) {
405 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
406 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
408 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
409 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
413 case IXL_I2C_ACCESS_METHOD_AQ:
414 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
415 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
417 case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
418 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
419 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
421 case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
422 pf->read_i2c_byte = ixl_read_i2c_byte_bb;
423 pf->write_i2c_byte = ixl_write_i2c_byte_bb;
426 /* Should not happen */
427 device_printf(dev, "Error setting I2C access functions\n");
431 /* Print a subset of the capability information. */
432 device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n",
433 hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
434 hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
435 (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
436 (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
437 (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
/*
 * Keep IFCAP_TXCSUM/IFCAP_TSO4 (and their IPv6 twins) consistent:
 * TSO requires TX checksum offload, so enabling TSO force-enables
 * txcsum, and disabling txcsum force-disables TSO.  The KEEP_TSO
 * flags remember that TSO was only dropped as a side effect, so it
 * is restored when txcsum comes back.  'mask' is the set of caps the
 * user asked to toggle.
 */
444 ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
446 device_t dev = vsi->dev;
448 /* Enable/disable TXCSUM/TSO4 */
/* Case: both currently off. */
449 if (!(ifp->if_capenable & IFCAP_TXCSUM)
450 && !(ifp->if_capenable & IFCAP_TSO4)) {
451 if (mask & IFCAP_TXCSUM) {
452 ifp->if_capenable |= IFCAP_TXCSUM;
453 /* enable TXCSUM, restore TSO if previously enabled */
454 if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
455 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
456 ifp->if_capenable |= IFCAP_TSO4;
459 else if (mask & IFCAP_TSO4) {
460 ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
461 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
463 "TSO4 requires txcsum, enabling both...\n");
/* Case: txcsum on, TSO4 off — toggles are independent here. */
465 } else if((ifp->if_capenable & IFCAP_TXCSUM)
466 && !(ifp->if_capenable & IFCAP_TSO4)) {
467 if (mask & IFCAP_TXCSUM)
468 ifp->if_capenable &= ~IFCAP_TXCSUM;
469 else if (mask & IFCAP_TSO4)
470 ifp->if_capenable |= IFCAP_TSO4;
/* Case: both on — dropping txcsum drags TSO4 down with it. */
471 } else if((ifp->if_capenable & IFCAP_TXCSUM)
472 && (ifp->if_capenable & IFCAP_TSO4)) {
473 if (mask & IFCAP_TXCSUM) {
474 vsi->flags |= IXL_FLAGS_KEEP_TSO4;
475 ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
477 "TSO4 requires txcsum, disabling both...\n");
478 } else if (mask & IFCAP_TSO4)
479 ifp->if_capenable &= ~IFCAP_TSO4;
482 /* Enable/disable TXCSUM_IPV6/TSO6 */
/* IPv6 mirror of the three-state logic above. */
483 if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
484 && !(ifp->if_capenable & IFCAP_TSO6)) {
485 if (mask & IFCAP_TXCSUM_IPV6) {
486 ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
487 if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
488 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
489 ifp->if_capenable |= IFCAP_TSO6;
491 } else if (mask & IFCAP_TSO6) {
492 ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
493 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
495 "TSO6 requires txcsum6, enabling both...\n");
497 } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
498 && !(ifp->if_capenable & IFCAP_TSO6)) {
499 if (mask & IFCAP_TXCSUM_IPV6)
500 ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
501 else if (mask & IFCAP_TSO6)
502 ifp->if_capenable |= IFCAP_TSO6;
503 } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
504 && (ifp->if_capenable & IFCAP_TSO6)) {
505 if (mask & IFCAP_TXCSUM_IPV6) {
506 vsi->flags |= IXL_FLAGS_KEEP_TSO6;
507 ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
509 "TSO6 requires txcsum6, disabling both...\n");
510 } else if (mask & IFCAP_TSO6)
511 ifp->if_capenable &= ~IFCAP_TSO6;
515 /* For the set_advertise sysctl */
/*
 * On driver load, advertise the full supported-speed set so that an
 * unload/reload restores the default, then seed pf->advertised_speed
 * for the sysctl from the same mask.
 */
517 ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
519 device_t dev = pf->dev;
522 /* Make sure to initialize the device to the complete list of
523 * supported speeds on driver load, to ensure unloading and
524 * reloading the driver will restore this value.
526 err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
528 /* Non-fatal error */
529 device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
534 pf->advertised_speed =
535 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
/*
 * Tear down HW-backed driver state: shut down the LAN HMC (if it was
 * initialized) and the admin queue, disabling the "other" interrupt
 * first.  Failures are logged; status propagation elided in extract.
 */
539 ixl_teardown_hw_structs(struct ixl_pf *pf)
541 enum i40e_status_code status = 0;
542 struct i40e_hw *hw = &pf->hw;
543 device_t dev = pf->dev;
545 /* Shutdown LAN HMC */
546 if (hw->hmc.hmc_obj) {
547 status = i40e_shutdown_lan_hmc(hw);
550 "init: LAN HMC shutdown failure; status %d\n", status);
555 /* Shutdown admin queue */
556 ixl_disable_intr0(hw);
557 status = i40e_shutdown_adminq(hw);
560 "init: Admin Queue shutdown failure; status %d\n", status);
/*
 * Full PF reset + re-init sequence: PF reset, admin queue, PXE clear,
 * capability rediscovery, LAN HMC init/config, switch config, PHY
 * interrupt mask, flow control, then re-enable the admin queue IRQ.
 * Error-return paths between steps are elided in this extract.
 */
567 ixl_reset(struct ixl_pf *pf)
569 struct i40e_hw *hw = &pf->hw;
570 device_t dev = pf->dev;
574 // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
576 error = i40e_pf_reset(hw);
578 device_printf(dev, "init: PF reset failure\n");
583 error = i40e_init_adminq(hw);
585 device_printf(dev, "init: Admin queue init failure;"
586 " status code %d\n", error);
591 i40e_clear_pxe_mode(hw);
593 error = ixl_get_hw_capabilities(pf);
595 device_printf(dev, "init: Error retrieving HW capabilities;"
596 " status code %d\n", error);
600 error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
601 hw->func_caps.num_rx_qp, 0, 0);
603 device_printf(dev, "init: LAN HMC init failed; status code %d\n",
609 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
611 device_printf(dev, "init: LAN HMC config failed; status code %d\n",
617 // XXX: possible fix for panic, but our failure recovery is still broken
618 error = ixl_switch_config(pf);
620 device_printf(dev, "init: ixl_switch_config() failed: %d\n",
625 error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
628 device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
629 " aq_err %d\n", error, hw->aq.asq_last_status);
634 error = i40e_set_fc(hw, &set_fc_err_mask, true);
636 device_printf(dev, "init: setting link flow control failed; retcode %d,"
637 " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
641 // XXX: (Rebuild VSIs?)
643 /* Firmware delay workaround */
/* Old FW (< 4.33) needs an explicit link-restart after reset. */
644 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
645 (hw->aq.fw_maj_ver < 4)) {
647 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
649 device_printf(dev, "init: link restart failed, aq_err %d\n",
650 hw->aq.asq_last_status);
656 /* Re-enable admin queue interrupt */
658 ixl_configure_intr0_msix(pf);
659 ixl_enable_intr0(hw);
667 ** MSIX Interrupt Handlers and Tasklets
/*
 * Deferred per-queue task: run RX cleanup and restart TX if the stack
 * has queued frames; re-enqueue itself while more work remains, and
 * re-enable the queue interrupt when done.
 */
670 ixl_handle_que(void *context, int pending)
672 struct ixl_queue *que = context;
673 struct ixl_vsi *vsi = que->vsi;
674 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
675 struct i40e_hw *hw = vsi->hw;
676 struct tx_ring *txr = &que->txr;
677 struct ifnet *ifp = vsi->ifp;
680 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
681 more = ixl_rxeof(que, IXL_RX_LIMIT);
684 if (!drbr_empty(ifp, txr->br))
685 ixl_mq_start_locked(ifp, txr);
/* Still work pending: reschedule ourselves instead of re-enabling IRQ. */
688 taskqueue_enqueue(que->tq, &que->task);
693 /* Re-enable queue interrupt */
695 ixl_enable_queue(hw, que->me);
697 ixl_enable_intr0(hw);
701 /*********************************************************************
703 * Legacy Interrupt Service routine
705 **********************************************************************/
/*
 * Shared legacy/MSI ISR (function signature line not visible in this
 * extract): dispatch VFLR and adminq causes to the PF taskqueue, then
 * service queue 0's RX/TX inline.
 */
709 struct ixl_pf *pf = arg;
710 struct i40e_hw *hw = &pf->hw;
711 struct ixl_vsi *vsi = &pf->vsi;
712 struct ixl_queue *que = vsi->queues;
713 struct ifnet *ifp = vsi->ifp;
714 struct tx_ring *txr = &que->txr;
718 ixl_disable_intr0(hw);
722 /* Clear PBA at start of ISR if using legacy interrupts */
724 wr32(hw, I40E_PFINT_DYN_CTL0,
725 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
726 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
728 icr0 = rd32(hw, I40E_PFINT_ICR0);
/* Defer VF-level-reset and adminq work to process context. */
732 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
733 taskqueue_enqueue(pf->tq, &pf->vflr_task);
736 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
737 taskqueue_enqueue(pf->tq, &pf->adminq);
739 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
742 more = ixl_rxeof(que, IXL_RX_LIMIT);
746 if (!drbr_empty(vsi->ifp, txr->br))
747 ixl_mq_start_locked(ifp, txr);
751 taskqueue_enqueue(que->tq, &que->task);
754 ixl_enable_intr0(hw);
758 /*********************************************************************
760 * MSIX VSI Interrupt Service routine
762 **********************************************************************/
/*
 * Per-queue MSI-X handler: clean RX and TX, kick deferred TX if the
 * buf_ring is non-empty, update adaptive ITR, and either reschedule
 * the queue task (more work) or re-enable the queue interrupt.
 */
764 ixl_msix_que(void *arg)
766 struct ixl_queue *que = arg;
767 struct ixl_vsi *vsi = que->vsi;
768 struct i40e_hw *hw = vsi->hw;
769 struct tx_ring *txr = &que->txr;
770 bool more_tx, more_rx;
772 /* Protect against spurious interrupts */
773 if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
776 /* There are drivers which disable auto-masking of interrupts,
777 * which is a global setting for all ports. We have to make sure
778 * to mask it to not lose IRQs */
779 ixl_disable_queue(hw, que->me);
783 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
786 more_tx = ixl_txeof(que);
788 ** Make certain that if the stack
789 ** has anything queued the task gets
790 ** scheduled to handle it.
792 if (!drbr_empty(vsi->ifp, txr->br))
/* Dynamic ITR adjustment based on observed traffic. */
796 ixl_set_queue_rx_itr(que);
797 ixl_set_queue_tx_itr(que);
799 if (more_tx || more_rx)
800 taskqueue_enqueue(que->tq, &que->task);
802 ixl_enable_queue(hw, que->me);
808 /*********************************************************************
810 * MSIX Admin Queue Interrupt Service routine
812 **********************************************************************/
/*
 * "Other cause" MSI-X handler: decodes ICR0, handles MDD/GRST/ECC/
 * HMC/PCI-exception/VFLR causes, and defers admin queue processing to
 * pf->tq.  'mask' accumulates which causes to leave enabled.
 */
814 ixl_msix_adminq(void *arg)
816 struct ixl_pf *pf = arg;
817 struct i40e_hw *hw = &pf->hw;
818 device_t dev = pf->dev;
819 u32 reg, mask, rstat_reg;
820 bool do_task = FALSE;
824 reg = rd32(hw, I40E_PFINT_ICR0);
825 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
827 /* Check on the cause */
828 if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
829 mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK;
/* Malicious driver detection event. */
833 if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
834 ixl_handle_mdd_event(pf);
835 mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK;
838 if (reg & I40E_PFINT_ICR0_GRST_MASK) {
839 device_printf(dev, "Reset Requested!\n");
840 rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
841 rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
842 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
843 device_printf(dev, "Reset type: ");
845 /* These others might be handled similarly to an EMPR reset */
846 case I40E_RESET_CORER:
849 case I40E_RESET_GLOBR:
852 case I40E_RESET_EMPR:
854 atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
860 /* overload admin queue task to check reset progress */
864 if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) {
865 device_printf(dev, "ECC Error detected!\n");
868 if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
869 reg = rd32(hw, I40E_PFHMC_ERRORINFO);
870 if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
871 device_printf(dev, "HMC Error detected!\n");
872 device_printf(dev, "INFO 0x%08x\n", reg);
873 reg = rd32(hw, I40E_PFHMC_ERRORDATA);
874 device_printf(dev, "DATA 0x%08x\n", reg);
/* Write 0 to acknowledge/clear the HMC error info register. */
875 wr32(hw, I40E_PFHMC_ERRORINFO, 0);
879 if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) {
880 device_printf(dev, "PCI Exception detected!\n");
884 if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
885 mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
886 taskqueue_enqueue(pf->tq, &pf->vflr_task);
891 taskqueue_enqueue(pf->tq, &pf->adminq);
893 ixl_enable_intr0(hw);
/*
 * Derive unicast/multicast promiscuous settings from the ifnet flags
 * (IFF_PROMISC / IFF_ALLMULTI or multicast-list overflow) and push
 * them to firmware.  NOTE(review): the first AQ call's 'err' is
 * overwritten by the second before being checked in the visible code.
 */
897 ixl_set_promisc(struct ixl_vsi *vsi)
899 struct ifnet *ifp = vsi->ifp;
900 struct i40e_hw *hw = vsi->hw;
902 bool uni = FALSE, multi = FALSE;
904 if (ifp->if_flags & IFF_PROMISC)
906 else if (ifp->if_flags & IFF_ALLMULTI)
908 else { /* Need to count the multicast addresses */
909 struct ifmultiaddr *ifma;
911 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
912 if (ifma->ifma_addr->sa_family != AF_LINK)
/* Too many filters to program individually: fall back to allmulti. */
914 if (mcnt == MAX_MULTICAST_ADDR) {
920 if_maddr_runlock(ifp);
923 err = i40e_aq_set_vsi_unicast_promiscuous(hw,
924 vsi->seid, uni, NULL, TRUE);
925 err = i40e_aq_set_vsi_multicast_promiscuous(hw,
926 vsi->seid, multi, NULL);
930 /*********************************************************************
933 * Routines for multicast and vlan filter management.
935 *********************************************************************/
/*
 * Sync the interface's multicast list into HW filters.  If the list
 * exceeds MAX_MULTICAST_ADDR, drop individual MC filters and enable
 * multicast-promiscuous instead.
 */
937 ixl_add_multi(struct ixl_vsi *vsi)
939 struct ifmultiaddr *ifma;
940 struct ifnet *ifp = vsi->ifp;
941 struct i40e_hw *hw = vsi->hw;
944 IOCTL_DEBUGOUT("ixl_add_multi: begin");
948 ** First just get a count, to decide if we
949 ** we simply use multicast promiscuous.
951 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
952 if (ifma->ifma_addr->sa_family != AF_LINK)
956 if_maddr_runlock(ifp);
958 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
959 /* delete existing MC filters */
960 ixl_del_hw_filters(vsi, mcnt);
961 i40e_aq_set_vsi_multicast_promiscuous(hw,
962 vsi->seid, TRUE, NULL);
/* Second pass: queue each link-level multicast address as a filter. */
968 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
969 if (ifma->ifma_addr->sa_family != AF_LINK)
971 ixl_add_mc_filter(vsi,
972 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
975 if_maddr_runlock(ifp);
977 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
978 ixl_add_hw_filters(vsi, flags, mcnt);
981 IOCTL_DEBUGOUT("ixl_add_multi: end");
/*
 * Remove HW multicast filters whose address no longer appears in the
 * interface's multicast list: mark stale entries IXL_FILTER_DEL and
 * flush them via ixl_del_hw_filters().
 */
986 ixl_del_multi(struct ixl_vsi *vsi)
988 struct ifnet *ifp = vsi->ifp;
989 struct ifmultiaddr *ifma;
990 struct ixl_mac_filter *f;
994 IOCTL_DEBUGOUT("ixl_del_multi: begin");
996 /* Search for removed multicast addresses */
998 SLIST_FOREACH(f, &vsi->ftl, next) {
999 if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1001 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1002 if (ifma->ifma_addr->sa_family != AF_LINK)
1004 u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1005 if (cmp_etheraddr(f->macaddr, mc_addr)) {
/* No match in current list: schedule this filter for deletion. */
1010 if (match == FALSE) {
1011 f->flags |= IXL_FILTER_DEL;
1016 if_maddr_runlock(ifp);
1019 ixl_del_hw_filters(vsi, mcnt);
1022 /*********************************************************************
1025 * This routine checks for link status, updates statistics,
1026 * and runs the watchdog check.
1028 **********************************************************************/
/*
 * Periodic (1 Hz) callout: detect entry into FW recovery mode, kick
 * the adminq task, update stats, run the queue-hang watchdog, and
 * reschedule itself.
 */
1031 ixl_local_timer(void *arg)
1033 struct ixl_pf *pf = arg;
1034 struct ifnet *ifp = pf->vsi.ifp;
1036 if (ixl_fw_recovery_mode(pf)) {
/* First detection: stop the interface and latch the recovery state. */
1037 if (!(atomic_load_acq_int(&pf->state) & IXL_PF_STATE_RECOVERY_MODE)) {
1038 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1039 ixl_stop_locked(pf);
1040 atomic_set_int(&pf->state, IXL_PF_STATE_RECOVERY_MODE | IXL_PF_STATE_EMPR_RESETTING);
1041 device_printf(pf->dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
1045 IXL_PF_LOCK_ASSERT(pf);
1047 /* Fire off the adminq task */
1048 taskqueue_enqueue(pf->tq, &pf->adminq);
1050 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1052 ixl_update_stats_counters(pf);
1055 if (ixl_queue_hang_check(&pf->vsi)) {
1056 /* Increment stat when a queue shows hung */
1057 pf->watchdog_events++;
1060 callout_reset(&pf->timer, hz, ixl_local_timer, pf);
/*
 * Log a LINK UP notice including speed, requested and negotiated FEC,
 * autoneg completion, and flow-control state decoded from the PHY
 * link info.  Uses the ixl_fec_string/ixl_fc_string tables.
 */
1064 ixl_link_up_msg(struct ixl_pf *pf)
1066 struct i40e_hw *hw = &pf->hw;
1067 struct ifnet *ifp = pf->vsi.ifp;
1068 char *req_fec_string, *neg_fec_string;
1071 fec_abilities = hw->phy.link_info.req_fec_info;
1072 /* If both RS and KR are requested, only show RS */
1073 if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
1074 req_fec_string = ixl_fec_string[0];
1075 else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
1076 req_fec_string = ixl_fec_string[1];
1078 req_fec_string = ixl_fec_string[2];
/* Negotiated FEC: RS wins over KR, else "none". */
1080 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
1081 neg_fec_string = ixl_fec_string[0];
1082 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
1083 neg_fec_string = ixl_fec_string[1];
1085 neg_fec_string = ixl_fec_string[2];
1087 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
1089 ixl_link_speed_string(hw->phy.link_info.link_speed),
1090 req_fec_string, neg_fec_string,
1091 (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
/* Pause TX+RX -> "Full", TX-only, RX-only, or "None". */
1092 (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
1093 hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
1094 ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
1095 ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
1096 ixl_fc_string[1] : ixl_fc_string[0]);
1100 ** Note: this routine updates the OS on the link state
1101 ** the real check of the hardware only happens with
1102 ** a link interrupt.
/*
 * Propagate cached link state (pf->link_up/link_speed) to the ifnet:
 * edge-triggered on vsi->link_active so the stack is only notified on
 * transitions.  Also broadcasts the new state to VFs.
 */
1105 ixl_update_link_status(struct ixl_pf *pf)
1107 struct ixl_vsi *vsi = &pf->vsi;
1108 struct ifnet *ifp = vsi->ifp;
1109 device_t dev = pf->dev;
1112 if (vsi->link_active == FALSE) {
1113 vsi->link_active = TRUE;
1114 #if __FreeBSD_version >= 1100000
1115 ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->link_speed);
1117 if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->link_speed));
1119 if_link_state_change(ifp, LINK_STATE_UP);
1120 ixl_link_up_msg(pf);
1122 ixl_broadcast_link_state(pf);
1125 } else { /* Link down */
1126 if (vsi->link_active == TRUE) {
1128 device_printf(dev, "Link is Down\n");
1129 if_link_state_change(ifp, LINK_STATE_DOWN);
1130 vsi->link_active = FALSE;
1132 ixl_broadcast_link_state(pf);
1138 /*********************************************************************
1140 * This routine disables all traffic on the adapter by issuing a
1141 * global reset on the MAC and deallocates TX/RX buffers.
1143 **********************************************************************/
/* Caller must hold the PF lock (asserted below). */
1146 ixl_stop_locked(struct ixl_pf *pf)
1148 struct ixl_vsi *vsi = &pf->vsi;
1149 struct ifnet *ifp = vsi->ifp;
1151 INIT_DEBUGOUT("ixl_stop: begin\n");
1153 IXL_PF_LOCK_ASSERT(pf);
1155 /* Tell the stack that the interface is no longer active */
1156 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
1159 /* Stop iWARP device */
1160 if (ixl_enable_iwarp && pf->iw_enabled)
/* Quiesce queue interrupts before stopping the rings themselves. */
1164 ixl_disable_rings_intr(vsi);
1165 ixl_disable_rings(vsi);
/* Locked wrapper around ixl_stop_locked() (lock acquire/release elided). */
1169 ixl_stop(struct ixl_pf *pf)
1172 ixl_stop_locked(pf);
1176 /*********************************************************************
1178 * Setup MSIX Interrupt resources and handlers for the VSI
1180 **********************************************************************/
/*
 * Despite the header comment above (kept verbatim), this sets up the
 * LEGACY/MSI interrupt: allocate the shared IRQ resource, attach
 * ixl_intr as the handler, and name it "irq".
 */
1182 ixl_setup_legacy(struct ixl_pf *pf)
1184 device_t dev = pf->dev;
1189 pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1190 &rid, RF_SHAREABLE | RF_ACTIVE);
1191 if (pf->res == NULL) {
1192 device_printf(dev, "bus_alloc_resource_any() for"
1193 " legacy/msi interrupt\n");
1197 /* Set the handler function */
1198 error = bus_setup_intr(dev, pf->res,
1199 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1200 ixl_intr, pf, &pf->tag);
1203 device_printf(dev, "bus_setup_intr() for legacy/msi"
1204 " interrupt handler failed, error %d\n", error);
1207 error = bus_describe_intr(dev, pf->res, pf->tag, "irq");
1210 device_printf(dev, "bus_describe_intr() for Admin Queue"
1211 " interrupt name failed, error %d\n", error);
/*
 * Create the admin-queue fast taskqueue, its adminq task, and the
 * VFLR task, then start its single worker thread at PI_NET priority.
 * Frees the taskqueue on thread-start failure.
 */
1218 ixl_setup_adminq_tq(struct ixl_pf *pf)
1220 device_t dev = pf->dev;
1223 /* Tasklet for Admin Queue interrupts */
1224 TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1227 TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
1229 /* Create and start Admin Queue taskqueue */
1230 pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT,
1231 taskqueue_thread_enqueue, &pf->tq);
1233 device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n");
1236 error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq",
1237 device_get_nameunit(dev));
1239 device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n",
1241 taskqueue_free(pf->tq);
/*
 * Per-queue taskqueues: create a deferred TX-start task and the main
 * queue task for each queue, then start one worker thread each —
 * pinned to an RSS bucket CPU when RSS is compiled in, unpinned
 * otherwise (the two branches below are alternatives).
 */
1248 ixl_setup_queue_tqs(struct ixl_vsi *vsi)
1250 struct ixl_queue *que = vsi->queues;
1251 device_t dev = vsi->dev;
1257 /* Create queue tasks and start queue taskqueues */
1258 for (int i = 0; i < vsi->num_queues; i++, que++) {
1259 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1260 TASK_INIT(&que->task, 0, ixl_handle_que, que);
1261 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1262 taskqueue_thread_enqueue, &que->tq);
1264 CPU_SETOF(cpu_id, &cpu_mask);
1265 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
1266 &cpu_mask, "%s (bucket %d)",
1267 device_get_nameunit(dev), cpu_id);
1269 taskqueue_start_threads(&que->tq, 1, PI_NET,
1270 "%s (que %d)", device_get_nameunit(dev), que->me);
/* Release the admin-queue taskqueue (guard/NULLing elided in extract). */
1278 ixl_free_adminq_tq(struct ixl_pf *pf)
1281 taskqueue_free(pf->tq);
/* Release each per-queue taskqueue created by ixl_setup_queue_tqs(). */
1287 ixl_free_queue_tqs(struct ixl_vsi *vsi)
1289 struct ixl_queue *que = vsi->queues;
1291 for (int i = 0; i < vsi->num_queues; i++, que++) {
1293 taskqueue_free(que->tq);
/*
 * Allocate and wire up the admin queue's MSI-X interrupt (rid 1 /
 * vector 0): bus resource, ixl_msix_adminq handler, and "aq" name.
 */
1300 ixl_setup_adminq_msix(struct ixl_pf *pf)
1302 device_t dev = pf->dev;
1305 /* Admin IRQ rid is 1, vector is 0 */
1307 /* Get interrupt resource from bus */
1308 pf->res = bus_alloc_resource_any(dev,
1309 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1311 device_printf(dev, "bus_alloc_resource_any() for Admin Queue"
1312 " interrupt failed [rid=%d]\n", rid);
1315 /* Then associate interrupt with handler */
1316 error = bus_setup_intr(dev, pf->res,
1317 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1318 ixl_msix_adminq, pf, &pf->tag);
1321 device_printf(dev, "bus_setup_intr() for Admin Queue"
1322 " interrupt handler failed, error %d\n", error);
1325 error = bus_describe_intr(dev, pf->res, pf->tag, "aq");
1328 device_printf(dev, "bus_describe_intr() for Admin Queue"
1329 " interrupt name failed, error %d\n", error);
1337 * Allocate interrupt resources from bus and associate an interrupt handler
1338 * to those for the VSI's queues.
/*
 * Queue MSI-X vectors start at 1; vector 0 belongs to the admin queue.
 * For each queue: allocate the IRQ, attach ixl_msix_que(), name it "qN",
 * and (with RSS) bind the vector to the queue's RSS bucket CPU.
 */
1341 ixl_setup_queue_msix(struct ixl_vsi *vsi)
1343 device_t dev = vsi->dev;
1344 struct ixl_queue *que = vsi->queues;
1345 struct tx_ring *txr;
1346 int error, rid, vector = 1;
1348 /* Queue interrupt vector numbers start at 1 (adminq intr is 0) */
1349 for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1353 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1354 RF_SHAREABLE | RF_ACTIVE);
1356 device_printf(dev, "bus_alloc_resource_any() for"
1357 " Queue %d interrupt failed [rid=%d]\n",
1361 /* Set the handler function */
1362 error = bus_setup_intr(dev, que->res,
1363 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1364 ixl_msix_que, que, &que->tag);
/* On handler-setup failure the IRQ resource is released again */
1366 device_printf(dev, "bus_setup_intr() for Queue %d"
1367 " interrupt handler failed, error %d\n",
1369 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1372 error = bus_describe_intr(dev, que->res, que->tag, "q%d", i);
1374 device_printf(dev, "bus_describe_intr() for Queue %d"
1375 " interrupt name failed, error %d\n",
1378 /* Bind the vector to a CPU */
/* RSS path: pick CPU from this queue's RSS bucket */
1380 cpu_id = rss_getcpu(i % rss_getnumbuckets());
1382 error = bus_bind_intr(dev, que->res, cpu_id);
/* bind failure is logged but otherwise non-fatal */
1384 device_printf(dev, "bus_bind_intr() for Queue %d"
1385 " to CPU %d failed, error %d\n",
1386 que->me, cpu_id, error);
1395 * Allocate MSI/X vectors from the OS.
1396 * Returns 0 for legacy, 1 for MSI, >1 for MSIX.
/*
 * Interrupt-mode autoconfiguration:
 *  - honor the enable_msix tunable, then probe the MSI-X BAR;
 *  - clamp the queue count by available vectors, CPU count, MAC limits
 *    (128 for X722, else 64), the max_queues tunable, and the RSS bucket
 *    count; default cap is 8 queues when no tunable is set;
 *  - optionally reserve extra vectors for iWARP CEQ/AEQ;
 *  - fall back to MSI, then legacy INTx, when MSI-X is unusable.
 */
1399 ixl_init_msix(struct ixl_pf *pf)
1401 device_t dev = pf->dev;
1402 struct i40e_hw *hw = &pf->hw;
1404 #if __FreeBSD_version >= 1100000
1408 int auto_max_queues;
1409 int rid, want, vectors, queues, available;
1411 int iw_want=0, iw_vectors;
1416 /* Override by tuneable */
1417 if (!pf->enable_msix)
1420 /* First try MSI/X */
1421 rid = PCIR_BAR(IXL_MSIX_BAR);
1422 pf->msix_mem = bus_alloc_resource_any(dev,
1423 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1424 if (!pf->msix_mem) {
1425 /* May not be enabled */
1426 device_printf(pf->dev,
1427 "Unable to map MSIX table\n");
1431 available = pci_msix_count(dev);
1432 if (available < 2) {
1433 /* system has msix disabled (0), or only one vector (1) */
1434 device_printf(pf->dev, "Less than two MSI-X vectors available\n");
1435 bus_release_resource(dev, SYS_RES_MEMORY,
1437 pf->msix_mem = NULL;
1441 /* Clamp max number of queues based on:
1442 * - # of MSI-X vectors available
1443 * - # of cpus available
1444 * - # of queues that can be assigned to the LAN VSI
/* "available - 1" reserves one vector for the admin queue */
1446 auto_max_queues = min(mp_ncpus, available - 1);
1447 if (hw->mac.type == I40E_MAC_X722)
1448 auto_max_queues = min(auto_max_queues, 128);
1450 auto_max_queues = min(auto_max_queues, 64);
1452 /* Override with tunable value if tunable is less than autoconfig count */
1453 if ((pf->max_queues != 0) && (pf->max_queues <= auto_max_queues))
1454 queues = pf->max_queues;
1455 /* Use autoconfig amount if that's lower */
1456 else if ((pf->max_queues != 0) && (pf->max_queues > auto_max_queues)) {
1457 device_printf(dev, "ixl_max_queues (%d) is too large, using "
1458 "autoconfig amount (%d)...\n",
1459 pf->max_queues, auto_max_queues);
1460 queues = auto_max_queues;
1462 /* Limit maximum auto-configured queues to 8 if no user value is set */
1464 queues = min(auto_max_queues, 8);
1467 /* If we're doing RSS, clamp at the number of RSS buckets */
1468 if (queues > rss_getnumbuckets())
1469 queues = rss_getnumbuckets();
1473 ** Want one vector (RX/TX pair) per queue
1474 ** plus an additional for the admin queue.
1477 if (want <= available) /* Have enough */
1480 device_printf(pf->dev,
1481 "MSIX Configuration Problem, "
1482 "%d vectors available but %d wanted!\n",
1484 pf->msix_mem = NULL;
1485 goto no_msix; /* Will go to Legacy setup */
1489 if (ixl_enable_iwarp && hw->func_caps.iwarp) {
1490 #if __FreeBSD_version >= 1100000
/* Size the iWARP vector request by interrupt-capable CPU count */
1491 if(bus_get_cpus(dev, INTR_CPUS, sizeof(cpu_set), &cpu_set) == 0)
1493 iw_want = min(CPU_COUNT(&cpu_set), IXL_IW_MAX_MSIX);
1497 iw_want = min(mp_ncpus, IXL_IW_MAX_MSIX);
1498 if(ixl_limit_iwarp_msix > 0)
1499 iw_want = min(iw_want, ixl_limit_iwarp_msix);
1501 iw_want = min(iw_want, 1);
/* Give iWARP only vectors left over after LAN's allocation */
1503 available -= vectors;
1504 if (available > 0) {
1505 iw_vectors = (available >= iw_want) ?
1506 iw_want : available;
1507 vectors += iw_vectors;
1513 ixl_set_msix_enable(dev);
1514 if (pci_alloc_msix(dev, &vectors) == 0) {
1515 device_printf(pf->dev,
1516 "Using MSIX interrupts with %d vectors\n", vectors);
1519 if (ixl_enable_iwarp && hw->func_caps.iwarp)
1521 pf->iw_msix = iw_vectors;
1522 device_printf(pf->dev,
1523 "Reserving %d MSIX interrupts for iWARP CEQ and AEQ\n",
1528 pf->vsi.num_queues = queues;
1531 * If we're doing RSS, the number of queues needs to
1532 * match the number of RSS buckets that are configured.
1534 * + If there's more queues than RSS buckets, we'll end
1535 * up with queues that get no traffic.
1537 * + If there's more RSS buckets than queues, we'll end
1538 * up having multiple RSS buckets map to the same queue,
1539 * so there'll be some contention.
1541 if (queues != rss_getnumbuckets()) {
1543 "%s: queues (%d) != RSS buckets (%d)"
1544 "; performance will be impacted.\n",
1545 __func__, queues, rss_getnumbuckets());
/* no_msix fallback: try a single MSI vector, else legacy INTx */
1551 vectors = pci_msi_count(dev);
1552 pf->vsi.num_queues = 1;
1554 if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
1555 device_printf(pf->dev, "Using an MSI interrupt\n");
1558 device_printf(pf->dev, "Using a Legacy interrupt\n");
1564 * Configure admin queue/misc interrupt cause registers in hardware.
/*
 * Program MSI-X vector 0 (adminq/"other" causes): disable and clear ICR0,
 * enable the error/adminq/VFLR cause bits, terminate LNKLST0 so no queue
 * uses vector 0, set ITR0 and the software-triggered interrupt controls.
 */
1567 ixl_configure_intr0_msix(struct ixl_pf *pf)
1569 struct i40e_hw *hw = &pf->hw;
1572 /* First set up the adminq - vector 0 */
1573 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
1574 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
/* Enable the misc-interrupt causes we care about on vector 0 */
1576 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
1577 I40E_PFINT_ICR0_ENA_GRST_MASK |
1578 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
1579 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
1580 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
1581 I40E_PFINT_ICR0_ENA_VFLR_MASK |
1582 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
1583 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
1584 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1587 * 0x7FF is the end of the queue list.
1588 * This means we won't use MSI-X vector 0 for a queue interrupt
1591 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
1592 /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
1593 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
1595 wr32(hw, I40E_PFINT_DYN_CTL0,
1596 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
1597 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
1599 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
1603 * Configure queue interrupt cause registers in hardware.
/*
 * For each queue pair i: clear DYN_CTLN, point vector's linked list at
 * RX queue i, then chain RX -> TX (RQCTL's next queue is the TX queue)
 * and terminate the list at the TX cause (NEXTQ = IXL_QUEUE_EOL).
 */
1606 ixl_configure_queue_intr_msix(struct ixl_pf *pf)
1608 struct i40e_hw *hw = &pf->hw;
1609 struct ixl_vsi *vsi = &pf->vsi;
1613 for (int i = 0; i < vsi->num_queues; i++, vector++) {
1614 wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
1615 /* First queue type is RX / 0 */
1616 wr32(hw, I40E_PFINT_LNKLSTN(i), i);
/* RX cause: enable, bind to this vector, next queue is TX i */
1618 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
1619 (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
1620 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1621 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1622 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
1623 wr32(hw, I40E_QINT_RQCTL(i), reg);
/* TX cause: enable, bind to this vector, end of queue list */
1625 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
1626 (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
1627 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
1628 (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
1629 (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
1630 wr32(hw, I40E_QINT_TQCTL(i), reg);
1635 * Configure for MSI single vector operation
/*
 * Legacy/MSI mode: a single queue pair shares vector 0 with the misc
 * causes.  Programs TX/RX ITR, enables the "other" causes, anchors the
 * queue list at queue 0 (RX), and enables the RX->TX cause chain.
 */
1638 ixl_configure_legacy(struct ixl_pf *pf)
1640 struct i40e_hw *hw = &pf->hw;
1641 struct ixl_vsi *vsi = &pf->vsi;
1642 struct ixl_queue *que = vsi->queues;
1643 struct rx_ring *rxr = &que->rxr;
1644 struct tx_ring *txr = &que->txr;
/* Seed TX/RX interrupt moderation from the PF tunables */
1648 vsi->tx_itr_setting = pf->tx_itr;
1649 wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
1650 vsi->tx_itr_setting);
1651 txr->itr = vsi->tx_itr_setting;
1653 vsi->rx_itr_setting = pf->rx_itr;
1654 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
1655 vsi->rx_itr_setting);
1656 rxr->itr = vsi->rx_itr_setting;
1658 /* Setup "other" causes */
1659 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
1660 | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
1661 | I40E_PFINT_ICR0_ENA_GRST_MASK
1662 | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
1663 | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
1664 | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
1665 | I40E_PFINT_ICR0_ENA_VFLR_MASK
1666 | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
1668 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1670 /* No ITR for non-queue interrupts */
1671 wr32(hw, I40E_PFINT_STAT_CTL0,
1672 IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
1674 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
1675 wr32(hw, I40E_PFINT_LNKLST0, 0);
1677 /* Associate the queue pair to the vector and enable the q int */
1678 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
1679 | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
1680 | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
1681 wr32(hw, I40E_QINT_RQCTL(0), reg);
1683 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
1684 | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
1685 | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
1686 wr32(hw, I40E_QINT_TQCTL(0), reg);
/*
 * Map BAR0, enable bus mastering, and cache PCI identity (vendor/device,
 * revision, subsystem IDs, slot/function) plus the bus-space tag/handle
 * used by the wr32()/rd32() register accessors.
 */
1690 ixl_allocate_pci_resources(struct ixl_pf *pf)
1693 struct i40e_hw *hw = &pf->hw;
1694 device_t dev = pf->dev;
1698 pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1701 if (!(pf->pci_mem)) {
1702 device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
1705 /* Ensure proper PCI device operation */
1706 ixl_set_busmaster(dev);
1708 /* Save off the PCI information */
1709 hw->vendor_id = pci_get_vendor(dev);
1710 hw->device_id = pci_get_device(dev);
1711 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1712 hw->subsystem_vendor_id =
1713 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1714 hw->subsystem_device_id =
1715 pci_read_config(dev, PCIR_SUBDEV_0, 2);
1717 hw->bus.device = pci_get_slot(dev);
1718 hw->bus.func = pci_get_function(dev);
1720 /* Save off register access information */
1721 pf->osdep.mem_bus_space_tag =
1722 rman_get_bustag(pf->pci_mem);
1723 pf->osdep.mem_bus_space_handle =
1724 rman_get_bushandle(pf->pci_mem);
1725 pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
1726 pf->osdep.flush_reg = I40E_GLGEN_STAT;
/* hw_addr stores a pointer to the handle, not the mapped VA itself */
1727 pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
1729 pf->hw.back = &pf->osdep;
1735 * Teardown and release the admin queue/misc vector
/*
 * Undo ixl_setup_adminq_msix(): tear down the handler (if installed) and
 * release the IRQ resource.  The rid is derived from the interrupt mode:
 * admvec+1 for MSI-X, 1 for MSI, 0 for legacy INTx.
 */
1739 ixl_teardown_adminq_msix(struct ixl_pf *pf)
1741 device_t dev = pf->dev;
1744 if (pf->admvec) /* we are doing MSIX */
1745 rid = pf->admvec + 1;
1747 (pf->msix != 0) ? (rid = 1):(rid = 0);
1749 if (pf->tag != NULL) {
1750 bus_teardown_intr(dev, pf->res, pf->tag);
1752 device_printf(dev, "bus_teardown_intr() for"
1753 " interrupt 0 failed\n");
1758 if (pf->res != NULL) {
1759 bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
1761 device_printf(dev, "bus_release_resource() for"
1762 " interrupt 0 failed [rid=%d]\n", rid);
/*
 * Undo ixl_setup_queue_msix() for every VSI queue: tear down each handler
 * and release each IRQ resource (rid = msix vector + 1).  Bails out early
 * if MSI-X was never in use or the queue array was never allocated.
 */
1772 ixl_teardown_queue_msix(struct ixl_vsi *vsi)
1774 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1775 struct ixl_queue *que = vsi->queues;
1776 device_t dev = vsi->dev;
1779 /* We may get here before stations are setup */
1780 if ((pf->msix < 2) || (que == NULL))
1783 /* Release all MSIX queue resources */
1784 for (int i = 0; i < vsi->num_queues; i++, que++) {
1785 rid = que->msix + 1;
1786 if (que->tag != NULL) {
1787 error = bus_teardown_intr(dev, que->res, que->tag);
1789 device_printf(dev, "bus_teardown_intr() for"
1790 " Queue %d interrupt failed\n",
1796 if (que->res != NULL) {
1797 error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1799 device_printf(dev, "bus_release_resource() for"
1800 " Queue %d interrupt failed [rid=%d]\n",
/*
 * Release all PCI resources in reverse order of acquisition: queue and
 * adminq interrupts, the MSI allocation, the MSI-X table BAR, and BAR0.
 */
1812 ixl_free_pci_resources(struct ixl_pf *pf)
1814 device_t dev = pf->dev;
1817 ixl_teardown_queue_msix(&pf->vsi);
1818 ixl_teardown_adminq_msix(pf);
1821 pci_release_msi(dev);
1823 memrid = PCIR_BAR(IXL_MSIX_BAR);
1825 if (pf->msix_mem != NULL)
1826 bus_release_resource(dev, SYS_RES_MEMORY,
1827 memrid, pf->msix_mem);
1829 if (pf->pci_mem != NULL)
1830 bus_release_resource(dev, SYS_RES_MEMORY,
1831 PCIR_BAR(0), pf->pci_mem);
/*
 * Translate the adapter's PHY-type capability bitmask into ifmedia
 * entries, one ifmedia_add() per supported media type (100M through 25G,
 * copper/fiber/backplane/direct-attach variants).
 */
1837 ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
1839 /* Display supported media types */
1840 if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
1841 ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1843 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
1844 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1845 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
1846 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1847 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
1848 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
1850 if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
1851 ifmedia_add(&vsi->media, IFM_ETHER | IFM_2500_T, 0, NULL);
1853 if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
1854 ifmedia_add(&vsi->media, IFM_ETHER | IFM_5000_T, 0, NULL);
/* Several 10G PHY types all present as twinax to ifmedia */
1856 if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
1857 phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
1858 phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
1859 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
1861 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
1862 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1863 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
1864 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1865 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
1866 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
/* Likewise, several 40G PHY types map to IFM_40G_CR4 */
1868 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
1869 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
1870 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
1871 phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
1872 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
1873 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
1874 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
1875 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
1876 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
1877 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
1879 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
1880 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1882 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
1883 || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
1884 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
1885 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
1886 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
1887 if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
1888 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
1889 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
1890 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1891 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
1892 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1894 if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
1895 ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
1897 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
1898 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
1899 if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
1900 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
1902 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
1903 ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
1904 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
1905 ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
1906 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
1907 ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
1908 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
1909 ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
1910 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
1911 ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
1912 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
1913 ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
1916 /*********************************************************************
1918 * Setup networking device structure and register an interface.
1920 **********************************************************************/
/*
 * Allocate and initialize the ifnet: MTU, callbacks, TSO limits,
 * capability flags, and the ifmedia list (queried from firmware unless
 * the PF is in recovery mode), then attach via ether_ifattach().
 */
1922 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
1924 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1926 struct i40e_hw *hw = vsi->hw;
1927 struct ixl_queue *que = vsi->queues;
1928 struct i40e_aq_get_phy_abilities_resp abilities;
1929 enum i40e_status_code aq_error = 0;
1931 INIT_DEBUGOUT("ixl_setup_interface: begin");
1933 ifp = vsi->ifp = if_alloc(IFT_ETHER);
1935 device_printf(dev, "can not allocate ifnet structure\n");
1938 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1939 ifp->if_mtu = ETHERMTU;
1940 ifp->if_init = ixl_init;
1941 ifp->if_softc = vsi;
1942 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1943 ifp->if_ioctl = ixl_ioctl;
1945 #if __FreeBSD_version >= 1100036
1946 if_setgetcounterfn(ifp, ixl_get_counter);
1949 ifp->if_transmit = ixl_mq_start;
1951 ifp->if_qflush = ixl_qflush;
1953 ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2;
1955 vsi->max_frame_size =
1956 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1957 + ETHER_VLAN_ENCAP_LEN;
1959 /* Set TSO limits */
1960 ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
1961 ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
1962 ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE;
1965 * Tell the upper layer(s) we support long frames.
1967 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1969 ifp->if_capabilities |= IFCAP_HWCSUM;
1970 ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1971 ifp->if_capabilities |= IFCAP_TSO;
1972 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1973 ifp->if_capabilities |= IFCAP_LRO;
1975 /* VLAN capabilties */
1976 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1979 | IFCAP_VLAN_HWCSUM;
1980 ifp->if_capenable = ifp->if_capabilities;
1983 ** Don't turn this on by default, if vlans are
1984 ** created on another pseudo device (eg. lagg)
1985 ** then vlan events are not passed thru, breaking
1986 ** operation, but with HW FILTER off it works. If
1987 ** using vlans directly on the ixl driver you can
1988 ** enable this and get full hardware tag filtering.
1990 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1993 * Specify the media types supported by this adapter and register
1994 * callbacks to update media and link information
1996 ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
/* Skip firmware PHY queries while in recovery mode */
1999 if ((atomic_load_acq_int(&pf->state) & IXL_PF_STATE_RECOVERY_MODE) == 0) {
2000 aq_error = i40e_aq_get_phy_capabilities(hw,
2001 FALSE, TRUE, &abilities, NULL);
2002 /* May need delay to detect fiber correctly */
2003 if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2004 i40e_msec_delay(200);
2005 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2006 TRUE, &abilities, NULL);
2009 if (aq_error == I40E_ERR_UNKNOWN_PHY)
2010 device_printf(dev, "Unknown PHY type detected!\n");
2013 "Error getting supported media types, err %d,"
2014 " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2016 pf->supported_speeds = abilities.link_speed;
2017 #if __FreeBSD_version >= 1100000
2018 ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds);
2020 if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
2023 ixl_add_ifmedia(vsi, hw->phy.phy_types);
2027 /* Use autoselect media by default */
2028 ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2029 ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2031 ether_ifattach(ifp, hw->mac.addr);
2037 ** Run when the Admin Queue gets a link state change interrupt.
/*
 * Refresh link state from the adapter, warn if the link stayed down
 * because of an unqualified SFP/QSFP module, and propagate the new
 * link status to the OS.
 */
2040 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
2042 struct i40e_hw *hw = &pf->hw;
2043 device_t dev = pf->dev;
2044 struct i40e_aqc_get_link_status *status =
2045 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
2047 /* Request link status from adapter */
2048 hw->phy.get_link_info = TRUE;
2049 i40e_get_link_status(hw, &pf->link_up);
2051 /* Print out message if an unqualified module is found */
2052 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2053 (pf->advertised_speed) &&
2054 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
2055 (!(status->link_info & I40E_AQ_LINK_UP)))
2056 device_printf(dev, "Link failed because "
2057 "an unqualified module was detected!\n");
2059 /* Update OS link info */
2060 ixl_update_link_status(pf);
2063 /*********************************************************************
2065 * Get Firmware Switch configuration
2066 * - this will need to be more robust when more complex
2067 * switch configurations are enabled.
2069 **********************************************************************/
2071 ixl_switch_config(struct ixl_pf *pf)
2073 struct i40e_hw *hw = &pf->hw;
2074 struct ixl_vsi *vsi = &pf->vsi;
2075 device_t dev = vsi->dev;
2076 struct i40e_aqc_get_switch_config_resp *sw_config;
2077 u8 aq_buf[I40E_AQ_LARGE_BUF];
/* Fetch the switch configuration into a stack AQ buffer */
2081 memset(&aq_buf, 0, sizeof(aq_buf));
2082 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2083 ret = i40e_aq_get_switch_config(hw, sw_config,
2084 sizeof(aq_buf), &next, NULL);
2086 device_printf(dev, "aq_get_switch_config() failed, error %d,"
2087 " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
/* Optional debug dump of every reported switch element */
2090 if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
2092 "Switch config: header reported: %d in structure, %d total\n",
2093 sw_config->header.num_reported, sw_config->header.num_total);
2094 for (int i = 0; i < sw_config->header.num_reported; i++) {
2096 "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2097 sw_config->element[i].element_type,
2098 sw_config->element[i].seid,
2099 sw_config->element[i].uplink_seid,
2100 sw_config->element[i].downlink_seid);
2103 /* Simplified due to a single VSI */
2104 vsi->uplink_seid = sw_config->element[0].uplink_seid;
2105 vsi->downlink_seid = sw_config->element[0].downlink_seid;
2106 vsi->seid = sw_config->element[0].seid;
2110 /*********************************************************************
2112 * Initialize the VSI: this handles contexts, which means things
2113 * like the number of descriptors, buffer size,
2114 * plus we init the rings thru this function.
2116 **********************************************************************/
2118 ixl_initialize_vsi(struct ixl_vsi *vsi)
2120 struct ixl_pf *pf = vsi->back;
2121 struct ixl_queue *que = vsi->queues;
2122 device_t dev = vsi->dev;
2123 struct i40e_hw *hw = vsi->hw;
2124 struct i40e_vsi_context ctxt;
/* Read the current VSI parameters from firmware */
2128 memset(&ctxt, 0, sizeof(ctxt));
2129 ctxt.seid = vsi->seid;
2130 if (pf->veb_seid != 0)
2131 ctxt.uplink_seid = pf->veb_seid;
2132 ctxt.pf_num = hw->pf_id;
2133 err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2135 device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
2136 " aq_error %d\n", err, hw->aq.asq_last_status);
2139 ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
2140 "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2141 "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2142 "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2143 ctxt.uplink_seid, ctxt.vsi_number,
2144 ctxt.vsis_allocated, ctxt.vsis_unallocated,
2145 ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2146 ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2148 ** Set the queue and traffic class bits
2149 ** - when multiple traffic classes are supported
2150 ** this will need to be more robust.
2152 ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2153 ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2154 /* In contig mode, que_mapping[0] is first queue index used by this VSI */
2155 ctxt.info.queue_mapping[0] = 0;
2157 * This VSI will only use traffic class 0; start traffic class 0's
2158 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
2159 * the driver may not use all of them).
/* bsrl() => log2 of the allocated queue count (power-of-2 encoding) */
2161 tc_queues = bsrl(pf->qtag.num_allocated);
2162 ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
2163 & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
2164 ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
2165 & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
2167 /* Set VLAN receive stripping mode */
2168 ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2169 ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2170 if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2171 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2173 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2176 /* Set TCP Enable for iWARP capable VSI */
2177 if (ixl_enable_iwarp && pf->iw_enabled) {
2178 ctxt.info.valid_sections |=
2179 htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
2180 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
2183 /* Save VSI number and info for use later */
2184 vsi->vsi_num = ctxt.vsi_number;
2185 bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2187 /* Reset VSI statistics */
2188 ixl_vsi_reset_stats(vsi);
2190 ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
/* Push the modified VSI context back to firmware */
2192 err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2194 device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
2195 " aq_error %d\n", err, hw->aq.asq_last_status);
/* Per-queue HMC context setup: TX then RX, then ring (re)init */
2199 for (int i = 0; i < vsi->num_queues; i++, que++) {
2200 struct tx_ring *txr = &que->txr;
2201 struct rx_ring *rxr = &que->rxr;
2202 struct i40e_hmc_obj_txq tctx;
2203 struct i40e_hmc_obj_rxq rctx;
2207 /* Setup the HMC TX Context */
2208 size = que->num_tx_desc * sizeof(struct i40e_tx_desc);
2209 bzero(&tctx, sizeof(tctx));
2210 tctx.new_context = 1;
2211 tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2212 tctx.qlen = que->num_tx_desc;
2213 tctx.fc_ena = 0; /* Disable FCoE */
2215 * This value needs to pulled from the VSI that this queue
2216 * is assigned to. Index into array is traffic class.
2218 tctx.rdylist = vsi->info.qs_handle[0];
2220 * Set these to enable Head Writeback
2221 * - Address is last entry in TX ring (reserved for HWB index)
2222 * Leave these as 0 for Descriptor Writeback
2224 if (vsi->enable_head_writeback) {
2225 tctx.head_wb_ena = 1;
2226 tctx.head_wb_addr = txr->dma.pa +
2227 (que->num_tx_desc * sizeof(struct i40e_tx_desc));
2229 tctx.rdylist_act = 0;
2230 err = i40e_clear_lan_tx_queue_context(hw, i);
2232 device_printf(dev, "Unable to clear TX context\n");
2235 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2237 device_printf(dev, "Unable to set TX context\n");
2240 /* Associate the ring with this PF */
2241 txctl = I40E_QTX_CTL_PF_QUEUE;
2242 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2243 I40E_QTX_CTL_PF_INDX_MASK);
2244 wr32(hw, I40E_QTX_CTL(i), txctl);
2247 /* Do ring (re)init */
2248 ixl_init_tx_ring(que);
2250 /* Next setup the HMC RX Context */
2251 if (vsi->max_frame_size <= MCLBYTES)
2252 rxr->mbuf_sz = MCLBYTES;
2254 rxr->mbuf_sz = MJUMPAGESIZE;
/* Upper bound on a chained receive frame, per HW capability */
2256 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2258 /* Set up an RX context for the HMC */
2259 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2260 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2261 /* ignore header split for now */
2262 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2263 rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2264 vsi->max_frame_size : max_rxmax;
2266 rctx.dsize = 1; /* do 32byte descriptors */
2267 rctx.hsplit_0 = 0; /* no header split */
2268 rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
2269 rctx.qlen = que->num_rx_desc;
2270 rctx.tphrdesc_ena = 1;
2271 rctx.tphwdesc_ena = 1;
2272 rctx.tphdata_ena = 0; /* Header Split related */
2273 rctx.tphhead_ena = 0; /* Header Split related */
2274 rctx.lrxqthresh = 2; /* Interrupt at <128 desc avail */
2277 rctx.showiv = 1; /* Strip inner VLAN header */
2278 rctx.fc_ena = 0; /* Disable FCoE */
2279 rctx.prefena = 1; /* Prefetch descriptors */
2281 err = i40e_clear_lan_rx_queue_context(hw, i);
2284 "Unable to clear RX context %d\n", i);
2287 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2289 device_printf(dev, "Unable to set RX context %d\n", i);
2292 err = ixl_init_rx_ring(que);
2294 device_printf(dev, "Fail in init_rx_ring %d\n", i);
2298 /* preserve queue */
/* Netmap-aware RX tail: leave room for slots held by netmap rings */
2299 if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2300 struct netmap_adapter *na = NA(vsi->ifp);
2301 struct netmap_kring *kring = na->rx_rings[i];
2302 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2303 wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2305 #endif /* DEV_NETMAP */
2306 wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_rx_desc - 1);
/*
 * Free the per-queue resources of a VSI: TX buf ring, TX/RX soft
 * structures, descriptor DMA memory, and the ring locks.  Uses the
 * mtx_initialized() check to skip queues that were never fully set up.
 */
2312 ixl_vsi_free_queues(struct ixl_vsi *vsi)
2314 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2315 struct ixl_queue *que = vsi->queues;
2317 if (NULL == vsi->queues)
2320 for (int i = 0; i < vsi->num_queues; i++, que++) {
2321 struct tx_ring *txr = &que->txr;
2322 struct rx_ring *rxr = &que->rxr;
2324 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2328 buf_ring_free(txr->br, M_DEVBUF);
2329 ixl_free_que_tx(que);
2331 i40e_free_dma_mem(&pf->hw, &txr->dma);
2333 IXL_TX_LOCK_DESTROY(txr);
2335 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2338 ixl_free_que_rx(que);
2340 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2342 IXL_RX_LOCK_DESTROY(rxr);
2345 sysctl_ctx_free(&vsi->sysctl_ctx);
2349 /*********************************************************************
2351 * Free all VSI structs.
2353 **********************************************************************/
2355 ixl_free_vsi(struct ixl_vsi *vsi)
2357 /* Free station queues */
2358 ixl_vsi_free_queues(vsi);
2360 free(vsi->queues, M_DEVBUF);
2362 /* Free VSI filter list */
2363 ixl_free_mac_filters(vsi);
/* Drain the VSI's MAC filter SLIST, freeing each entry, and zero the count. */
2367 ixl_free_mac_filters(struct ixl_vsi *vsi)
2369 struct ixl_mac_filter *f;
2371 while (!SLIST_EMPTY(&vsi->ftl)) {
2372 f = SLIST_FIRST(&vsi->ftl);
2373 SLIST_REMOVE_HEAD(&vsi->ftl, next);
2377 vsi->num_hw_filters = 0;
2381 * Fill out fields in queue struct and setup tx/rx memory and structs
/*
 * Per-queue setup: descriptor counts, tail registers, ring locks, DMA
 * descriptor memory, soft structures, and the TX buf ring.  Failures
 * unwind through the labeled cleanup chain at the bottom (goto-cleanup
 * pattern), releasing everything acquired so far.
 */
2384 ixl_vsi_setup_queue(struct ixl_vsi *vsi, struct ixl_queue *que, int index)
2386 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2387 device_t dev = pf->dev;
2388 struct i40e_hw *hw = &pf->hw;
2389 struct tx_ring *txr = &que->txr;
2390 struct rx_ring *rxr = &que->rxr;
2394 que->num_tx_desc = vsi->num_tx_desc;
2395 que->num_rx_desc = vsi->num_rx_desc;
2400 txr->tail = I40E_QTX_TAIL(que->me);
2402 /* Initialize the TX lock */
2403 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2404 device_get_nameunit(dev), que->me);
2405 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2407 * Create the TX descriptor ring
2409 * In Head Writeback mode, the descriptor ring is one bigger
2410 * than the number of descriptors for space for the HW to
2411 * write back index of last completed descriptor.
2413 if (vsi->enable_head_writeback) {
2414 tsize = roundup2((que->num_tx_desc *
2415 sizeof(struct i40e_tx_desc)) +
2416 sizeof(u32), DBA_ALIGN);
2418 tsize = roundup2((que->num_tx_desc *
2419 sizeof(struct i40e_tx_desc)), DBA_ALIGN);
2421 if (i40e_allocate_dma_mem(hw,
2422 &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2424 "Unable to allocate TX Descriptor memory\n");
2426 goto err_destroy_tx_mtx;
2428 txr->base = (struct i40e_tx_desc *)txr->dma.va;
2429 bzero((void *)txr->base, tsize);
2430 /* Now allocate transmit soft structs for the ring */
2431 if (ixl_allocate_tx_data(que)) {
2433 "Critical Failure setting up TX structures\n");
2435 goto err_free_tx_dma;
2437 /* Allocate a buf ring */
2438 txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF,
2439 M_NOWAIT, &txr->mtx);
2440 if (txr->br == NULL) {
2442 "Critical Failure setting up TX buf ring\n");
2444 goto err_free_tx_data;
/* RX side: descriptor memory, lock, and soft structures */
2447 rsize = roundup2(que->num_rx_desc *
2448 sizeof(union i40e_rx_desc), DBA_ALIGN);
2450 rxr->tail = I40E_QRX_TAIL(que->me);
2452 /* Initialize the RX side lock */
2453 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2454 device_get_nameunit(dev), que->me);
2455 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2457 if (i40e_allocate_dma_mem(hw,
2458 &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2460 "Unable to allocate RX Descriptor memory\n");
2462 goto err_destroy_rx_mtx;
2464 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2465 bzero((void *)rxr->base, rsize);
2466 /* Allocate receive soft structs for the ring*/
2467 if (ixl_allocate_rx_data(que)) {
2469 "Critical Failure setting up receive structs\n");
2471 goto err_free_rx_dma;
/* Error unwind: release in reverse order of acquisition */
2477 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2479 mtx_destroy(&rxr->mtx);
2480 /* err_free_tx_buf_ring */
2481 buf_ring_free(txr->br, M_DEVBUF);
2483 ixl_free_que_tx(que);
2485 i40e_free_dma_mem(&pf->hw, &txr->dma);
2487 mtx_destroy(&txr->mtx);
2493 ixl_vsi_setup_queues(struct ixl_vsi *vsi)
2495 struct ixl_queue *que;
2498 for (int i = 0; i < vsi->num_queues; i++) {
2499 que = &vsi->queues[i];
2500 error = ixl_vsi_setup_queue(vsi, que, i);
2505 sysctl_ctx_init(&vsi->sysctl_ctx);
/*********************************************************************
 *
 *  Allocate memory for the VSI (virtual station interface) and their
 *  associated queues, rings and the descriptors associated with each,
 *  called only once at attach.
 *
 **********************************************************************/
ixl_setup_stations(struct ixl_pf *pf)
	device_t dev = pf->dev;
	struct ixl_vsi *vsi;
	/* NOTE(review): initialization of vsi (presumably vsi = &pf->vsi)
	 * and some fields is elided in this extract */
	vsi->back = (void *)pf;
	/* PF LAN VSI always runs with MSI-X in this driver */
	vsi->flags |= IXL_FLAGS_USES_MSIX;
	/* Get memory for the station queues */
	(struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		/* NOTE(review): error assignment before this goto is elided */
		goto ixl_setup_stations_err;
	/* Then setup each queue */
	error = ixl_vsi_setup_queues(vsi);
ixl_setup_stations_err:
/*
** Provide a update to the queue RX
** interrupt moderation value.
**
** When dynamic moderation is enabled, classifies recent traffic into a
** latency band from bytes-per-ITR-interval and smooths the new ITR into
** the old one before writing the per-queue ITR register.
**
** NOTE(review): local declarations, early return, and several closing
** braces are elided in this extract; only comments were edited.
*/
ixl_set_queue_rx_itr(struct ixl_queue *que)
	struct ixl_vsi *vsi = que->vsi;
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw *hw = vsi->hw;
	struct rx_ring *rxr = &que->rxr;
	/* Idle, do nothing */
	if (rxr->bytes == 0)
	if (pf->dynamic_rx_itr) {
		/* Traffic intensity: bytes scaled by the current ITR */
		rx_bytes = rxr->bytes/rxr->itr;
		/* Adjust latency range */
		switch (rxr->latency) {
		case IXL_LOW_LATENCY:
			if (rx_bytes > 10) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
		case IXL_AVE_LATENCY:
			if (rx_bytes > 20) {
				rx_latency = IXL_BULK_LATENCY;
				rx_itr = IXL_ITR_8K;
			} else if (rx_bytes <= 10) {
				rx_latency = IXL_LOW_LATENCY;
				rx_itr = IXL_ITR_100K;
		case IXL_BULK_LATENCY:
			if (rx_bytes <= 20) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
		rxr->latency = rx_latency;
		if (rx_itr != rxr->itr) {
			/* do an exponential smoothing */
			rx_itr = (10 * rx_itr * rxr->itr) /
			    ((9 * rx_itr) + rxr->itr);
			/* Clamp and program the per-queue RX ITR register */
			rxr->itr = min(rx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
			    que->me), rxr->itr);
	} else { /* We may have toggled to non-dynamic */
		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->rx_itr_setting = pf->rx_itr;
		/* Update the hardware if needed */
		if (rxr->itr != vsi->rx_itr_setting) {
			rxr->itr = vsi->rx_itr_setting;
			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
			    que->me), rxr->itr);
/*
** Provide a update to the queue TX
** interrupt moderation value.
**
** TX twin of ixl_set_queue_rx_itr(): adjusts the latency band from
** bytes-per-ITR-interval, smooths, and writes the per-queue TX ITR.
**
** NOTE(review): local declarations, early return, and several closing
** braces are elided in this extract; only comments were edited.
*/
ixl_set_queue_tx_itr(struct ixl_queue *que)
	struct ixl_vsi *vsi = que->vsi;
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw *hw = vsi->hw;
	struct tx_ring *txr = &que->txr;
	/* Idle, do nothing */
	if (txr->bytes == 0)
	if (pf->dynamic_tx_itr) {
		/* Traffic intensity: bytes scaled by the current ITR */
		tx_bytes = txr->bytes/txr->itr;
		/* Adjust latency range */
		switch (txr->latency) {
		case IXL_LOW_LATENCY:
			if (tx_bytes > 10) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
		case IXL_AVE_LATENCY:
			if (tx_bytes > 20) {
				tx_latency = IXL_BULK_LATENCY;
				tx_itr = IXL_ITR_8K;
			} else if (tx_bytes <= 10) {
				tx_latency = IXL_LOW_LATENCY;
				tx_itr = IXL_ITR_100K;
		case IXL_BULK_LATENCY:
			if (tx_bytes <= 20) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
		txr->latency = tx_latency;
		if (tx_itr != txr->itr) {
			/* do an exponential smoothing */
			tx_itr = (10 * tx_itr * txr->itr) /
			    ((9 * tx_itr) + txr->itr);
			/* Clamp and program the per-queue TX ITR register */
			txr->itr = min(tx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
			    que->me), txr->itr);
	} else { /* We may have toggled to non-dynamic */
		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->tx_itr_setting = pf->tx_itr;
		/* Update the hardware if needed */
		if (txr->itr != vsi->tx_itr_setting) {
			txr->itr = vsi->tx_itr_setting;
			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
			    que->me), txr->itr);
/*
 * Create this VSI's sysctl subtree under its device's tree:
 * a named VSI node, ethernet statistics beneath it, and
 * (when requested) per-queue statistics.
 */
ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls)
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;
	struct sysctl_oid_list *vsi_list;
	tree = device_get_sysctl_tree(vsi->dev);
	child = SYSCTL_CHILDREN(tree);
	/* The VSI node is created in the VSI's own sysctl context */
	vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
	    CTLFLAG_RD, NULL, "VSI Number");
	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
	ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);
	/* NOTE(review): a guard using the queues_sysctls parameter is
	 * presumably elided before this call — verify against upstream */
	ixl_vsi_add_queues_stats(vsi);
 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 *
 * NOTE(review): the early-return statements and device_printf() call
 * openers are elided in this extract; only comments were edited.
ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int requested_tx_itr;
	requested_tx_itr = pf->tx_itr;
	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
	/* Read-only access or handler error: nothing to apply */
	if ((error) || (req->newptr == NULL))
	if (pf->dynamic_tx_itr) {
		/* Static ITR cannot be set while dynamic moderation is on */
		"Cannot set TX itr value while dynamic TX itr is enabled\n");
	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
		"Invalid TX itr value; value must be between 0 and %d\n",
	/* Accept the value and program every queue's TX ITR register */
	pf->tx_itr = requested_tx_itr;
	ixl_configure_tx_itr(pf);
 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 *
 * NOTE(review): the early-return statements and device_printf() call
 * openers are elided in this extract; only comments were edited.
ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int requested_rx_itr;
	requested_rx_itr = pf->rx_itr;
	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
	/* Read-only access or handler error: nothing to apply */
	if ((error) || (req->newptr == NULL))
	if (pf->dynamic_rx_itr) {
		/* Static ITR cannot be set while dynamic moderation is on */
		"Cannot set RX itr value while dynamic RX itr is enabled\n");
	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
		"Invalid RX itr value; value must be between 0 and %d\n",
	/* Accept the value and program every queue's RX ITR register */
	pf->rx_itr = requested_rx_itr;
	ixl_configure_rx_itr(pf);
2786 ixl_add_hw_stats(struct ixl_pf *pf)
2788 device_t dev = pf->dev;
2789 struct i40e_hw_port_stats *pf_stats = &pf->stats;
2791 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2792 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2793 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2795 /* Driver statistics */
2796 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
2797 CTLFLAG_RD, &pf->watchdog_events,
2798 "Watchdog timeouts");
2799 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
2800 CTLFLAG_RD, &pf->admin_irq,
2801 "Admin Queue IRQ Handled");
2803 ixl_vsi_add_sysctls(&pf->vsi, "pf", true);
2805 ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
/*
 * Create the "mac" sysctl node and register every MAC-level hardware
 * counter from i40e_hw_port_stats beneath it, plus the shared
 * ethernet statistics.
 *
 * NOTE(review): the ctls[] terminator entry and the loop tail
 * (entry increment, closing braces) are elided in this extract.
 */
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
	struct sysctl_oid_list *child,
	struct i40e_hw_port_stats *stats)
	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
	    CTLFLAG_RD, NULL, "Mac Statistics");
	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
	struct i40e_eth_stats *eth_stats = &stats->eth;
	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
	/* Table of {counter pointer, sysctl name, description} triples */
	struct ixl_sysctl_info ctls[] =
		{&stats->crc_errors, "crc_errors", "CRC Errors"},
		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
		/* Packet Reception Stats */
		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
		/* Packet Transmission Stats */
		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
		/* Flow control */
		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
	/* Walk the table until the (elided) NULL terminator entry */
	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
		    CTLFLAG_RD, entry->stat,
		    entry->description);
/*
 * Program the RSS hash key: on X722 via the admin queue, on other
 * MACs by writing the PFQF_HKEY registers directly.
 *
 * NOTE(review): the RSS-option preprocessor guards around the key
 * fetch, and the failure branch around the device_printf() text,
 * are elided in this extract; only comments were edited.
 */
ixl_set_rss_key(struct ixl_pf *pf)
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
	enum i40e_status_code status;
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_seed);
	ixl_get_default_rss_key(rss_seed);
	/* Fill out hash function seed */
	if (hw->mac.type == I40E_MAC_X722) {
		struct i40e_aqc_get_set_rss_key_data key_data;
		/* 52 is presumably sizeof(key_data) (standard + extended
		 * key) — TODO confirm and replace the magic number */
		bcopy(rss_seed, &key_data, 52);
		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
		/* AQ failure is logged, not fatal */
		"i40e_aq_set_rss_key status %s, error %s\n",
		i40e_stat_str(hw, status),
		i40e_aq_str(hw, hw->aq.asq_last_status));
		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
 * Configure enabled PCTYPES for RSS.
 *
 * Builds the hash-enable (HENA) mask either from the kernel RSS
 * configuration or from per-MAC defaults, then merges it into the
 * two PFQF_HENA registers.
 *
 * NOTE(review): the RSS-option preprocessor guards (#ifdef/#else/#endif)
 * separating the two mask sources, and the "hena |= set_hena" merge
 * line, are elided in this extract; only comments were edited.
ixl_set_rss_pctypes(struct ixl_pf *pf)
	struct i40e_hw *hw = &pf->hw;
	u64 set_hena = 0, hena;
	u32 rss_hash_config;
	/* Translate kernel RSS hash types into i40e PCTYPE bits */
	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
	/* Without kernel RSS, fall back to per-MAC default masks */
	if (hw->mac.type == I40E_MAC_X722)
		set_hena = IXL_DEFAULT_RSS_HENA_X722;
	set_hena = IXL_DEFAULT_RSS_HENA_XL710;
	/* Read the current 64-bit HENA value split across two registers */
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
/*
 * Program the RSS lookup table (HLUT), spreading queues round-robin
 * (or per the kernel RSS indirection table) across all entries.
 * On X722 the table is set via the admin queue; otherwise it is
 * written directly to the PFQF_HLUT registers.
 *
 * NOTE(review): declarations of the LUT staging buffer (hlut_buf),
 * loop variables, the per-entry store, and the RSS-option
 * preprocessor guards are elided in this extract.
 */
ixl_set_rss_hlut(struct ixl_pf *pf)
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct ixl_vsi *vsi = &pf->vsi;
	int lut_entry_width;
	enum i40e_status_code status;
	lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * the number of queues).
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % vsi->num_queues;
		que_id = i % vsi->num_queues;
		/* Mask the queue id down to the LUT entry width */
		lut = (que_id & ((0x1 << lut_entry_width) - 1));
	if (hw->mac.type == I40E_MAC_X722) {
		status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
			device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
		/* Four 8-bit LUT entries per 32-bit HLUT register */
		for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
			wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
/*
** Setup the PF's RSS parameters.
**
** Programs, in order: the hash key, the enabled packet
** classification types (HENA), and the lookup table (HLUT).
*/
void
ixl_config_rss(struct ixl_pf *pf)
{
	ixl_set_rss_key(pf);
	ixl_set_rss_pctypes(pf);
	ixl_set_rss_hlut(pf);
}
/*
** This routine is run via an vlan config EVENT,
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA, init will
** repopulate the real table.
**
** NOTE(review): early returns, locking, and the VLAN counter update
** are elided in this extract; only comments were edited.
*/
ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
	struct ixl_vsi *vsi = ifp->if_softc;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
	if (ifp->if_softc != arg) /* Not our event */
	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
	/* Add a MAC/VLAN filter for our own address on this VLAN */
	ixl_add_filter(vsi, hw->mac.addr, vtag);
/*
** This routine is run via an vlan
** unconfig EVENT, remove our entry
** in the soft vfta.
**
** NOTE(review): early returns, locking, and the VLAN counter update
** are elided in this extract; only comments were edited.
*/
ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
	struct ixl_vsi *vsi = ifp->if_softc;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
	if (ifp->if_softc != arg)
	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
	/* Remove the MAC/VLAN filter for our address on this VLAN */
	ixl_del_filter(vsi, hw->mac.addr, vtag);
3044 * In some firmware versions there is default MAC/VLAN filter
3045 * configured which interferes with filters managed by driver.
3046 * Make sure it's removed.
3049 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
3051 struct i40e_aqc_remove_macvlan_element_data e;
3053 bzero(&e, sizeof(e));
3054 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
3056 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3057 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
3059 bzero(&e, sizeof(e));
3060 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
3062 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
3063 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
3064 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
/*
 * Sync the hardware's locally-administered MAC address (LAA) with the
 * interface's current link-level address, swapping the driver's MAC
 * filter from the old address to the new one.
 *
 * NOTE(review): the early-exit statements after each check and the
 * final return are elided in this extract; only comments were edited.
 */
static enum i40e_status_code
ixl_set_lla(struct ixl_vsi *vsi)
	struct i40e_hw *hw = vsi->hw;
	u8 tmpaddr[ETHER_ADDR_LEN];
	enum i40e_status_code status;
	status = I40E_SUCCESS;
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETHER_ADDR_LEN);
	/* Nothing to do if the address is unchanged */
	if (memcmp(hw->mac.addr, tmpaddr, ETHER_ADDR_LEN) == 0)
	status = i40e_validate_mac_addr(tmpaddr);
	if (status != I40E_SUCCESS)
	/* Swap the driver filter to the new address, then write it to HW */
	ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
	bcopy(tmpaddr, hw->mac.addr, ETHER_ADDR_LEN);
	status = i40e_aq_mac_address_write(hw,
	    I40E_AQC_WRITE_TYPE_LAA_ONLY,
	    hw->mac.addr, NULL);
	if (status != I40E_SUCCESS)
	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
/*
** Initialize filter list and add filters that the hardware
** needs to know about.
**
** Requires VSI's seid to be set before calling.
**
** NOTE(review): the early return for the VF case (between the
** IXL_VSI_IS_VF check and the default-filter removal) is elided.
*/
ixl_init_filters(struct ixl_vsi *vsi)
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
	/* Initialize mac filter list for VSI */
	SLIST_INIT(&vsi->ftl);
	vsi->num_hw_filters = 0;
	/* Add broadcast address */
	ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
	if (IXL_VSI_IS_VF(vsi))
	/* Remove firmware's leftover default filter before adding ours */
	ixl_del_default_hw_filters(vsi);
	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
	/*
	 * Prevent Tx flow control frames from being sent out by
	 * non-firmware transmitters.
	 * This affects every VSI in the PF.
	 */
	if (pf->enable_tx_fc_filter)
		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
3132 ** This routine adds mulicast filters
3135 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3137 struct ixl_mac_filter *f;
3139 /* Does one already exist */
3140 f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3144 f = ixl_get_filter(vsi);
3146 printf("WARNING: no filter available!!\n");
3149 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3150 f->vlan = IXL_VLAN_ANY;
3151 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3158 ixl_reconfigure_filters(struct ixl_vsi *vsi)
3160 ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_hw_filters);
/*
** This routine adds macvlan filters
**
** Adds a software MAC/VLAN filter entry (if not already present) and
** immediately pushes it to the hardware.  When the first real VLAN is
** registered, the VLAN-agnostic filter is replaced by a VLAN-0 one.
**
** NOTE(review): local declarations (dev), early return for an existing
** filter, `f->vlan = vlan;`, and closing braces are elided here.
*/
ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
	struct ixl_mac_filter *f, *tmp;
	DEBUGOUT("ixl_add_filter: begin");
	/* Does one already exist */
	f = ixl_find_filter(vsi, macaddr, vlan);
	/*
	** Is this the first vlan being registered, if so we
	** need to remove the ANY filter that indicates we are
	** not in a vlan, and replace that with a 0 filter.
	*/
	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
		ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
		ixl_add_filter(vsi, macaddr, 0);
	f = ixl_get_filter(vsi);
		device_printf(dev, "WARNING: no filter available!!\n");
	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
	if (f->vlan != IXL_VLAN_ANY)
		f->flags |= IXL_FILTER_VLAN;
	/* Push this single new filter to the hardware */
	ixl_add_hw_filters(vsi, f->flags, 1);
/*
 * Mark the matching software filter for deletion and remove it from
 * the hardware.  When the last VLAN filter is removed, restore the
 * VLAN-agnostic (IXL_VLAN_ANY) filter for the address.
 *
 * NOTE(review): the early return when no filter matches, and closing
 * braces, are elided in this extract; only comments were edited.
 */
ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
	struct ixl_mac_filter *f;
	f = ixl_find_filter(vsi, macaddr, vlan);
	f->flags |= IXL_FILTER_DEL;
	ixl_del_hw_filters(vsi, 1);
	/* Check if this is the last vlan removal */
	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
		/* Switch back to a non-vlan filter */
		ixl_del_filter(vsi, macaddr, 0);
		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3235 ** Find the filter with both matching mac addr and vlan id
3237 struct ixl_mac_filter *
3238 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
3240 struct ixl_mac_filter *f;
3243 SLIST_FOREACH(f, &vsi->ftl, next) {
3244 if (!cmp_etheraddr(f->macaddr, macaddr))
3246 if (f->vlan == vlan) {
/*
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
**
** NOTE(review): several lines are elided in this extract (pf/hw/dev/
** err/j declarations, NULL check on the allocation, else branch for a
** specific VLAN tag, free(a), closing braces); only comments edited.
*/
ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
	struct i40e_aqc_add_macvlan_element_data *a, *b;
	struct ixl_mac_filter *f;
	IXL_PF_LOCK_ASSERT(pf);
	/* Staging array for the AQ command: one element per filter */
	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
		device_printf(dev, "add_hw_filters failed to get memory\n");
	/*
	** Scan the filter list, each time we find one
	** we add it to the admin queue array and turn off
	** the add bit.
	*/
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((f->flags & flags) == flags) {
			b = &a[j]; // a pox on fvl long names :)
			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
			if (f->vlan == IXL_VLAN_ANY) {
				/* VLAN-agnostic filter: match any VLAN */
				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
				b->vlan_tag = f->vlan;
			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			/* Clear the pending-add bit now that it's staged */
			f->flags &= ~IXL_FILTER_ADD;
	err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
		device_printf(dev, "aq_add_macvlan err %d, "
		    "aq_error %d\n", err, hw->aq.asq_last_status);
	vsi->num_hw_filters += j;
/*
** This routine takes removals in the vsi filter
** table and creates an Admin Queue call to delete
** the filters in the hardware.
**
** NOTE(review): several lines are elided in this extract (pf/hw/dev/
** err/j/sc declarations, free() of both the list entries and the AQ
** array, closing braces, return); only comments were edited.
*/
ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
	struct i40e_aqc_remove_macvlan_element_data *d, *e;
	struct ixl_mac_filter *f, *f_temp;
	DEBUGOUT("ixl_del_hw_filters: begin\n");
	/* Staging array for the AQ remove command */
	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
		printf("del hw filter failed to get memory\n");
	/* SAFE variant: entries are unlinked while iterating */
	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
		if (f->flags & IXL_FILTER_DEL) {
			e = &d[j]; // a pox on fvl long names :)
			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			if (f->vlan == IXL_VLAN_ANY) {
				/* VLAN-agnostic filter: match any VLAN */
				e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
				e->vlan_tag = f->vlan;
			/* delete entry from vsi list */
			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
	err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
	if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
		/* Count per-element successes so accounting stays right */
		for (int i = 0; i < j; i++)
			sc += (!d[i].error_code);
		vsi->num_hw_filters -= sc;
		"Failed to remove %d/%d filters, aq error %d\n",
		j - sc, j, hw->aq.asq_last_status);
	vsi->num_hw_filters -= j;
	DEBUGOUT("ixl_del_hw_filters: end\n");
3386 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3388 struct i40e_hw *hw = &pf->hw;
3393 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3395 ixl_dbg(pf, IXL_DBG_EN_DIS,
3396 "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
3399 i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
3401 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3402 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3403 I40E_QTX_ENA_QENA_STAT_MASK;
3404 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
3405 /* Verify the enable took */
3406 for (int j = 0; j < 10; j++) {
3407 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3408 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3410 i40e_usec_delay(10);
3412 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3413 device_printf(pf->dev, "TX queue %d still disabled!\n",
3422 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3424 struct i40e_hw *hw = &pf->hw;
3429 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3431 ixl_dbg(pf, IXL_DBG_EN_DIS,
3432 "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
3435 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3436 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3437 I40E_QRX_ENA_QENA_STAT_MASK;
3438 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
3439 /* Verify the enable took */
3440 for (int j = 0; j < 10; j++) {
3441 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3442 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3444 i40e_usec_delay(10);
3446 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3447 device_printf(pf->dev, "RX queue %d still disabled!\n",
3456 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3460 error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
3461 /* Called function already prints error message */
3464 error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
3468 /* For PF VSI only */
3470 ixl_enable_rings(struct ixl_vsi *vsi)
3472 struct ixl_pf *pf = vsi->back;
3475 for (int i = 0; i < vsi->num_queues; i++) {
3476 error = ixl_enable_ring(pf, &pf->qtag, i);
3485 * Returns error on first ring that is detected hung.
3488 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3490 struct i40e_hw *hw = &pf->hw;
3495 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3497 i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
3498 i40e_usec_delay(500);
3500 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3501 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3502 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
3503 /* Verify the disable took */
3504 for (int j = 0; j < 10; j++) {
3505 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3506 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3508 i40e_msec_delay(10);
3510 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3511 device_printf(pf->dev, "TX queue %d still enabled!\n",
3520 * Returns error on first ring that is detected hung.
3523 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3525 struct i40e_hw *hw = &pf->hw;
3530 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3532 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3533 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3534 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
3535 /* Verify the disable took */
3536 for (int j = 0; j < 10; j++) {
3537 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3538 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3540 i40e_msec_delay(10);
3542 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3543 device_printf(pf->dev, "RX queue %d still enabled!\n",
3552 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3556 error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
3557 /* Called function already prints error message */
3560 error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
3564 /* For PF VSI only */
3566 ixl_disable_rings(struct ixl_vsi *vsi)
3568 struct ixl_pf *pf = vsi->back;
3571 for (int i = 0; i < vsi->num_queues; i++) {
3572 error = ixl_disable_ring(pf, &pf->qtag, i);
 * ixl_handle_mdd_event
 *
 * Called from interrupt handler to identify possibly malicious vfs
 * (But also detects events from the PF, as well)
 *
 * NOTE(review): this extract elides local declarations (reg), the
 * device_printf() call openers before the message strings, closing
 * braces, and any VF handling between detection and re-enable.
ixl_handle_mdd_event(struct ixl_pf *pf)
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		/* Decode PF number, event type and queue from TX MDET */
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
		    I40E_GL_MDET_TX_EVENT_SHIFT;
		u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
		    I40E_GL_MDET_TX_QUEUE_SHIFT;
		"Malicious Driver Detection event %d"
		" on TX queue %d, pf number %d\n",
		event, queue, pf_num);
		/* Clear the latched event (write-1-to-clear) */
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		/* Decode function, event type and queue from RX MDET */
		u8 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
		    I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
		    I40E_GL_MDET_RX_EVENT_SHIFT;
		u16 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
		    I40E_GL_MDET_RX_QUEUE_SHIFT;
		"Malicious Driver Detection event %d"
		" on RX queue %d, pf number %d\n",
		event, queue, pf_num);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	/* Check whether this PF itself was the offender */
	reg = rd32(hw, I40E_PF_MDET_TX);
	if (reg & I40E_PF_MDET_TX_VALID_MASK) {
		wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
		"MDD TX event is for this function!\n");
		pf_mdd_detected = true;
	reg = rd32(hw, I40E_PF_MDET_RX);
	if (reg & I40E_PF_MDET_RX_VALID_MASK) {
		wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
		"MDD RX event is for this function!\n");
		pf_mdd_detected = true;
	/* re-enable mdd interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/*
 * Enable interrupts for the VSI: each queue's vector, plus the
 * admin/other cause (vector 0).
 *
 * NOTE(review): pf is declared but unused in the visible lines — an
 * MSI-X vs. legacy-interrupt branch is presumably elided around the
 * loop and the intr0 call; verify against upstream.
 */
ixl_enable_intr(struct ixl_vsi *vsi)
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_queue *que = vsi->queues;
	for (int i = 0; i < vsi->num_queues; i++, que++)
		ixl_enable_queue(hw, que->me);
	ixl_enable_intr0(hw);
3666 ixl_disable_rings_intr(struct ixl_vsi *vsi)
3668 struct i40e_hw *hw = vsi->hw;
3669 struct ixl_queue *que = vsi->queues;
3671 for (int i = 0; i < vsi->num_queues; i++, que++)
3672 ixl_disable_queue(hw, que->me);
3676 ixl_enable_intr0(struct i40e_hw *hw)
3680 /* Use IXL_ITR_NONE so ITR isn't updated here */
3681 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3682 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3683 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3684 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3688 ixl_disable_intr0(struct i40e_hw *hw)
3692 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3693 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3698 ixl_enable_queue(struct i40e_hw *hw, int id)
3702 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3703 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3704 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3705 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3709 ixl_disable_queue(struct i40e_hw *hw, int id)
3713 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3714 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3718 ixl_update_stats_counters(struct ixl_pf *pf)
3720 struct i40e_hw *hw = &pf->hw;
3721 struct ixl_vsi *vsi = &pf->vsi;
3724 struct i40e_hw_port_stats *nsd = &pf->stats;
3725 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3727 /* Update hw stats */
3728 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3729 pf->stat_offsets_loaded,
3730 &osd->crc_errors, &nsd->crc_errors);
3731 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3732 pf->stat_offsets_loaded,
3733 &osd->illegal_bytes, &nsd->illegal_bytes);
3734 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3735 I40E_GLPRT_GORCL(hw->port),
3736 pf->stat_offsets_loaded,
3737 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3738 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3739 I40E_GLPRT_GOTCL(hw->port),
3740 pf->stat_offsets_loaded,
3741 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3742 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3743 pf->stat_offsets_loaded,
3744 &osd->eth.rx_discards,
3745 &nsd->eth.rx_discards);
3746 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3747 I40E_GLPRT_UPRCL(hw->port),
3748 pf->stat_offsets_loaded,
3749 &osd->eth.rx_unicast,
3750 &nsd->eth.rx_unicast);
3751 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3752 I40E_GLPRT_UPTCL(hw->port),
3753 pf->stat_offsets_loaded,
3754 &osd->eth.tx_unicast,
3755 &nsd->eth.tx_unicast);
3756 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3757 I40E_GLPRT_MPRCL(hw->port),
3758 pf->stat_offsets_loaded,
3759 &osd->eth.rx_multicast,
3760 &nsd->eth.rx_multicast);
3761 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3762 I40E_GLPRT_MPTCL(hw->port),
3763 pf->stat_offsets_loaded,
3764 &osd->eth.tx_multicast,
3765 &nsd->eth.tx_multicast);
3766 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3767 I40E_GLPRT_BPRCL(hw->port),
3768 pf->stat_offsets_loaded,
3769 &osd->eth.rx_broadcast,
3770 &nsd->eth.rx_broadcast);
3771 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3772 I40E_GLPRT_BPTCL(hw->port),
3773 pf->stat_offsets_loaded,
3774 &osd->eth.tx_broadcast,
3775 &nsd->eth.tx_broadcast);
3777 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3778 pf->stat_offsets_loaded,
3779 &osd->tx_dropped_link_down,
3780 &nsd->tx_dropped_link_down);
3781 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3782 pf->stat_offsets_loaded,
3783 &osd->mac_local_faults,
3784 &nsd->mac_local_faults);
3785 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3786 pf->stat_offsets_loaded,
3787 &osd->mac_remote_faults,
3788 &nsd->mac_remote_faults);
3789 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3790 pf->stat_offsets_loaded,
3791 &osd->rx_length_errors,
3792 &nsd->rx_length_errors);
3794 /* Flow control (LFC) stats */
3795 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3796 pf->stat_offsets_loaded,
3797 &osd->link_xon_rx, &nsd->link_xon_rx);
3798 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3799 pf->stat_offsets_loaded,
3800 &osd->link_xon_tx, &nsd->link_xon_tx);
3801 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3802 pf->stat_offsets_loaded,
3803 &osd->link_xoff_rx, &nsd->link_xoff_rx);
3804 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3805 pf->stat_offsets_loaded,
3806 &osd->link_xoff_tx, &nsd->link_xoff_tx);
3808 /* Packet size stats rx */
3809 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3810 I40E_GLPRT_PRC64L(hw->port),
3811 pf->stat_offsets_loaded,
3812 &osd->rx_size_64, &nsd->rx_size_64);
3813 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3814 I40E_GLPRT_PRC127L(hw->port),
3815 pf->stat_offsets_loaded,
3816 &osd->rx_size_127, &nsd->rx_size_127);
3817 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3818 I40E_GLPRT_PRC255L(hw->port),
3819 pf->stat_offsets_loaded,
3820 &osd->rx_size_255, &nsd->rx_size_255);
3821 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3822 I40E_GLPRT_PRC511L(hw->port),
3823 pf->stat_offsets_loaded,
3824 &osd->rx_size_511, &nsd->rx_size_511);
3825 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3826 I40E_GLPRT_PRC1023L(hw->port),
3827 pf->stat_offsets_loaded,
3828 &osd->rx_size_1023, &nsd->rx_size_1023);
3829 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3830 I40E_GLPRT_PRC1522L(hw->port),
3831 pf->stat_offsets_loaded,
3832 &osd->rx_size_1522, &nsd->rx_size_1522);
3833 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3834 I40E_GLPRT_PRC9522L(hw->port),
3835 pf->stat_offsets_loaded,
3836 &osd->rx_size_big, &nsd->rx_size_big);
3838 /* Packet size stats tx */
3839 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3840 I40E_GLPRT_PTC64L(hw->port),
3841 pf->stat_offsets_loaded,
3842 &osd->tx_size_64, &nsd->tx_size_64);
3843 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3844 I40E_GLPRT_PTC127L(hw->port),
3845 pf->stat_offsets_loaded,
3846 &osd->tx_size_127, &nsd->tx_size_127);
3847 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3848 I40E_GLPRT_PTC255L(hw->port),
3849 pf->stat_offsets_loaded,
3850 &osd->tx_size_255, &nsd->tx_size_255);
3851 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3852 I40E_GLPRT_PTC511L(hw->port),
3853 pf->stat_offsets_loaded,
3854 &osd->tx_size_511, &nsd->tx_size_511);
3855 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3856 I40E_GLPRT_PTC1023L(hw->port),
3857 pf->stat_offsets_loaded,
3858 &osd->tx_size_1023, &nsd->tx_size_1023);
3859 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3860 I40E_GLPRT_PTC1522L(hw->port),
3861 pf->stat_offsets_loaded,
3862 &osd->tx_size_1522, &nsd->tx_size_1522);
3863 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3864 I40E_GLPRT_PTC9522L(hw->port),
3865 pf->stat_offsets_loaded,
3866 &osd->tx_size_big, &nsd->tx_size_big);
3868 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
3869 pf->stat_offsets_loaded,
3870 &osd->rx_undersize, &nsd->rx_undersize);
3871 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
3872 pf->stat_offsets_loaded,
3873 &osd->rx_fragments, &nsd->rx_fragments);
3874 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
3875 pf->stat_offsets_loaded,
3876 &osd->rx_oversize, &nsd->rx_oversize);
3877 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
3878 pf->stat_offsets_loaded,
3879 &osd->rx_jabber, &nsd->rx_jabber);
3881 i40e_get_phy_lpi_status(hw, nsd);
3883 ixl_stat_update32(hw, I40E_PRTPM_TLPIC,
3884 pf->stat_offsets_loaded,
3885 &osd->tx_lpi_count, &nsd->tx_lpi_count);
3886 ixl_stat_update32(hw, I40E_PRTPM_RLPIC,
3887 pf->stat_offsets_loaded,
3888 &osd->rx_lpi_count, &nsd->rx_lpi_count);
3890 pf->stat_offsets_loaded = true;
3893 /* Update vsi stats */
3894 ixl_update_vsi_stats(vsi);
3896 for (int i = 0; i < pf->num_vfs; i++) {
3898 if (vf->vf_flags & VF_FLAG_ENABLED)
3899 ixl_update_eth_stats(&pf->vfs[i].vsi);
/*
 * ixl_prepare_for_reset - quiesce the device ahead of an EMP/Core/PF reset.
 * Tears down queue and admin-queue MSI-X state, shuts down the LAN HMC and
 * the admin queue, drains the periodic timer, and releases the PF's ring
 * buffers and queue-manager allocation.
 * NOTE(review): this listing is abridged — error-check lines between the
 * numbered statements are not shown; confirm against the full source.
 */
3904 ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
3906 struct i40e_hw *hw = &pf->hw;
3907 struct ixl_vsi *vsi = &pf->vsi;
3908 device_t dev = pf->dev;
/* Release per-queue interrupt vectors before tearing anything else down */
3915 ixl_teardown_queue_msix(vsi);
/* Only shut down the LAN HMC if it was actually initialized */
3917 if (hw->hmc.hmc_obj) {
3918 error = i40e_shutdown_lan_hmc(hw);
3921 "Shutdown LAN HMC failed with code %d\n", error);
/* Stop the periodic timer and the adminq interrupt path */
3924 callout_drain(&pf->timer);
3926 ixl_disable_intr0(hw);
3927 ixl_teardown_adminq_msix(pf);
3929 error = i40e_shutdown_adminq(hw);
3932 "Shutdown Admin queue failed with code %d\n", error);
3934 /* Free ring buffers, locks and filters */
3935 ixl_vsi_free_queues(vsi);
/* Return the PF's queue allocation to the queue manager */
3937 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
/*
 * ixl_rebuild_hw_structs_after_reset - re-initialize driver/HW state after a
 * reset prepared by ixl_prepare_for_reset(): re-init the admin queue, HW
 * capabilities, LAN HMC, queue manager, switch config, queues and interrupt
 * vectors, then re-query link and FW LLDP status.  On any failure it jumps
 * to a common error label that tells the user to reload the driver.
 * Most steps are skipped while the device is in FW recovery mode.
 * NOTE(review): abridged listing — intervening error-check/brace lines are
 * not shown here.
 */
3943 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf, bool is_up)
3945 struct i40e_hw *hw = &pf->hw;
3946 struct ixl_vsi *vsi = &pf->vsi;
3947 device_t dev = pf->dev;
3948 enum i40e_get_fw_lldp_status_resp lldp_status;
3951 device_printf(dev, "Rebuilding driver state...\n");
/* Detect whether FW dropped into recovery mode during the reset */
3953 if (!(atomic_load_acq_int(&pf->state) & IXL_PF_STATE_RECOVERY_MODE)) {
3954 if (ixl_fw_recovery_mode(pf)) {
3955 atomic_set_int(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
3956 pf->link_up = FALSE;
3957 ixl_update_link_status(pf);
/* Bring the admin queue back up first; everything else depends on it */
3963 error = i40e_init_adminq(hw);
3964 if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
3965 device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
3967 goto ixl_rebuild_hw_structs_after_reset_err;
3970 i40e_clear_pxe_mode(hw);
3972 error = ixl_get_hw_capabilities(pf);
3974 device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
3975 goto ixl_rebuild_hw_structs_after_reset_err;
3977 ixl_configure_intr0_msix(pf);
3978 ixl_enable_intr0(hw);
3980 /* Do not init LAN HMC and bring interface up in recovery mode */
3981 if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) == 0) {
3982 error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
3983 hw->func_caps.num_rx_qp, 0, 0);
3985 device_printf(dev, "init_lan_hmc failed: %d\n", error);
3986 goto ixl_rebuild_hw_structs_after_reset_err;
3989 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
3991 device_printf(dev, "configure_lan_hmc failed: %d\n", error);
3992 goto ixl_rebuild_hw_structs_after_reset_err;
/* Queue manager may survive the reset; only (re)create it if absent */
3995 if (!pf->qmgr.qinfo) {
3996 /* Init queue allocation manager */
3997 error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_rx_qp);
3999 device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
4001 goto ixl_rebuild_hw_structs_after_reset_err;
4004 /* reserve a contiguous allocation for the PF's VSI */
4005 error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag);
4007 device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
4009 /* TODO: error handling */
4011 device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
4012 pf->qtag.num_allocated, pf->qtag.num_active);
4014 error = ixl_switch_config(pf);
4016 device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
4018 goto ixl_rebuild_hw_structs_after_reset_err;
4020 } /* not in recovery mode */
4022 /* Remove default filters reinstalled by FW on reset */
4023 ixl_del_default_hw_filters(vsi);
4025 if (ixl_vsi_setup_queues(vsi)) {
4026 device_printf(dev, "setup queues failed!\n");
4028 goto ixl_rebuild_hw_structs_after_reset_err;
4031 ixl_vsi_add_sysctls(vsi, "pf", true);
/* Re-establish interrupt vectors (MSI-X path, with legacy fallback below) */
4034 error = ixl_setup_adminq_msix(pf);
4036 device_printf(dev, "ixl_setup_adminq_msix() error: %d\n",
4038 goto ixl_rebuild_hw_structs_after_reset_err;
4041 ixl_configure_intr0_msix(pf);
4042 ixl_enable_intr0(hw);
4044 error = ixl_setup_queue_msix(vsi);
4046 device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
4048 goto ixl_rebuild_hw_structs_after_reset_err;
4050 error = ixl_setup_queue_tqs(vsi);
4052 device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
4054 goto ixl_rebuild_hw_structs_after_reset_err;
4057 error = ixl_setup_legacy(pf);
4059 device_printf(dev, "ixl_setup_legacy() error: %d\n",
4061 goto ixl_rebuild_hw_structs_after_reset_err;
4065 /* Do not bring interface up in recovery mode */
4066 if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0)
4069 /* Determine link state */
4070 if (ixl_attach_get_link_status(pf)) {
4072 /* TODO: error handling */
4075 i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
4077 /* Query device FW LLDP status */
4078 if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
4079 if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
4080 atomic_set_int(&pf->state,
4081 IXL_PF_STATE_FW_LLDP_DISABLED);
4083 atomic_clear_int(&pf->state,
4084 IXL_PF_STATE_FW_LLDP_DISABLED);
4091 device_printf(dev, "Rebuilding driver state done.\n");
/* Restart the periodic housekeeping timer stopped in prepare_for_reset */
4093 callout_reset(&pf->timer, hz, ixl_local_timer, pf);
/* Common failure path: state is unrecoverable without a driver reload */
4097 ixl_rebuild_hw_structs_after_reset_err:
4098 device_printf(dev, "Reload the driver to recover\n");
/*
 * ixl_handle_empr_reset - full teardown/PF-reset/rebuild sequence run when
 * an EMP reset was signalled.  Records whether the interface was running so
 * the rebuild can restore that state, then clears the EMPR_RESETTING flag
 * set by the caller.
 */
4103 ixl_handle_empr_reset(struct ixl_pf *pf)
4105 struct ixl_vsi *vsi = &pf->vsi;
4106 struct i40e_hw *hw = &pf->hw;
4107 bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
4110 ixl_prepare_for_reset(pf, is_up);
4112 * i40e_pf_reset checks the type of reset and acts
4113 * accordingly. If EMP or Core reset was performed
4114 * doing PF reset is not necessary and it sometimes
4117 error = i40e_pf_reset(hw);
4119 device_printf(pf->dev, "PF reset failure %s\n",
4120 i40e_stat_str(hw, error));
4123 ixl_rebuild_hw_structs_after_reset(pf, is_up);
/* Allow the adminq task to resume normal event processing */
4125 atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
4129 ** Tasklet handler for MSIX Adminq interrupts
4130 ** - do outside interrupt since it might sleep
/*
 * Drains the admin receive queue: allocates a scratch event buffer, loops
 * cleaning ARQ elements (up to IXL_ADM_LIMIT per invocation) and dispatches
 * them by opcode (link events, VF messages, LAN overflow).  If an EMP reset
 * is pending it handles that instead of touching the adminq.  Reschedules
 * itself if events remain, then re-enables the adminq interrupt.
 */
4133 ixl_do_adminq(void *context, int pending)
4135 struct ixl_pf *pf = context;
4136 struct i40e_hw *hw = &pf->hw;
4137 struct i40e_arq_event_info event;
4139 device_t dev = pf->dev;
4141 u16 opcode, arq_pending;
4143 if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
4144 /* Flag cleared at end of this function */
4145 ixl_handle_empr_reset(pf);
4149 /* Admin Queue handling */
4150 event.buf_len = IXL_AQ_BUF_SZ;
/* M_NOWAIT: taskqueue context; bail out rather than sleep on allocation */
4151 event.msg_buf = malloc(event.buf_len,
4152 M_DEVBUF, M_NOWAIT | M_ZERO);
4153 if (!event.msg_buf) {
4154 device_printf(dev, "%s: Unable to allocate memory for Admin"
4155 " Queue event!\n", __func__);
4160 /* clean and process any events */
4162 ret = i40e_clean_arq_element(hw, &event, &arq_pending);
4165 opcode = LE16_TO_CPU(event.desc.opcode);
4166 ixl_dbg(pf, IXL_DBG_AQ,
4167 "Admin Queue event: %#06x\n", opcode);
4169 case i40e_aqc_opc_get_link_status:
4170 ixl_link_event(pf, &event);
4172 case i40e_aqc_opc_send_msg_to_pf:
4174 ixl_handle_vf_msg(pf, &event);
4177 case i40e_aqc_opc_event_lan_overflow:
/* Bound the loop so one task invocation cannot monopolize the queue */
4182 } while (arq_pending && (loop++ < IXL_ADM_LIMIT));
4184 free(event.msg_buf, M_DEVBUF);
4186 /* If there are still messages to process, reschedule. */
4187 if (arq_pending > 0)
4188 taskqueue_enqueue(pf->tq, &pf->adminq);
4190 ixl_enable_intr0(hw);
4196 * Update VSI-specific ethernet statistics counters.
/*
 * Reads the per-VSI GLV_* HW counters (indexed by the VSI's stat counter
 * slot) into vsi->eth_stats, subtracting the offsets captured on first
 * read so the reported values count from zero (see ixl_stat_update48/32).
 */
4199 ixl_update_eth_stats(struct ixl_vsi *vsi)
4201 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4202 struct i40e_hw *hw = &pf->hw;
4203 struct i40e_eth_stats *es;
4204 struct i40e_eth_stats *oes;
4205 u16 stat_idx = vsi->info.stat_counter_idx;
4207 es = &vsi->eth_stats;
4208 oes = &vsi->eth_stats_offsets;
4210 /* Gather up the stats that the hw collects */
4211 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4212 vsi->stat_offsets_loaded,
4213 &oes->tx_errors, &es->tx_errors);
4214 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4215 vsi->stat_offsets_loaded,
4216 &oes->rx_discards, &es->rx_discards);
/* RX byte/packet counters (48-bit split across H/L registers) */
4218 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4219 I40E_GLV_GORCL(stat_idx),
4220 vsi->stat_offsets_loaded,
4221 &oes->rx_bytes, &es->rx_bytes);
4222 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4223 I40E_GLV_UPRCL(stat_idx),
4224 vsi->stat_offsets_loaded,
4225 &oes->rx_unicast, &es->rx_unicast);
4226 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4227 I40E_GLV_MPRCL(stat_idx),
4228 vsi->stat_offsets_loaded,
4229 &oes->rx_multicast, &es->rx_multicast);
4230 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4231 I40E_GLV_BPRCL(stat_idx),
4232 vsi->stat_offsets_loaded,
4233 &oes->rx_broadcast, &es->rx_broadcast);
/* TX byte/packet counters */
4235 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4236 I40E_GLV_GOTCL(stat_idx),
4237 vsi->stat_offsets_loaded,
4238 &oes->tx_bytes, &es->tx_bytes);
4239 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4240 I40E_GLV_UPTCL(stat_idx),
4241 vsi->stat_offsets_loaded,
4242 &oes->tx_unicast, &es->tx_unicast);
4243 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4244 I40E_GLV_MPTCL(stat_idx),
4245 vsi->stat_offsets_loaded,
4246 &oes->tx_multicast, &es->tx_multicast);
4247 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4248 I40E_GLV_BPTCL(stat_idx),
4249 vsi->stat_offsets_loaded,
4250 &oes->tx_broadcast, &es->tx_broadcast);
/* Offsets are now valid; subsequent reads report deltas from first read */
4251 vsi->stat_offsets_loaded = true;
/*
 * ixl_update_vsi_stats - refresh the VSI's ethernet counters, then fold
 * them (plus relevant port-level counters) into the ifnet statistics via
 * the IXL_SET_* accessor macros.  TX drops aggregate buf_ring drops across
 * all queues plus the link-down drop counter.
 */
4255 ixl_update_vsi_stats(struct ixl_vsi *vsi)
4259 struct i40e_eth_stats *es;
4262 struct i40e_hw_port_stats *nsd;
4266 es = &vsi->eth_stats;
4269 ixl_update_eth_stats(vsi);
4271 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4272 for (int i = 0; i < vsi->num_queues; i++)
4273 tx_discards += vsi->queues[i].txr.br->br_drops;
4275 /* Update ifnet stats */
4276 IXL_SET_IPACKETS(vsi, es->rx_unicast +
4279 IXL_SET_OPACKETS(vsi, es->tx_unicast +
4282 IXL_SET_IBYTES(vsi, es->rx_bytes);
4283 IXL_SET_OBYTES(vsi, es->tx_bytes);
4284 IXL_SET_IMCASTS(vsi, es->rx_multicast);
4285 IXL_SET_OMCASTS(vsi, es->tx_multicast);
/* Input errors are derived from port-level (nsd) error counters */
4287 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4288 nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4290 IXL_SET_OERRORS(vsi, es->tx_errors);
4291 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4292 IXL_SET_OQDROPS(vsi, tx_discards);
4293 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4294 IXL_SET_COLLISIONS(vsi, 0);
4298 * Reset all of the stats for the given pf
/*
 * Zeroes both the accumulated port stats and their offsets, and marks the
 * offsets as not loaded so the next HW read re-captures baseline values.
 */
4301 ixl_pf_reset_stats(struct ixl_pf *pf)
4303 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4304 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4305 pf->stat_offsets_loaded = false;
4309 * Resets all stats of the given vsi
/*
 * Per-VSI analogue of ixl_pf_reset_stats(): clears the eth stats and their
 * offsets and forces a baseline re-capture on the next update.
 */
4312 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4314 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4315 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4316 vsi->stat_offsets_loaded = false;
4320 * Read and update a 48 bit stat from the hw
4322 * Since the device stats are not reset at PFReset, they likely will not
4323 * be zeroed when the driver starts. We'll save the first values read
4324 * and use them as offsets to be subtracted from the raw values in order
4325 * to report stats that count from zero.
4328 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4329 bool offset_loaded, u64 *offset, u64 *stat)
4333 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4334 new_data = rd64(hw, loreg);
4337 * Use two rd32's instead of one rd64; FreeBSD versions before
4338 * 10 don't support 64-bit bus reads/writes.
4340 new_data = rd32(hw, loreg);
4341 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4346 if (new_data >= *offset)
4347 *stat = new_data - *offset;
4349 *stat = (new_data + ((u64)1 << 48)) - *offset;
4350 *stat &= 0xFFFFFFFFFFFFULL;
4354 * Read and update a 32 bit stat from the hw
4357 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4358 bool offset_loaded, u64 *offset, u64 *stat)
4362 new_data = rd32(hw, reg);
4365 if (new_data >= *offset)
4366 *stat = (u32)(new_data - *offset);
4368 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
/*
 * ixl_add_device_sysctls - register the PF's sysctl tree: flow control,
 * speed/ITR tunables, FEC knobs (25G parts only), FW LLDP control, EEE
 * statistics, and a hidden (CTLFLAG_SKIP) "debug" subtree of diagnostic
 * handlers.  Conditionally-compiled sections (I2C, IOV) depend on build
 * options not visible in this abridged listing.
 */
4372 ixl_add_device_sysctls(struct ixl_pf *pf)
4374 device_t dev = pf->dev;
4375 struct i40e_hw *hw = &pf->hw;
4377 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4378 struct sysctl_oid_list *ctx_list =
4379 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
4381 struct sysctl_oid *debug_node;
4382 struct sysctl_oid_list *debug_list;
4384 struct sysctl_oid *fec_node;
4385 struct sysctl_oid_list *fec_list;
4387 struct sysctl_oid *eee_node;
4388 struct sysctl_oid_list *eee_list;
4390 /* Set up sysctls */
4391 SYSCTL_ADD_PROC(ctx, ctx_list,
4392 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
4393 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
4395 SYSCTL_ADD_PROC(ctx, ctx_list,
4396 OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
4397 pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
4399 SYSCTL_ADD_PROC(ctx, ctx_list,
4400 OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD,
4401 pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
4403 SYSCTL_ADD_PROC(ctx, ctx_list,
4404 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
4405 pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed");
4407 SYSCTL_ADD_PROC(ctx, ctx_list,
4408 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
4409 pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
4411 SYSCTL_ADD_PROC(ctx, ctx_list,
4412 OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
4413 pf, 0, ixl_sysctl_unallocated_queues, "I",
4414 "Queues not allocated to a PF or VF");
4416 SYSCTL_ADD_PROC(ctx, ctx_list,
4417 OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
4418 pf, 0, ixl_sysctl_pf_tx_itr, "I",
4419 "Immediately set TX ITR value for all queues");
4421 SYSCTL_ADD_PROC(ctx, ctx_list,
4422 OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
4423 pf, 0, ixl_sysctl_pf_rx_itr, "I",
4424 "Immediately set RX ITR value for all queues");
4426 SYSCTL_ADD_INT(ctx, ctx_list,
4427 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
4428 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
4430 SYSCTL_ADD_INT(ctx, ctx_list,
4431 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
4432 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
4434 SYSCTL_ADD_INT(ctx, ctx_list,
4435 OID_AUTO, "tx_ring_size", CTLFLAG_RD,
4436 &pf->vsi.num_tx_desc, 0, "TX ring size");
4438 SYSCTL_ADD_INT(ctx, ctx_list,
4439 OID_AUTO, "rx_ring_size", CTLFLAG_RD,
4440 &pf->vsi.num_rx_desc, 0, "RX ring size");
4442 /* Add FEC sysctls for 25G adapters */
4443 if (i40e_is_25G_device(hw->device_id)) {
4444 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
4445 OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
4446 fec_list = SYSCTL_CHILDREN(fec_node);
4448 SYSCTL_ADD_PROC(ctx, fec_list,
4449 OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW,
4450 pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
4452 SYSCTL_ADD_PROC(ctx, fec_list,
4453 OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW,
4454 pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
4456 SYSCTL_ADD_PROC(ctx, fec_list,
4457 OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW,
4458 pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
4460 SYSCTL_ADD_PROC(ctx, fec_list,
4461 OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW,
4462 pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
4464 SYSCTL_ADD_PROC(ctx, fec_list,
4465 OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW,
4466 pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
4469 SYSCTL_ADD_PROC(ctx, ctx_list,
4470 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW,
4471 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
/* Energy Efficient Ethernet subtree: enable knob + LPI status/counters */
4473 eee_node = SYSCTL_ADD_NODE(ctx, ctx_list,
4474 OID_AUTO, "eee", CTLFLAG_RD, NULL,
4475 "Energy Efficient Ethernet (EEE) Sysctls");
4476 eee_list = SYSCTL_CHILDREN(eee_node);
4478 SYSCTL_ADD_PROC(ctx, eee_list,
4479 OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW,
4480 pf, 0, ixl_sysctl_eee_enable, "I",
4481 "Enable Energy Efficient Ethernet (EEE)");
4483 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status",
4484 CTLFLAG_RD, &pf->stats.tx_lpi_status, 0,
4487 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status",
4488 CTLFLAG_RD, &pf->stats.rx_lpi_status, 0,
4491 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count",
4492 CTLFLAG_RD, &pf->stats.tx_lpi_count,
4495 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count",
4496 CTLFLAG_RD, &pf->stats.rx_lpi_count,
4498 /* Add sysctls meant to print debug information, but don't list them
4499 * in "sysctl -a" output. */
4500 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
4501 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
4502 debug_list = SYSCTL_CHILDREN(debug_node);
4504 SYSCTL_ADD_UINT(ctx, debug_list,
4505 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
4506 &pf->hw.debug_mask, 0, "Shared code debug message level");
4508 SYSCTL_ADD_UINT(ctx, debug_list,
4509 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
/* NOTE(review): "Non-hared" in this runtime string looks like a typo for
 * "Non-shared" — flagged only; string left untouched here. */
4510 &pf->dbg_mask, 0, "Non-hared code debug message level");
4512 SYSCTL_ADD_PROC(ctx, debug_list,
4513 OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
4514 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
4516 SYSCTL_ADD_PROC(ctx, debug_list,
4517 OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
4518 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
4520 SYSCTL_ADD_PROC(ctx, debug_list,
4521 OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
4522 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
4524 SYSCTL_ADD_PROC(ctx, debug_list,
4525 OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
4526 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
4528 SYSCTL_ADD_PROC(ctx, debug_list,
4529 OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
4530 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
4532 SYSCTL_ADD_PROC(ctx, debug_list,
4533 OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
4534 pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
4536 SYSCTL_ADD_PROC(ctx, debug_list,
4537 OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
4538 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
4540 SYSCTL_ADD_PROC(ctx, debug_list,
4541 OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
4542 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
4544 SYSCTL_ADD_PROC(ctx, debug_list,
4545 OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
4546 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
4548 SYSCTL_ADD_PROC(ctx, debug_list,
4549 OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD,
4550 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
4553 SYSCTL_ADD_PROC(ctx, debug_list,
4554 OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
4555 pf, 0, ixl_sysctl_read_i2c_byte, "I", "Read byte from I2C bus");
4557 SYSCTL_ADD_PROC(ctx, debug_list,
4558 OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
4559 pf, 0, ixl_sysctl_write_i2c_byte, "I", "Write byte to I2C bus");
4561 SYSCTL_ADD_PROC(ctx, debug_list,
4562 OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD,
4563 pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
4567 SYSCTL_ADD_UINT(ctx, debug_list,
4568 OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
4569 0, "PF/VF Virtual Channel debug level");
4574 * Primarily for finding out how many queues can be assigned to VFs,
/*
 * Read-only sysctl handler: reports the number of HW queues the queue
 * manager has not yet allocated to the PF or any VF.
 */
4578 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
4580 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4584 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
4587 return sysctl_handle_int(oidp, NULL, queues, req);
4591 ** Set flow control using sysctl:
/*
 * Sysctl handler for "fc": validates the requested mode (0-3), rejects the
 * change in FW recovery mode, programs the port via i40e_set_fc(), then
 * refreshes the link state.  A read without a new value just returns the
 * current setting.
 */
4598 ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4600 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4601 struct i40e_hw *hw = &pf->hw;
4602 device_t dev = pf->dev;
4603 int requested_fc, error = 0;
4604 enum i40e_status_code aq_error = 0;
4608 requested_fc = pf->fc;
4609 error = sysctl_handle_int(oidp, &requested_fc, 0, req);
/* Read-only access or copyin failure: nothing to apply */
4610 if ((error) || (req->newptr == NULL))
4612 if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) {
4613 device_printf(dev, "Interface is currently in FW recovery mode. "
4614 "Setting flow control not supported\n");
4617 if (requested_fc < 0 || requested_fc > 3) {
4619 "Invalid fc mode; valid modes are 0 through 3\n");
4623 /* Set fc ability for port */
4624 hw->fc.requested_mode = requested_fc;
4625 aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4628 "%s: Error setting new fc mode %d; fc_err %#x\n",
4629 __func__, aq_error, fc_aq_err);
4632 pf->fc = requested_fc;
4634 /* Get new link state */
4635 i40e_msec_delay(250);
4636 hw->phy.get_link_info = TRUE;
4637 i40e_get_link_status(hw, &pf->link_up);
/*
 * ixl_link_speed_string - map an enum i40e_aq_link_speed value to a
 * human-readable speed string from a local lookup table; unrecognized
 * values fall through to the table's "unknown" entry (index 0).
 * NOTE(review): the table contents and per-case index assignments are not
 * visible in this abridged listing.
 */
4644 ixl_link_speed_string(u8 link_speed)
4646 const char * link_speed_str[] = {
4659 switch (link_speed) {
4660 case I40E_LINK_SPEED_100MB:
4663 case I40E_LINK_SPEED_1GB:
4666 case I40E_LINK_SPEED_10GB:
4669 case I40E_LINK_SPEED_40GB:
4672 case I40E_LINK_SPEED_20GB:
4675 case I40E_LINK_SPEED_25GB:
4678 case I40E_LINK_SPEED_2_5GB:
4681 case I40E_LINK_SPEED_5GB:
4684 case I40E_LINK_SPEED_UNKNOWN:
4690 return (link_speed_str[index]);
/*
 * Read-only sysctl handler for "current_speed": refreshes link status,
 * then returns the current PHY link speed as a string.
 */
4694 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
4696 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4697 struct i40e_hw *hw = &pf->hw;
4700 ixl_update_link_status(pf);
4702 error = sysctl_handle_string(oidp,
4704 ixl_link_speed_string(hw->phy.link_info.link_speed)),
4711 * Converts 8-bit speeds value to and from sysctl flags and
4712 * Admin Queue flags.
/*
 * Each speedmap entry packs the AQ flag in the low byte and the
 * corresponding sysctl flag in the high byte; the loop translates every
 * set bit in the requested direction (to_aq selects sysctl->AQ).
 */
4715 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
4717 #define SPEED_MAP_SIZE 8
4718 static u16 speedmap[SPEED_MAP_SIZE] = {
4719 (I40E_LINK_SPEED_100MB | (0x1 << 8)),
4720 (I40E_LINK_SPEED_1GB | (0x2 << 8)),
4721 (I40E_LINK_SPEED_10GB | (0x4 << 8)),
4722 (I40E_LINK_SPEED_20GB | (0x8 << 8)),
4723 (I40E_LINK_SPEED_25GB | (0x10 << 8)),
4724 (I40E_LINK_SPEED_40GB | (0x20 << 8)),
4725 (I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
4726 (I40E_LINK_SPEED_5GB | (0x80 << 8)),
4730 for (int i = 0; i < SPEED_MAP_SIZE; i++) {
/* sysctl flag (high byte) -> AQ flag (low byte) */
4732 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
/* AQ flag (low byte) -> sysctl flag (high byte) */
4734 retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
/*
 * ixl_set_advertised_speeds - program the PHY's advertised link speeds.
 * Fetches the current PHY abilities, builds a new phy_config preserving
 * abilities/EEE/FEC settings while substituting the requested speed set
 * (raw AQ bitmap when from_aq, otherwise converted from sysctl flags),
 * and applies it with an atomic link restart.
 */
4741 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
4743 struct i40e_hw *hw = &pf->hw;
4744 device_t dev = pf->dev;
4745 struct i40e_aq_get_phy_abilities_resp abilities;
4746 struct i40e_aq_set_phy_config config;
4747 enum i40e_status_code aq_error = 0;
4749 /* Get current capability information */
4750 aq_error = i40e_aq_get_phy_capabilities(hw,
4751 FALSE, FALSE, &abilities, NULL);
4754 "%s: Error getting phy capabilities %d,"
4755 " aq error: %d\n", __func__, aq_error,
4756 hw->aq.asq_last_status);
4760 /* Prepare new config */
4761 bzero(&config, sizeof(config));
4763 config.link_speed = speeds;
4765 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
/* Carry over current abilities so only the speed set changes */
4766 config.phy_type = abilities.phy_type;
4767 config.phy_type_ext = abilities.phy_type_ext;
4768 config.abilities = abilities.abilities
4769 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4770 config.eee_capability = abilities.eee_capability;
4771 config.eeer = abilities.eeer_val;
4772 config.low_power_ctrl = abilities.d3_lpan;
4773 config.fec_config = abilities.fec_cfg_curr_mod_ext_info
4774 & I40E_AQ_PHY_FEC_CONFIG_MASK;
4776 /* Do aq command & restart link */
4777 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4780 "%s: Error setting new phy config %d,"
4781 " aq error: %d\n", __func__, aq_error,
4782 hw->aq.asq_last_status);
4790 ** Supported link speeds
/*
 * Read-only sysctl handler: reports the adapter's supported link speeds
 * as the sysctl flag bitmap (converted from the AQ speed bitmap).
 */
4802 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
4804 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4805 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
4807 return sysctl_handle_int(oidp, NULL, supported, req);
4811 ** Control link advertise speed:
4813 ** 0x1 - advertise 100 Mb
4814 ** 0x2 - advertise 1G
4815 ** 0x4 - advertise 10G
4816 ** 0x8 - advertise 20G
4817 ** 0x10 - advertise 25G
4818 ** 0x20 - advertise 40G
4819 ** 0x40 - advertise 2.5G
4820 ** 0x80 - advertise 5G
4822 ** Set to 0 to disable link
/*
 * Sysctl handler for "advertise_speed": validates the flag bitmap against
 * the adapter's supported speeds (and rejects changes in FW recovery
 * mode), then applies it via ixl_set_advertised_speeds() and refreshes
 * link status on success.
 */
4825 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
4827 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4828 device_t dev = pf->dev;
4829 u8 converted_speeds;
4830 int requested_ls = 0;
4833 /* Read in new mode */
4834 requested_ls = pf->advertised_speed;
4835 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
/* Read-only access or copyin failure: nothing to apply */
4836 if ((error) || (req->newptr == NULL))
4838 if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) {
4839 device_printf(dev, "Interface is currently in FW recovery mode. "
4840 "Setting advertise speed not supported\n");
4844 /* Error out if bits outside of possible flag range are set */
4845 if ((requested_ls & ~((u8)0xFF)) != 0) {
4846 device_printf(dev, "Input advertised speed out of range; "
4847 "valid flags are: 0x%02x\n",
4848 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
4852 /* Check if adapter supports input value */
4853 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
4854 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
4855 device_printf(dev, "Invalid advertised speed; "
4856 "valid flags are: 0x%02x\n",
4857 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
4861 error = ixl_set_advertised_speeds(pf, requested_ls, false);
4865 pf->advertised_speed = requested_ls;
4866 ixl_update_link_status(pf);
4871 * Input: bitmap of enum i40e_aq_link_speed
4874 ixl_max_aq_speed_to_value(u8 link_speeds)
4876 if (link_speeds & I40E_LINK_SPEED_40GB)
4878 if (link_speeds & I40E_LINK_SPEED_25GB)
4880 if (link_speeds & I40E_LINK_SPEED_20GB)
4882 if (link_speeds & I40E_LINK_SPEED_10GB)
4884 if (link_speeds & I40E_LINK_SPEED_5GB)
4886 if (link_speeds & I40E_LINK_SPEED_2_5GB)
4887 return IF_Mbps(2500);
4888 if (link_speeds & I40E_LINK_SPEED_1GB)
4890 if (link_speeds & I40E_LINK_SPEED_100MB)
4891 return IF_Mbps(100);
4893 /* Minimum supported link speed */
4894 return IF_Mbps(100);
4898 ** Get the width and transaction speed of
4899 ** the bus this adapter is plugged into.
/*
 * Reads the PCIe Link Status register, records bus speed/width in the hw
 * struct, prints them, and warns when the slot's aggregate bandwidth may
 * be insufficient for all ports at maximum link speed.  X722 parts are
 * skipped since they do not sit on an external PCIe bus.
 */
4902 ixl_get_bus_info(struct ixl_pf *pf)
4904 struct i40e_hw *hw = &pf->hw;
4905 device_t dev = pf->dev;
4907 u32 offset, num_ports;
4910 /* Some devices don't use PCIE */
4911 if (hw->mac.type == I40E_MAC_X722)
4914 /* Read PCI Express Capabilities Link Status Register */
4915 pci_find_cap(dev, PCIY_EXPRESS, &offset);
4916 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4918 /* Fill out hw struct with PCIE info */
4919 i40e_set_pci_config_data(hw, link);
4921 /* Use info to print out bandwidth messages */
4922 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4923 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4924 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4925 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4926 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4927 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4928 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
4929 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4933 * If adapter is in slot with maximum supported speed,
4934 * no warning message needs to be printed out.
4936 if (hw->bus.speed >= i40e_bus_speed_8000
4937 && hw->bus.width >= i40e_bus_width_pcie_x8)
/* Rough heuristic: total port bandwidth vs. speed*width of the slot */
4940 num_ports = bitcount32(hw->func_caps.valid_functions);
4941 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
4943 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
4944 device_printf(dev, "PCI-Express bandwidth available"
4945 " for this device may be insufficient for"
4946 " optimal performance.\n");
4947 device_printf(dev, "Please move the device to a different"
4948 " PCI-e link with more lanes and/or higher"
4949 " transfer rate.\n");
/*
 * Read-only sysctl handler for "fw_version": formats the NVM/FW version
 * string into an sbuf and returns it to userland.
 */
4954 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4956 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4957 struct i40e_hw *hw = &pf->hw;
4960 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4961 ixl_nvm_version_str(hw, sbuf);
/*
 * ixl_print_nvm_cmd - debug dump of an NVM update request.  The very
 * frequent "Get Driver Status" poll (READ, config ptr/flags 0xF/0xF,
 * offset 0, size 1) is deliberately not printed to avoid log spam.
 */
4969 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
4971 if ((nvma->command == I40E_NVM_READ) &&
4972 ((nvma->config & 0xFF) == 0xF) &&
4973 (((nvma->config & 0xF00) >> 8) == 0xF) &&
4974 (nvma->offset == 0) &&
4975 (nvma->data_size == 1)) {
4976 // device_printf(dev, "- Get Driver Status Command\n");
4978 else if (nvma->command == I40E_NVM_READ) {
4982 switch (nvma->command) {
4984 device_printf(dev, "- command: I40E_NVM_READ\n");
4987 device_printf(dev, "- command: I40E_NVM_WRITE\n");
4990 device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
/* config low byte = module pointer, bits 8-11 = flags */
4994 device_printf(dev, "- config (ptr) : 0x%02x\n", nvma->config & 0xFF);
4995 device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
4996 device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
4997 device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
/*
 * SIOCxDRVSPEC handler for I40E_NVM_ACCESS requests from the Intel NVM
 * update tool.  Validates the ifdrv length/data pointer, copies the
 * request in from user space, waits out any in-progress EMPR reset,
 * forwards the request to i40e_nvmupd_command(), and copies the result
 * back out.  The nvma buffer is freed on every exit path.
 */
5002 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
5004 struct i40e_hw *hw = &pf->hw;
5005 struct i40e_nvm_access *nvma;
5006 device_t dev = pf->dev;
5007 enum i40e_status_code status = 0;
5008 size_t nvma_size, ifd_len, exp_len;
5011 DEBUGFUNC("ixl_handle_nvmupd_cmd");
/* The request must be at least as large as the access header struct. */
5014 nvma_size = sizeof(struct i40e_nvm_access);
5015 ifd_len = ifd->ifd_len;
5017 if (ifd_len < nvma_size ||
5018 ifd->ifd_data == NULL) {
5019 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
5021 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
5022 __func__, ifd_len, nvma_size);
5023 device_printf(dev, "%s: data pointer: %p\n", __func__,
5028 nvma = malloc(ifd_len, M_DEVBUF, M_WAITOK);
5029 err = copyin(ifd->ifd_data, nvma, ifd_len);
5031 device_printf(dev, "%s: Cannot get request from user space\n",
5033 free(nvma, M_DEVBUF);
5037 if (pf->dbg_mask & IXL_DBG_NVMUPD)
5038 ixl_print_nvm_cmd(dev, nvma);
/*
 * If an EMPR reset is in progress, poll up to ~10 s (100 x 100 ms)
 * for it to finish before touching the NVM.
 */
5040 if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
5042 while (count++ < 100) {
5043 i40e_msec_delay(100);
5044 if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING))
5049 if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
5050 free(nvma, M_DEVBUF);
5054 if (nvma->data_size < 1 || nvma->data_size > 4096) {
5055 device_printf(dev, "%s: invalid request, data size not in supported range\n",
5057 free(nvma, M_DEVBUF);
5062 * Older versions of the NVM update tool don't set ifd_len to the size
5063 * of the entire buffer passed to the ioctl. Check the data_size field
5064 * in the contained i40e_nvm_access struct and ensure everything is
5065 * copied in from userspace.
5067 exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
5069 if (ifd_len < exp_len) {
/*
 * NOTE(review): elided line(s) here presumably grow ifd_len to
 * exp_len before the realloc/copyin — confirm in the full source,
 * otherwise this re-copies the same short length.
 */
5071 nvma = realloc(nvma, ifd_len, M_DEVBUF, M_WAITOK);
5072 err = copyin(ifd->ifd_data, nvma, ifd_len);
5074 device_printf(dev, "%s: Cannot get request from user space\n",
5076 free(nvma, M_DEVBUF);
5082 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
5085 err = copyout(nvma, ifd->ifd_data, ifd_len);
5086 free(nvma, M_DEVBUF);
5088 device_printf(dev, "%s: Cannot return data to user space\n",
5093 /* Let the nvmupdate report errors, show them only when debug is enabled */
5094 if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
5095 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
5096 i40e_stat_str(hw, status), perrno);
5099 * -EPERM is actually ERESTART, which the kernel interprets as it needing
5100 * to run this ioctl again. So use -EACCES for -EPERM instead.
5102 if (perrno == -EPERM)
/*
 * SIOCGI2C handler: read SFP/QSFP module EEPROM bytes over I2C on
 * behalf of user space (e.g. ifconfig -v).  Copies in the ifi2creq,
 * validates the device address (0xA0/0xA2 only) and requested length,
 * reads byte-by-byte via pf->read_i2c_byte, and copies the result back.
 * The paired copyin/copyout calls are the pre/post-ifr_data_get_ptr()
 * variants selected by an elided #if in the full source.
 */
5109 ixl_handle_i2c_eeprom_read_cmd(struct ixl_pf *pf, struct ifreq *ifr)
5111 struct ifi2creq i2c;
/* No I2C read method on this hardware — request cannot be serviced. */
5115 if (pf->read_i2c_byte == NULL)
5119 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
5121 error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
5126 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
5130 if (i2c.len > sizeof(i2c.data)) {
5135 for (i = 0; i < i2c.len; ++i) {
5136 if (pf->read_i2c_byte(pf, i2c.offset + i,
5137 i2c.dev_addr, &i2c.data[i]))
5142 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
5144 error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
5150 /*********************************************************************
5152 * Media Ioctl callback
5154 * This routine is called whenever the user queries the status of
5155 * the interface using ifconfig.
5157 * When adding new media types here, make sure to add them to
5158 * ixl_add_ifmedia(), too.
5160 **********************************************************************/
/*
 * ifmedia status callback (ifconfig queries): refresh link state from
 * firmware, then translate the firmware PHY type into an ifmedia active
 * word, always reporting full duplex, and append TX/RX pause flags from
 * the autonegotiation info.  The PF is left untouched while an EMPR
 * reset is in progress.
 * NOTE(review): this extract is line-elided — the per-case break
 * statements and locking calls are not visible here; do not read the
 * switch as falling through.
 */
5162 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
5164 struct ixl_vsi *vsi = ifp->if_softc;
5165 struct ixl_pf *pf = vsi->back;
5166 struct i40e_hw *hw = &pf->hw;
5168 INIT_DEBUGOUT("ixl_media_status: begin");
5170 /* Don't touch PF during reset */
5171 if (atomic_load_acq_int(&pf->state) & IXL_PF_STATE_EMPR_RESETTING)
5176 i40e_get_link_status(hw, &pf->link_up);
5177 ixl_update_link_status(pf);
5179 ifmr->ifm_status = IFM_AVALID;
5180 ifmr->ifm_active = IFM_ETHER;
5187 ifmr->ifm_status |= IFM_ACTIVE;
5189 /* Hardware always does full-duplex */
5190 ifmr->ifm_active |= IFM_FDX;
/* Map the firmware-reported PHY type to the matching ifmedia subtype. */
5192 switch (hw->phy.link_info.phy_type) {
5194 case I40E_PHY_TYPE_100BASE_TX:
5195 ifmr->ifm_active |= IFM_100_TX;
5198 case I40E_PHY_TYPE_1000BASE_T:
5199 ifmr->ifm_active |= IFM_1000_T;
5201 case I40E_PHY_TYPE_1000BASE_SX:
5202 ifmr->ifm_active |= IFM_1000_SX;
5204 case I40E_PHY_TYPE_1000BASE_LX:
5205 ifmr->ifm_active |= IFM_1000_LX;
5207 case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
5208 ifmr->ifm_active |= IFM_1000_T;
5211 case I40E_PHY_TYPE_2_5GBASE_T:
5212 ifmr->ifm_active |= IFM_2500_T;
5215 case I40E_PHY_TYPE_5GBASE_T:
5216 ifmr->ifm_active |= IFM_5000_T;
5219 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
5220 ifmr->ifm_active |= IFM_10G_TWINAX;
5222 case I40E_PHY_TYPE_10GBASE_SR:
5223 ifmr->ifm_active |= IFM_10G_SR;
5225 case I40E_PHY_TYPE_10GBASE_LR:
5226 ifmr->ifm_active |= IFM_10G_LR;
5228 case I40E_PHY_TYPE_10GBASE_T:
5229 ifmr->ifm_active |= IFM_10G_T;
5231 case I40E_PHY_TYPE_XAUI:
5232 case I40E_PHY_TYPE_XFI:
5233 ifmr->ifm_active |= IFM_10G_TWINAX;
5235 case I40E_PHY_TYPE_10GBASE_AOC:
5236 ifmr->ifm_active |= IFM_10G_AOC;
5239 case I40E_PHY_TYPE_25GBASE_KR:
5240 ifmr->ifm_active |= IFM_25G_KR;
5242 case I40E_PHY_TYPE_25GBASE_CR:
5243 ifmr->ifm_active |= IFM_25G_CR;
5245 case I40E_PHY_TYPE_25GBASE_SR:
5246 ifmr->ifm_active |= IFM_25G_SR;
5248 case I40E_PHY_TYPE_25GBASE_LR:
5249 ifmr->ifm_active |= IFM_25G_LR;
5251 case I40E_PHY_TYPE_25GBASE_AOC:
5252 ifmr->ifm_active |= IFM_25G_AOC;
5254 case I40E_PHY_TYPE_25GBASE_ACC:
5255 ifmr->ifm_active |= IFM_25G_ACC;
5258 case I40E_PHY_TYPE_40GBASE_CR4:
5259 case I40E_PHY_TYPE_40GBASE_CR4_CU:
5260 ifmr->ifm_active |= IFM_40G_CR4;
5262 case I40E_PHY_TYPE_40GBASE_SR4:
5263 ifmr->ifm_active |= IFM_40G_SR4;
5265 case I40E_PHY_TYPE_40GBASE_LR4:
5266 ifmr->ifm_active |= IFM_40G_LR4;
5268 case I40E_PHY_TYPE_XLAUI:
5269 ifmr->ifm_active |= IFM_OTHER;
5271 case I40E_PHY_TYPE_1000BASE_KX:
5272 ifmr->ifm_active |= IFM_1000_KX;
5274 case I40E_PHY_TYPE_SGMII:
5275 ifmr->ifm_active |= IFM_1000_SGMII;
5277 /* ERJ: What's the difference between these? */
5278 case I40E_PHY_TYPE_10GBASE_CR1_CU:
5279 case I40E_PHY_TYPE_10GBASE_CR1:
5280 ifmr->ifm_active |= IFM_10G_CR1;
5282 case I40E_PHY_TYPE_10GBASE_KX4:
5283 ifmr->ifm_active |= IFM_10G_KX4;
5285 case I40E_PHY_TYPE_10GBASE_KR:
5286 ifmr->ifm_active |= IFM_10G_KR;
5288 case I40E_PHY_TYPE_SFI:
5289 ifmr->ifm_active |= IFM_10G_SFI;
5291 /* Our single 20G media type */
5292 case I40E_PHY_TYPE_20GBASE_KR2:
5293 ifmr->ifm_active |= IFM_20G_KR2;
5295 case I40E_PHY_TYPE_40GBASE_KR4:
5296 ifmr->ifm_active |= IFM_40G_KR4;
5298 case I40E_PHY_TYPE_XLPPI:
5299 case I40E_PHY_TYPE_40GBASE_AOC:
5300 ifmr->ifm_active |= IFM_40G_XLPPI;
5302 /* Unknown to driver */
5304 ifmr->ifm_active |= IFM_UNKNOWN;
5307 /* Report flow control status as well */
5308 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
5309 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
5310 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
5311 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
/*
 * NOTE(review): function header elided from this extract.  The void*
 * arg cast to ixl_pf plus the ixl_init_locked() call match the driver's
 * init entry point — confirm the signature in the full source.
 */
5319 struct ixl_pf *pf = arg;
5322 ixl_init_locked(pf);
5327 * NOTE: Fortville does not support forcing media speeds. Instead,
5328 * use the set_advertise sysctl to set the speeds Fortville
5329 * will advertise or be allowed to operate at.
/*
 * ifmedia change callback.  Fortville firmware does not support forcing
 * media speeds (see the comment above this function), so this only
 * validates that the request is for Ethernet media and points the user
 * at the 'advertise_speed' sysctl instead of changing anything.
 */
5332 ixl_media_change(struct ifnet * ifp)
5334 struct ixl_vsi *vsi = ifp->if_softc;
5335 struct ifmedia *ifm = &vsi->media;
5337 INIT_DEBUGOUT("ixl_media_change: begin");
5339 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
5342 if_printf(ifp, "Use 'advertise_speed' sysctl to change advertised speeds\n");
5347 /*********************************************************************
5350 * ixl_ioctl is called when the user wants to configure the
5353 * return 0 on success, positive on failure
5354 **********************************************************************/
/*
 * Interface ioctl entry point.  While the PF is in recovery mode only
 * the NVM update path (SIOCxDRVSPEC carrying I40E_NVM_ACCESS) is
 * serviced; otherwise this dispatches address, MTU, flags, multicast,
 * media, capability and I2C requests, deferring anything unrecognized
 * to ether_ioctl().  Returns 0 on success, positive errno on failure.
 * NOTE(review): extract is line-elided — case labels, break statements
 * and the IXL_PF_LOCK/UNLOCK pairs around init/stop are not visible.
 */
5357 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
5359 struct ixl_vsi *vsi = ifp->if_softc;
5360 struct ixl_pf *pf = vsi->back;
5361 struct ifreq *ifr = (struct ifreq *)data;
5362 struct ifdrv *ifd = (struct ifdrv *)data;
5363 #if defined(INET) || defined(INET6)
5364 struct ifaddr *ifa = (struct ifaddr *)data;
5365 bool avoid_reset = FALSE;
/* Recovery mode: only the NVM-access driver-specific ioctl is allowed. */
5369 if ((atomic_load_acq_int(&pf->state) & IXL_PF_STATE_RECOVERY_MODE) != 0) {
5370 /* We are in recovery mode supporting only NVM update */
5374 IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
5377 /* NVM update command */
5378 if (ifd->ifd_cmd == I40E_NVM_ACCESS)
5379 error = ixl_handle_nvmupd_cmd(pf, ifd);
5394 IOCTL_DEBUGOUT("ioctl: SIOCSIFADDR (Set Interface Address)");
5396 if (ifa->ifa_addr->sa_family == AF_INET)
5400 if (ifa->ifa_addr->sa_family == AF_INET6)
5403 #if defined(INET) || defined(INET6)
5405 ** Calling init results in link renegotiation,
5406 ** so we avoid doing it when possible.
5409 ifp->if_flags |= IFF_UP;
5410 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
5413 if (!(ifp->if_flags & IFF_NOARP))
5414 arp_ifinit(ifp, ifa);
5417 error = ether_ioctl(ifp, command, data);
5421 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
/* Reject MTUs that would exceed the hardware maximum frame size. */
5422 if (ifr->ifr_mtu > IXL_MAX_FRAME -
5423 ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
5427 ifp->if_mtu = ifr->ifr_mtu;
5428 vsi->max_frame_size =
5429 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
5430 + ETHER_VLAN_ENCAP_LEN;
5431 ixl_init_locked(pf);
5436 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
/* Re-init only when promisc/allmulti changed; otherwise start/stop. */
5438 if (ifp->if_flags & IFF_UP) {
5439 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5440 if ((ifp->if_flags ^ pf->if_flags) &
5441 (IFF_PROMISC | IFF_ALLMULTI)) {
5442 ixl_set_promisc(vsi);
5445 ixl_init_locked(pf);
5446 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5447 ixl_stop_locked(pf);
5449 pf->if_flags = ifp->if_flags;
5454 IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
5457 /* NVM update command */
5458 if (ifd->ifd_cmd == I40E_NVM_ACCESS)
5459 error = ixl_handle_nvmupd_cmd(pf, ifd);
5464 IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
/* Quiesce ring interrupts while the multicast filter list is rebuilt. */
5465 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5467 ixl_disable_rings_intr(vsi);
5469 ixl_enable_intr(vsi);
5474 IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
5475 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5477 ixl_disable_rings_intr(vsi);
5479 ixl_enable_intr(vsi);
5486 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
5487 error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
5491 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5492 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
5494 ixl_cap_txcsum_tso(vsi, ifp, mask);
5496 if (mask & IFCAP_RXCSUM)
5497 ifp->if_capenable ^= IFCAP_RXCSUM;
5498 if (mask & IFCAP_RXCSUM_IPV6)
5499 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
5500 if (mask & IFCAP_LRO)
5501 ifp->if_capenable ^= IFCAP_LRO;
5502 if (mask & IFCAP_VLAN_HWTAGGING)
5503 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5504 if (mask & IFCAP_VLAN_HWFILTER)
5505 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
5506 if (mask & IFCAP_VLAN_HWTSO)
5507 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5508 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5510 ixl_init_locked(pf);
5513 VLAN_CAPABILITIES(ifp);
5517 #if __FreeBSD_version >= 1003000
5520 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
5524 error = ixl_handle_i2c_eeprom_read_cmd(pf, ifr);
5529 IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
5530 error = ether_ioctl(ifp, command, data);
/*
 * Scan the four I40E_GLGEN_MDIO_I2C_SEL registers for the one that has
 * I2C enabled and whose PHY port number matches this PF's port; the
 * matching register index identifies this port's I2C interface.
 * NOTE(review): the port comparison operand and the return statements
 * are elided from this extract.
 */
5538 ixl_find_i2c_interface(struct ixl_pf *pf)
5540 struct i40e_hw *hw = &pf->hw;
5541 bool i2c_en, port_matched;
5544 for (int i = 0; i < 4; i++) {
5545 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
5546 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
5547 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
5548 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
5550 if (i2c_en && port_matched)
/*
 * Translate a PHY-type bit position into a human-readable name.
 * 'ext' selects the 8-entry extended table; otherwise the 32-entry base
 * table is used.  Out-of-range positions return "Invalid"/"Invalid_Ext"
 * rather than indexing past the tables.
 * NOTE(review): all but one entry of both string tables are elided from
 * this extract.
 */
5558 ixl_phy_type_string(u32 bit_pos, bool ext)
5560 static char * phy_types_str[32] = {
5590 "1000BASE-T Optical",
5594 static char * ext_phy_types_str[8] = {
5605 if (ext && bit_pos > 7) return "Invalid_Ext";
5606 if (bit_pos > 31) return "Invalid";
5608 return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
/*
 * Name a PHY type as reported by Get Link Status: extended PHY types
 * are encoded at an offset of 0x1F, so values in that range are mapped
 * back to an extended-table bit position.
 * NOTE(review): the threshold test selecting between the two return
 * statements is elided from this extract — presumably val >= 0x1F.
 */
5612 ixl_phy_type_string_ls(u8 val)
5615 return ixl_phy_type_string(val - 0x1F, true);
5617 return ixl_phy_type_string(val, false);
/*
 * Issue an admin-queue Get Link Status command (with link status event
 * reporting enabled) and copy the raw response into *link_status.
 * Logs the AQ error and returns early on failure.
 * NOTE(review): command_flags is set through the caller's link_status
 * pointer rather than through aq_link_status (the descriptor overlay) —
 * verify against the full source that this is intended.
 */
5621 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
5623 device_t dev = pf->dev;
5624 struct i40e_hw *hw = &pf->hw;
5625 struct i40e_aq_desc desc;
5626 enum i40e_status_code status;
5628 struct i40e_aqc_get_link_status *aq_link_status =
5629 (struct i40e_aqc_get_link_status *)&desc.params.raw;
5631 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
5632 link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
5633 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
5636 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
5637 __func__, i40e_stat_str(hw, status),
5638 i40e_aq_str(hw, hw->aq.asq_last_status));
/* Hand the raw AQ response back to the caller. */
5642 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
/*
 * Sysctl (read-only): fetch the current AQ link status via
 * ixl_aq_get_link_status() and format its raw fields (PHY type, speed,
 * link/AN/ext info, loopback, max frame size, power) into the reply.
 */
5647 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
5649 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5650 device_t dev = pf->dev;
5654 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5656 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5660 struct i40e_aqc_get_link_status link_status;
5661 error = ixl_aq_get_link_status(pf, &link_status);
5667 sbuf_printf(buf, "\n"
5668 "PHY Type : 0x%02x<%s>\n"
5670 "Link info: 0x%02x\n"
5671 "AN info : 0x%02x\n"
5672 "Ext info : 0x%02x\n"
5673 "Loopback : 0x%02x\n"
5677 link_status.phy_type,
5678 ixl_phy_type_string_ls(link_status.phy_type),
5679 link_status.link_speed,
5680 link_status.link_info,
5681 link_status.an_info,
5682 link_status.ext_info,
5683 link_status.loopback,
5684 link_status.max_frame_size,
5686 link_status.power_desc);
5688 error = sbuf_finish(buf);
5690 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl (read-only): query PHY capabilities from firmware and dump
 * them — the PHY type masks (decoded bit-by-bit into names), supported
 * link speeds, ability flags, EEE/EEER, PHY ID, module type, and the
 * current FEC configuration nibbles.
 */
5697 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
5699 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5700 struct i40e_hw *hw = &pf->hw;
5701 device_t dev = pf->dev;
5702 enum i40e_status_code status;
5703 struct i40e_aq_get_phy_abilities_resp abilities;
5707 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5709 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5713 status = i40e_aq_get_phy_capabilities(hw,
5714 FALSE, FALSE, &abilities, NULL);
5717 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
5718 __func__, i40e_stat_str(hw, status),
5719 i40e_aq_str(hw, hw->aq.asq_last_status));
5724 sbuf_printf(buf, "\n"
5726 abilities.phy_type);
/* Decode each set bit of the 32-bit base PHY-type mask into a name. */
5728 if (abilities.phy_type != 0) {
5729 sbuf_printf(buf, "<");
5730 for (int i = 0; i < 32; i++)
5731 if ((1 << i) & abilities.phy_type)
5732 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
5733 sbuf_printf(buf, ">");
5736 sbuf_printf(buf, "\nPHY Ext : %02x",
5737 abilities.phy_type_ext);
/* Same decode for the extended PHY-type mask (low 4 bits used here). */
5739 if (abilities.phy_type_ext != 0) {
5740 sbuf_printf(buf, "<");
5741 for (int i = 0; i < 4; i++)
5742 if ((1 << i) & abilities.phy_type_ext)
5743 sbuf_printf(buf, "%s,",
5744 ixl_phy_type_string(i, true));
5745 sbuf_printf(buf, ">");
5748 sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed);
5749 if (abilities.link_speed != 0) {
5751 sbuf_printf(buf, " <");
5752 for (int i = 0; i < 8; i++) {
5753 link_speed = (1 << i) & abilities.link_speed;
5755 sbuf_printf(buf, "%s, ",
5756 ixl_link_speed_string(link_speed));
5758 sbuf_printf(buf, ">");
5761 sbuf_printf(buf, "\n"
5766 "ID : %02x %02x %02x %02x\n"
5767 "ModType : %02x %02x %02x\n"
5771 abilities.abilities, abilities.eee_capability,
5772 abilities.eeer_val, abilities.d3_lpan,
5773 abilities.phy_id[0], abilities.phy_id[1],
5774 abilities.phy_id[2], abilities.phy_id[3],
5775 abilities.module_type[0], abilities.module_type[1],
/* High 3 bits = FEC enable-request nibble, low 5 bits = FEC ability. */
5776 abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
5777 abilities.fec_cfg_curr_mod_ext_info & 0x1F,
5778 abilities.ext_comp_code);
5780 error = sbuf_finish(buf);
5782 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl (read-only): dump the VSI's software MAC/VLAN filter list,
 * one "MAC, vlan, flags" line per entry.  An empty list reports
 * "(none)".  The output buffer is sized from a per-entry length and
 * filled with sprintf through a walking pointer (buf_i).
 * NOTE(review): the entry-count pass, entry_len value, the M_NOWAIT
 * malloc failure check and the buf_i advance are elided here.
 */
5789 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5791 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5792 struct ixl_vsi *vsi = &pf->vsi;
5793 struct ixl_mac_filter *f;
5798 int ftl_counter = 0;
5802 SLIST_FOREACH(f, &vsi->ftl, next) {
5807 sysctl_handle_string(oidp, "(none)", 6, req);
5811 buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5812 buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
5814 sprintf(buf_i++, "\n");
5815 SLIST_FOREACH(f, &vsi->ftl, next) {
5817 MAC_FORMAT ", vlan %4d, flags %#06x",
5818 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5820 /* don't print '\n' for last entry */
5821 if (++ftl_counter != ftl_len) {
5822 sprintf(buf_i, "\n");
5827 error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5829 printf("sysctl error: %d\n", error);
5830 free(buf, M_DEVBUF);
5834 #define IXL_SW_RES_SIZE 0x14
5836 ixl_res_alloc_cmp(const void *a, const void *b)
5838 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5839 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5840 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5842 return ((int)one->resource_type - (int)two->resource_type);
5846 * Longest string length: 25
5849 ixl_switch_res_type_string(u8 type)
5851 static char * ixl_switch_res_type_strings[0x14] = {
5854 "Perfect Match MAC address",
5857 "Multicast hash entry",
5858 "Unicast hash entry",
5862 "VLAN Statistic Pool",
5865 "Inner VLAN Forward filter",
5875 return ixl_switch_res_type_strings[type];
5877 return "(Reserved)";
/*
 * Sysctl (read-only): query the hardware switch resource allocation
 * table from firmware, sort the entries by resource type, and print a
 * guaranteed/total/used/unallocated table, one row per entry.
 */
5881 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5883 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5884 struct i40e_hw *hw = &pf->hw;
5885 device_t dev = pf->dev;
5887 enum i40e_status_code status;
5891 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5893 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5895 device_printf(dev, "Could not allocate sbuf for output.\n");
5899 bzero(resp, sizeof(resp));
5900 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5906 "%s: get_switch_resource_alloc() error %s, aq error %s\n",
5907 __func__, i40e_stat_str(hw, status),
5908 i40e_aq_str(hw, hw->aq.asq_last_status));
5913 /* Sort entries by type for display */
5914 qsort(resp, num_entries,
5915 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5916 &ixl_res_alloc_cmp);
5918 sbuf_cat(buf, "\n");
5919 sbuf_printf(buf, "# of entries: %d\n", num_entries);
5921 " Type | Guaranteed | Total | Used | Un-allocated\n"
5922 " | (this) | (all) | (this) | (all) \n");
5923 for (int i = 0; i < num_entries; i++) {
5925 "%25s | %10d %5d %6d %12d",
5926 ixl_switch_res_type_string(resp[i].resource_type),
5930 resp[i].total_unalloced);
/* No trailing newline after the last row. */
5931 if (i < num_entries - 1)
5932 sbuf_cat(buf, "\n");
5935 error = sbuf_finish(buf);
5937 device_printf(dev, "Error finishing sbuf: %d\n", error);
5944 ** Caller must init and delete sbuf; this function will clear and
5945 ** finish it for caller.
5947 ** XXX: Cannot use the SEID for this, since there is no longer a
5948 ** fixed mapping between SEID and element type.
/*
 * Format a switch configuration element as "TYPE info" text into the
 * caller-supplied sbuf and return its data pointer.  Per the comment
 * above this function: caller inits/deletes the sbuf; this function
 * clears and finishes it (the clear/finish calls are elided from this
 * extract).  SEIDs cannot identify element types, hence the switch on
 * element_type.
 */
5951 ixl_switch_element_string(struct sbuf *s,
5952 struct i40e_aqc_switch_config_element_resp *element)
5956 switch (element->element_type) {
5957 case I40E_AQ_SW_ELEM_TYPE_MAC:
5958 sbuf_printf(s, "MAC %3d", element->element_info);
5960 case I40E_AQ_SW_ELEM_TYPE_PF:
5961 sbuf_printf(s, "PF %3d", element->element_info);
5963 case I40E_AQ_SW_ELEM_TYPE_VF:
5964 sbuf_printf(s, "VF %3d", element->element_info);
5966 case I40E_AQ_SW_ELEM_TYPE_EMP:
5969 case I40E_AQ_SW_ELEM_TYPE_BMC:
5972 case I40E_AQ_SW_ELEM_TYPE_PV:
5975 case I40E_AQ_SW_ELEM_TYPE_VEB:
5978 case I40E_AQ_SW_ELEM_TYPE_PA:
5981 case I40E_AQ_SW_ELEM_TYPE_VSI:
5982 sbuf_printf(s, "VSI %3d", element->element_info);
5990 return sbuf_data(s);
/*
 * Sysctl (read-only): fetch the first page of the firmware switch
 * configuration and print a table of elements — SEID plus a name
 * (formatted into a scratch sbuf by ixl_switch_element_string), uplink
 * and downlink SEIDs, and connection type.  A non-zero 'next' cookie
 * (more than one page of elements) is only reported as a TODO.
 */
5994 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5996 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5997 struct i40e_hw *hw = &pf->hw;
5998 device_t dev = pf->dev;
6001 enum i40e_status_code status;
6004 u8 aq_buf[I40E_AQ_LARGE_BUF];
6006 struct i40e_aqc_get_switch_config_resp *sw_config;
6007 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
6009 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6011 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
6015 status = i40e_aq_get_switch_config(hw, sw_config,
6016 sizeof(aq_buf), &next, NULL);
6019 "%s: aq_get_switch_config() error %s, aq error %s\n",
6020 __func__, i40e_stat_str(hw, status),
6021 i40e_aq_str(hw, hw->aq.asq_last_status));
6026 device_printf(dev, "%s: TODO: get more config with SEID %d\n",
/* Scratch sbuf reused for each element's name string. */
6029 nmbuf = sbuf_new_auto();
6031 device_printf(dev, "Could not allocate sbuf for name output.\n");
6036 sbuf_cat(buf, "\n");
6037 /* Assuming <= 255 elements in switch */
6038 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
6039 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
6041 ** Revision -- all elements are revision 1 for now
6044 "SEID ( Name ) | Uplink | Downlink | Conn Type\n"
6045 " | | | (uplink)\n");
6046 for (int i = 0; i < sw_config->header.num_reported; i++) {
6047 // "%4d (%8s) | %8s %8s %#8x",
6048 sbuf_printf(buf, "%4d", sw_config->element[i].seid);
6050 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
6051 &sw_config->element[i]));
6052 sbuf_cat(buf, " | ");
6053 sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
6055 sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
6057 sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
6058 if (i < sw_config->header.num_reported - 1)
6059 sbuf_cat(buf, "\n");
6063 error = sbuf_finish(buf);
6065 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl (read-only): dump the PF's RSS hash key as a hex/ASCII table.
 * On X722 the key is fetched through the admin queue; on other MACs it
 * is assembled 4 bytes at a time from the I40E_PFQF_HKEY registers.
 */
6073 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
6075 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6076 struct i40e_hw *hw = &pf->hw;
6077 device_t dev = pf->dev;
6080 enum i40e_status_code status;
6083 struct i40e_aqc_get_set_rss_key_data key_data;
6085 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6087 device_printf(dev, "Could not allocate sbuf for output.\n");
6091 bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
6093 sbuf_cat(buf, "\n");
6094 if (hw->mac.type == I40E_MAC_X722) {
6095 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
6097 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
6098 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
6100 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
6101 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
/* FIX: "&reg" had been mangled into the HTML entity "(R)" glyph; restore the address-of expression. */
6102 bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
6106 ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
6108 error = sbuf_finish(buf);
6110 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Hex-dump 'length' bytes of 'buf' into sbuf 'sb', 16 bytes per row,
 * each row prefixed with the byte offset biased by 'label_offset'.
 * When 'text' is true an ASCII column follows the hex, rendering
 * non-printable bytes (outside 32..126) as '.'.  Empty/NULL input is a
 * no-op.
 * NOTE(review): extract is line-elided — the accounting that adds a row
 * for the remainder bytes (rem > 0) is not visible here; as shown, the
 * final partial row would be folded into the last full row.  Confirm in
 * the full source.
 */
6117 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
6122 if (length < 1 || buf == NULL) return;
6124 int byte_stride = 16;
6125 int lines = length / byte_stride;
6126 int rem = length % byte_stride;
6130 for (i = 0; i < lines; i++) {
6131 width = (rem > 0 && i == lines - 1)
6132 ? rem : byte_stride;
6134 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
6136 for (j = 0; j < width; j++)
6137 sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
/* Pad a short final row so the ASCII column stays aligned. */
6139 if (width < byte_stride) {
6140 for (k = 0; k < (byte_stride - width); k++)
6141 sbuf_printf(sb, " ");
6145 sbuf_printf(sb, "\n");
6149 for (j = 0; j < width; j++) {
6150 c = (char)buf[i * byte_stride + j];
6151 if (c < 32 || c > 126)
6152 sbuf_printf(sb, ".");
6154 sbuf_printf(sb, "%c", c);
6157 sbuf_printf(sb, "\n");
/*
 * Sysctl (read-only): dump the 512-byte RSS lookup table (HLUT).
 * On X722 the LUT is fetched through the admin queue; on other MACs it
 * is assembled 4 bytes at a time from the I40E_PFQF_HLUT registers.
 */
6163 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
6165 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6166 struct i40e_hw *hw = &pf->hw;
6167 device_t dev = pf->dev;
6170 enum i40e_status_code status;
6174 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6176 device_printf(dev, "Could not allocate sbuf for output.\n");
6180 bzero(hlut, sizeof(hlut));
6181 sbuf_cat(buf, "\n");
6182 if (hw->mac.type == I40E_MAC_X722) {
6183 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
6185 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
6186 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
6188 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
6189 reg = rd32(hw, I40E_PFQF_HLUT(i));
/* FIX: "&reg" had been mangled into the HTML entity "(R)" glyph; restore the address-of expression. */
6190 bcopy(&reg, &hlut[i << 2], 4);
6193 ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
6195 error = sbuf_finish(buf);
6197 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl (read-only): report the enabled RSS hash types (HENA) as a
 * 64-bit value assembled from the two 32-bit I40E_PFQF_HENA registers.
 */
6204 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
6206 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6207 struct i40e_hw *hw = &pf->hw;
6210 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
6211 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
6213 return sysctl_handle_long(oidp, NULL, hena, req);
6217 * Sysctl to disable firmware's link management
6219 * 1 - Disable link management on this port
6220 * 0 - Re-enable link management
6222 * On normal NVMs, firmware manages link by default.
/*
 * Sysctl (write): disable (1) or re-enable (0) firmware link
 * management on this port by toggling bit 4 of the PHY debug mode via
 * i40e_aq_set_phy_debug().  Values other than 0/1 are rejected.
 */
6225 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
6227 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6228 struct i40e_hw *hw = &pf->hw;
6229 device_t dev = pf->dev;
6230 int requested_mode = -1;
6231 enum i40e_status_code status = 0;
6234 /* Read in new mode */
6235 error = sysctl_handle_int(oidp, &requested_mode, 0, req);
6236 if ((error) || (req->newptr == NULL))
6238 /* Check for sane value */
6239 if (requested_mode < 0 || requested_mode > 1) {
6240 device_printf(dev, "Valid modes are 0 or 1\n");
/* Bit 4 of the debug word is the "disable firmware link management" flag. */
6245 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
6248 "%s: Error setting new phy debug mode %s,"
6249 " aq error: %s\n", __func__, i40e_stat_str(hw, status),
6250 i40e_aq_str(hw, hw->aq.asq_last_status));
6258 * Read some diagnostic data from a (Q)SFP+ module
6260 * SFP A2 QSFP Lower Page
6261 * Temperature 96-97 22-23
6263 * TX power 102-103 34-35..40-41
6264 * RX power 104-105 50-51..56-57
/*
 * Sysctl (read-only): read diagnostic data (temperature, TX/RX power —
 * see the offset table in the comment above) from a plugged SFP or
 * QSFP+ module over I2C.  Byte 0 at address 0xA0 identifies the module
 * type: 0x3 = SFP (diagnostics live in the A2 page and must be
 * advertised in byte 92), 0xD/0x11 = QSFP+/QSFP28 (always internally
 * calibrated; data read from the lower page at 0xA0, first lane only).
 */
6267 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
6269 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6270 device_t dev = pf->dev;
/* Size-probe only (e.g. sysctl -a): report the output size, do no I2C I/O. */
6275 if (req->oldptr == NULL) {
6276 error = SYSCTL_OUT(req, 0, 128);
6280 error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
6282 device_printf(dev, "Error reading from i2c\n");
6286 /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
6287 if (output == 0x3) {
6290 * - Internally calibrated data
6291 * - Diagnostic monitoring is implemented
6293 pf->read_i2c_byte(pf, 92, 0xA0, &output);
6294 if (!(output & 0x60)) {
6295 device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
6299 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
/* SFP: temperature (96-97) and voltage bytes, then TX/RX power (102-105). */
6301 for (u8 offset = 96; offset < 100; offset++) {
6302 pf->read_i2c_byte(pf, offset, 0xA2, &output);
6303 sbuf_printf(sbuf, "%02X ", output);
6305 for (u8 offset = 102; offset < 106; offset++) {
6306 pf->read_i2c_byte(pf, offset, 0xA2, &output);
6307 sbuf_printf(sbuf, "%02X ", output);
6309 } else if (output == 0xD || output == 0x11) {
6311 * QSFP+ modules are always internally calibrated, and must indicate
6312 * what types of diagnostic monitoring are implemented
6314 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6316 for (u8 offset = 22; offset < 24; offset++) {
6317 pf->read_i2c_byte(pf, offset, 0xA0, &output);
6318 sbuf_printf(sbuf, "%02X ", output);
6320 for (u8 offset = 26; offset < 28; offset++) {
6321 pf->read_i2c_byte(pf, offset, 0xA0, &output);
6322 sbuf_printf(sbuf, "%02X ", output);
6324 /* Read the data from the first lane */
6325 for (u8 offset = 34; offset < 36; offset++) {
6326 pf->read_i2c_byte(pf, offset, 0xA0, &output);
6327 sbuf_printf(sbuf, "%02X ", output);
6329 for (u8 offset = 50; offset < 52; offset++) {
6330 pf->read_i2c_byte(pf, offset, 0xA0, &output);
6331 sbuf_printf(sbuf, "%02X ", output);
6334 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
6345 * Sysctl to read a byte from I2C bus.
6347 * Input: 32-bit value:
6348 * bits 0-7: device address (0xA0 or 0xA2)
6349 * bits 8-15: offset (0-255)
6350 * bits 16-31: unused
6351 * Output: 8-bit value read
/*
 * Sysctl (write): read one byte from the module I2C bus.  Input word
 * encodes device address (bits 0-7, must be 0xA0 or 0xA2) and offset
 * (bits 8-15); the byte read is printed to the console.
 */
6354 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
6356 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6357 device_t dev = pf->dev;
6358 int input = -1, error = 0;
6360 u8 dev_addr, offset, output;
6362 /* Read in I2C read parameters */
6363 error = sysctl_handle_int(oidp, &input, 0, req);
6364 if ((error) || (req->newptr == NULL))
6366 /* Validate device address */
6367 dev_addr = input & 0xFF;
6368 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
6371 offset = (input >> 8) & 0xFF;
6373 error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
6377 device_printf(dev, "%02X\n", output);
6382 * Sysctl to write a byte to the I2C bus.
6384 * Input: 32-bit value:
6385 * bits 0-7: device address (0xA0 or 0xA2)
6386 * bits 8-15: offset (0-255)
6387 * bits 16-23: value to write
6388 * bits 24-31: unused
6389 * Output: 8-bit value written
/*
 * Sysctl (write): write one byte to the module I2C bus.  Input word
 * encodes device address (bits 0-7, must be 0xA0 or 0xA2), offset
 * (bits 8-15) and the value to write (bits 16-23); the value written
 * is echoed to the console.
 */
6392 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
6394 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6395 device_t dev = pf->dev;
6396 int input = -1, error = 0;
6398 u8 dev_addr, offset, value;
6400 /* Read in I2C write parameters */
6401 error = sysctl_handle_int(oidp, &input, 0, req);
6402 if ((error) || (req->newptr == NULL))
6404 /* Validate device address */
6405 dev_addr = input & 0xFF;
6406 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
6409 offset = (input >> 8) & 0xFF;
6410 value = (input >> 16) & 0xFF;
6412 error = pf->write_i2c_byte(pf, offset, dev_addr, value);
6416 device_printf(dev, "%02X written\n", value);
/*
 * Fetch the current PHY abilities and report via *is_set whether the
 * FEC bit 'bit_pos' is set in fec_cfg_curr_mod_ext_info.  The filled-in
 * abilities struct is also returned to the caller (the FEC sysctls
 * reuse it for a subsequent ixl_set_fec_config()).
 */
6421 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
6422 u8 bit_pos, int *is_set)
6424 device_t dev = pf->dev;
6425 struct i40e_hw *hw = &pf->hw;
6426 enum i40e_status_code status;
6428 status = i40e_aq_get_phy_capabilities(hw,
6429 FALSE, FALSE, abilities, NULL);
6432 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
6433 __func__, i40e_stat_str(hw, status),
6434 i40e_aq_str(hw, hw->aq.asq_last_status));
6438 *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
/*
 * Update the PHY FEC configuration: start from the current FEC config
 * with 'bit_pos' cleared, set it back if 'set' is true, and — only when
 * the resulting value actually differs — push a full set_phy_config
 * built from the current abilities, with ATOMIC_LINK so the link is
 * restarted under the new FEC setting.
 */
6443 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
6444 u8 bit_pos, int set)
6446 device_t dev = pf->dev;
6447 struct i40e_hw *hw = &pf->hw;
6448 struct i40e_aq_set_phy_config config;
6449 enum i40e_status_code status;
6451 /* Set new PHY config */
6452 memset(&config, 0, sizeof(config));
6453 config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
6455 config.fec_config |= bit_pos;
/* Skip the AQ call entirely when nothing would change. */
6456 if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
6457 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
/* Carry over every other PHY parameter unchanged from the abilities. */
6458 config.phy_type = abilities->phy_type;
6459 config.phy_type_ext = abilities->phy_type_ext;
6460 config.link_speed = abilities->link_speed;
6461 config.eee_capability = abilities->eee_capability;
6462 config.eeer = abilities->eeer_val;
6463 config.low_power_ctrl = abilities->d3_lpan;
6464 status = i40e_aq_set_phy_config(hw, &config, NULL);
6468 "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
6469 __func__, i40e_stat_str(hw, status),
6470 i40e_aq_str(hw, hw->aq.asq_last_status));
/*
 * Sysctl: get/set the KR (FC/BASE-R) FEC *ability* bit.  Reads the
 * current value via ixl_get_fec_config(), and on write applies the new
 * boolean via ixl_set_fec_config().
 */
6479 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
6481 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6482 int mode, error = 0;
6484 struct i40e_aq_get_phy_abilities_resp abilities;
6485 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
6488 /* Read in new mode */
6489 error = sysctl_handle_int(oidp, &mode, 0, req);
6490 if ((error) || (req->newptr == NULL))
6493 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
/*
 * Sysctl: get/set the RS FEC *ability* bit; same pattern as the KR
 * ability handler above.
 */
6497 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
6499 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6500 int mode, error = 0;
6502 struct i40e_aq_get_phy_abilities_resp abilities;
6503 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
6506 /* Read in new mode */
6507 error = sysctl_handle_int(oidp, &mode, 0, req);
6508 if ((error) || (req->newptr == NULL))
6511 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
/*
 * Sysctl: get/set the KR (FC/BASE-R) FEC *request* bit; same pattern
 * as the ability handlers above.
 */
6515 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
6517 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6518 int mode, error = 0;
6520 struct i40e_aq_get_phy_abilities_resp abilities;
6521 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
6524 /* Read in new mode */
6525 error = sysctl_handle_int(oidp, &mode, 0, req);
6526 if ((error) || (req->newptr == NULL))
6529 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
/*
 * Sysctl handler: report/set whether RS-FEC is *requested* from the link
 * partner.  RS counterpart of the KR request handler above.
 */
6533 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
6535 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
6536 	int mode, error = 0;
6538 	struct i40e_aq_get_phy_abilities_resp abilities;
/* Fetch current PHY abilities; "mode" receives the current RS-FEC request bit */
6539 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
6542 	/* Read in new mode */
6543 	error = sysctl_handle_int(oidp, &mode, 0, req);
6544 	if ((error) || (req->newptr == NULL))
/* Write path: normalize to 0/1 and apply the RS-FEC request bit */
6547 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
/*
 * Sysctl handler: report/set automatic FEC mode negotiation, where the
 * hardware chooses the FEC scheme rather than a fixed KR/RS setting.
 * Same read/write pattern as the other FEC sysctl handlers.
 */
6551 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
6553 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
6554 	int mode, error = 0;
6556 	struct i40e_aq_get_phy_abilities_resp abilities;
/* Fetch current PHY abilities; "mode" receives the current auto-FEC bit */
6557 	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
6560 	/* Read in new mode */
6561 	error = sysctl_handle_int(oidp, &mode, 0, req);
6562 	if ((error) || (req->newptr == NULL))
/* Write path: normalize to 0/1 and apply the auto-FEC bit */
6565 	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
/*
 * Sysctl handler: dump internal firmware debug data to the requesting
 * user via an sbuf.  Repeatedly issues i40e_aq_debug_dump() admin-queue
 * commands, accumulating 4 KB chunks into final_buff until a table is
 * complete, then hex-prints that table and moves on; stops when the
 * firmware signals the end of the cluster (next_table == 0xFF) or the
 * end of data (next_index == 0xFFFFFFFF).
 *
 * (Several declarations — buf, final_buff, dump_buf, cluster_id, the
 * ret_* locals — and error-path returns are on lines elided from this
 * extract; comments below only describe what is visible.)
 */
6569 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
6571 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
6572 	struct i40e_hw *hw = &pf->hw;
6573 	device_t dev = pf->dev;
6576 	enum i40e_status_code status;
/* Auto-sizing sbuf that drains directly to the sysctl requester */
6578 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6580 		device_printf(dev, "Could not allocate sbuf for output.\n");
6585 	/* This amount is only necessary if reading the entire cluster into memory */
6586 #define IXL_FINAL_BUFF_SIZE (1280 * 1024)
/* M_WAITOK: allocation sleeps rather than fails; no NULL check needed */
6587 	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK);
6588 	int final_buff_len = 0;
/* Per-AQ-command chunk size and the firmware-side read cursor */
6594 	u16 curr_buff_size = 4096;
6595 	u8 curr_next_table = 0;
6596 	u32 curr_next_index = 0;
6602 	sbuf_cat(buf, "\n");
/* Ask FW for the next chunk; FW returns how much it wrote and where to resume */
6605 		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
6606 		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
6608 			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
6609 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
6613 		/* copy info out of temp buffer */
6614 		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
6615 		final_buff_len += ret_buff_size;
6617 		if (ret_next_table != curr_next_table) {
6618 			/* We're done with the current table; we can dump out read data. */
6619 			sbuf_printf(buf, "%d:", curr_next_table);
6620 			int bytes_printed = 0;
/* NOTE(review): "<=" looks like an off-by-one — when final_buff_len is a
 * multiple of 16 this prints one extra 16-byte row past the accumulated
 * data; confirm against upstream before changing. */
6621 			while (bytes_printed <= final_buff_len) {
/* "%16D" is the kernel printf hex-dump conversion: 16 bytes, no separator */
6622 				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
6623 				bytes_printed += 16;
6625 			sbuf_cat(buf, "\n");
6627 			/* The entire cluster has been read; we're finished */
6628 			if (ret_next_table == 0xFF)
6631 			/* Otherwise clear the output buffer and continue reading */
6632 			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
/* FW indicates no more data at this index: stop reading */
6636 		if (ret_next_index == 0xFFFFFFFF)
/* Reset scratch chunk and advance the read cursor for the next AQ command */
6639 		bzero(dump_buf, sizeof(dump_buf));
6640 		curr_next_table = ret_next_table;
6641 		curr_next_index = ret_next_index;
6645 	free(final_buff, M_DEVBUF);
6646 	error = sbuf_finish(buf);
6648 		device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Ask firmware to (re)start its LLDP agent and, on overall success,
 * clear the driver's FW_LLDP_DISABLED state bit.  Distinguishes three
 * AQ failure causes for the operator: agent already running (EEXIST),
 * start forbidden by the device's UEFI HII configuration (EPERM), and
 * everything else.  (Return statements and some braces are on lines
 * elided from this extract.)
 */
6655 ixl_start_fw_lldp(struct ixl_pf *pf)
6657 	struct i40e_hw *hw = &pf->hw;
6658 	enum i40e_status_code status;
/* false: do not persist the setting across resets — TODO confirm arg meaning */
6660 	status = i40e_aq_start_lldp(hw, false, NULL);
6661 	if (status != I40E_SUCCESS) {
6662 		switch (hw->aq.asq_last_status) {
6663 		case I40E_AQ_RC_EEXIST:
6664 			device_printf(pf->dev,
6665 			    "FW LLDP agent is already running\n");
6667 		case I40E_AQ_RC_EPERM:
6668 			device_printf(pf->dev,
6669 			    "Device configuration forbids SW from starting "
6670 			    "the LLDP agent. Set the \"LLDP Agent\" UEFI HII "
6671 			    "attribute to \"Enabled\" to use this sysctl\n");
/* default case: report the raw AQ status/error strings */
6674 			device_printf(pf->dev,
6675 			    "Starting FW LLDP agent failed: error: %s, %s\n",
6676 			    i40e_stat_str(hw, status),
6677 			    i40e_aq_str(hw, hw->aq.asq_last_status));
/* Agent now running: drop the "disabled" marker from the PF state */
6682 	atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
/*
 * Ask firmware to stop its LLDP agent and record that in the PF state.
 * Refuses up front on NPAR-enabled devices and on firmware that lacks
 * the "LLDP stoppable" capability flag; tolerates an EPERM reply as
 * "already stopped".  (Early returns and some device_printf() first
 * lines are on lines elided from this extract.)
 */
6687 ixl_stop_fw_lldp(struct ixl_pf *pf)
6689 	struct i40e_hw *hw = &pf->hw;
6690 	device_t dev = pf->dev;
6691 	enum i40e_status_code status;
/* NPAR (NIC partitioning) firmware does not support stopping the agent */
6693 	if (hw->func_caps.npar_enable != 0) {
6695 		    "Disabling FW LLDP agent is not supported on this device\n");
/* Older firmware without the stoppable-LLDP capability cannot do this */
6699 	if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) {
6701 		    "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
/* (true, false): shut the agent down without persisting — TODO confirm arg meaning */
6705 	status = i40e_aq_stop_lldp(hw, true, false, NULL);
6706 	if (status != I40E_SUCCESS) {
6707 		if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) {
6709 			    "Disabling FW LLDP agent failed: error: %s, %s\n",
6710 			    i40e_stat_str(hw, status),
6711 			    i40e_aq_str(hw, hw->aq.asq_last_status));
/* EPERM from FW here is treated as "agent was already stopped" */
6715 		device_printf(dev, "FW LLDP agent is already stopped\n");
/* Hand DCB handling over now that the FW agent is down; then mark state */
6718 	i40e_aq_set_dcb_parameters(hw, true, NULL);
6719 	atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
/*
 * Sysctl handler: report/toggle the firmware LLDP agent.  The exported
 * value is "enabled" (1) / "disabled" (0), i.e. the inverse of the
 * internal FW_LLDP_DISABLED state bit.  Writes that change the state
 * delegate to ixl_stop_fw_lldp()/ixl_start_fw_lldp().
 */
6724 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
6726 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
6727 	int state, new_state, error = 0;
/* 1 = agent enabled (DISABLED bit clear), 0 = agent disabled */
6729 	state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
6731 	/* Read in new mode */
6732 	error = sysctl_handle_int(oidp, &new_state, 0, req);
6733 	if ((error) || (req->newptr == NULL))
6736 	/* Already in requested state */
6737 	if (new_state == state)
/* new_state == 0 -> stop the agent; otherwise start it */
6741 		return ixl_stop_fw_lldp(pf);
6743 	return ixl_start_fw_lldp(pf);
/*
 * Sysctl handler: report/toggle Energy Efficient Ethernet (EEE).
 * Mirrors the fw_lldp handler's read/compare/apply pattern, but issues
 * the change through i40e_enable_eee() and tracks the result in the
 * IXL_PF_STATE_EEE_ENABLED bit.  (The success test around lines
 * 6770-6774 and the I40E_ERR_CONFIG error path body are on lines
 * elided from this extract.)
 */
6747 ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
6749 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
6750 	int state, new_state;
6751 	int sysctl_handle_status = 0;
6752 	enum i40e_status_code cmd_status;
6754 	/* Init states' values */
6755 	state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED));
6757 	/* Get requested mode */
6758 	sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req);
/* Read-only access or handler error: report current state / error as-is */
6759 	if ((sysctl_handle_status) || (req->newptr == NULL))
6760 		return (sysctl_handle_status);
6762 	/* Check if state has changed */
6763 	if (new_state == state)
/* Request the new EEE mode from firmware; !! normalizes to a strict bool */
6767 	cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state));
6769 	/* Save new state or report error */
/* On success the state bit is cleared or set to mirror the applied mode */
6772 			atomic_clear_int(&pf->state, IXL_PF_STATE_EEE_ENABLED);
6774 			atomic_set_int(&pf->state,
6775 			    IXL_PF_STATE_EEE_ENABLED);
6775 	} else if (cmd_status == I40E_ERR_CONFIG)
6784 ixl_attach_get_link_status(struct ixl_pf *pf)
6786 struct i40e_hw *hw = &pf->hw;
6787 device_t dev = pf->dev;
6790 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
6791 (hw->aq.fw_maj_ver < 4)) {
6792 i40e_msec_delay(75);
6793 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
6795 device_printf(dev, "link restart failed, aq_err=%d\n",
6796 pf->hw.aq.asq_last_status);
6801 /* Determine link state */
6802 hw->phy.get_link_info = TRUE;
6803 i40e_get_link_status(hw, &pf->link_up);