/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#include "ixl_pf_iov.h"
#include "ixl_iw_int.h"

#ifdef DEV_NETMAP
#include <net/netmap.h>
#include <sys/selinfo.h>
#include <dev/netmap/netmap_kern.h>
#endif /* DEV_NETMAP */
static int	ixl_setup_queue(struct ixl_queue *, struct ixl_pf *, int);
static u64	ixl_max_aq_speed_to_value(u8);
static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);

static int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

static int	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);

static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
extern int ixl_enable_iwarp;

const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

const char * const ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
ixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)

	if (!(mask & pf->dbg_mask))

	/* Re-implement device_printf() */
	device_print_prettyname(pf->dev);
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string

ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)

	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
	    IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
	    IXL_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack,
	    oem_ver, oem_build, oem_patch);
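
	/*
	 * Illustrative example of the resulting string (all values are
	 * hypothetical, not read from any specific adapter):
	 *
	 *   "fw 4.33.31377 api 1.2 nvm 4.53 etid 80001dc3 oem 1.262.0"
	 */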
ixl_print_nvm_version(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	sbuf = sbuf_new_auto();
	ixl_nvm_version_str(hw, sbuf);

	device_printf(dev, "%s\n", sbuf_data(sbuf));
ixl_configure_tx_itr(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_queue *que = vsi->queues;

	vsi->tx_itr_setting = pf->tx_itr;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;

		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
		    vsi->tx_itr_setting);
		txr->itr = vsi->tx_itr_setting;
		txr->latency = IXL_AVE_LATENCY;
ixl_configure_rx_itr(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_queue *que = vsi->queues;

	vsi->rx_itr_setting = pf->rx_itr;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
		    vsi->rx_itr_setting);
		rxr->itr = vsi->rx_itr_setting;
		rxr->latency = IXL_AVE_LATENCY;
 * Write PF ITR values to queue ITR registers.

ixl_configure_itr(struct ixl_pf *pf)

	ixl_configure_tx_itr(pf);
	ixl_configure_rx_itr(pf);
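
/*
 * Worked example of the ITR units (per the "2 usec units" note at the
 * ITR0 setup further below): an ITR setting of 0x3E (62) means
 * 62 * 2 = 124 usec minimum between interrupts, i.e. a ceiling of
 * roughly 8000 interrupts per second per queue.
 */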
/*********************************************************************

 *  This routine is used in two ways: it is used by the stack as the
 *  init entry point in the network interface structure, and it is used
 *  by the driver as a hw/sw initialization routine to get the adapter
 *  to a consistent state.

 *  return 0 on success, positive on failure
 **********************************************************************/
ixl_init_locked(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ifnet *ifp = vsi->ifp;
	device_t dev = pf->dev;
	struct i40e_filter_control_settings filter;
	u8 tmpaddr[ETHER_ADDR_LEN];

	INIT_DEBUGOUT("ixl_init_locked: begin");
	IXL_PF_LOCK_ASSERT(pf);
	 * If the aq is dead here, it probably means something outside of the driver
	 * did something to the adapter, like a PF reset.
	 * So rebuild the driver's state here if that occurs.
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
	    I40E_ETH_LENGTH_OF_ADDRESS);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		bcopy(tmpaddr, hw->mac.addr,
		    I40E_ETH_LENGTH_OF_ADDRESS);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
			device_printf(dev, "LLA address"
			    " change failed!\n");

	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = FALSE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");
	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Set up VLANs if needed */
	ixl_setup_vlan_filters(vsi);

	/* Set up MSI/X routing and the ITR settings */
	if (pf->msix > 1) {
		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	ixl_reconfigure_filters(vsi);

	/* And now turn on interrupts */
	ixl_enable_intr(vsi);

	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

	/* Start the local timer */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);
		if (ret)
			device_printf(dev,
			    "initialize iwarp failed, code %d\n", ret);
/*********************************************************************

 *  Get the hardware capabilities

 **********************************************************************/

ixl_get_hw_capabilities(struct ixl_pf *pf)

	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);

	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");

	/* This populates the hw struct */
	error = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);

	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
		/* retry once with a larger buffer */
	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
		device_printf(dev, "capability discovery failed: %d\n",
		    pf->hw.aq.asq_last_status);
	/* Capture this PF's starting queue pair */
	pf->qbase = hw->func_caps.base_queue;

	device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
	    hw->pf_id, hw->func_caps.num_vfs,
	    hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf,
	    hw->func_caps.fd_filters_guaranteed,
	    hw->func_caps.fd_filters_best_effort,
	    hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp,
	    hw->func_caps.base_queue);

	/* Print a subset of the capability information. */
	device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n",
	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
	    "MDIO shared");

	struct i40e_osdep *osdep = (struct i40e_osdep *)hw->back;
	osdep->i2c_intfc_num = ixl_find_i2c_interface(pf);
	if (osdep->i2c_intfc_num != -1)
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)

	device_t dev = vsi->dev;

	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;

		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			    "TSO4 requires txcsum, enabling both...\n");

	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;

	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			    "TSO6 requires txcsum6, enabling both...\n");

	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
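
/*
 * Summary of the dependency the two blocks above enforce (TSO requires
 * TX checksum offload; the IPv6 pair mirrors the IPv4 pair):
 *
 *   state before     toggle TXCSUM                  toggle TSO
 *   --------------   ---------------------------    -----------------------
 *   neither on       enable TXCSUM (and restore     enable both; warn that
 *                    TSO if KEEP_TSO was set)       TSO requires txcsum
 *   TXCSUM only      disable TXCSUM                 enable TSO
 *   both on          disable both; remember TSO     disable TSO only
 *                    via the KEEP_TSO flag
 */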
/* For the set_advertise sysctl */

ixl_get_initial_advertised_speeds(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;
	struct i40e_aq_get_phy_abilities_resp abilities;

	/* Set initial sysctl values */
	status = i40e_aq_get_phy_capabilities(hw, FALSE, false, &abilities,

		/* Non-fatal error */
		device_printf(dev, "%s: i40e_aq_get_phy_capabilities() error %d\n",

	pf->advertised_speed =
	    ixl_convert_sysctl_aq_link_speed(abilities.link_speed, false);
ixl_teardown_hw_structs(struct ixl_pf *pf)

	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
			    "init: LAN HMC shutdown failure; status %d\n", status);

	// XXX: This gets called when we know the adminq is inactive;
	// so we already know it's setup when we get here.

	/* Shutdown admin queue */
	status = i40e_shutdown_adminq(hw);
		    "init: Admin Queue shutdown failure; status %d\n", status);
ixl_reset(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary

	error = i40e_pf_reset(hw);
		device_printf(dev, "init: PF reset failure\n");

	error = i40e_init_adminq(hw);
		device_printf(dev, "init: Admin queue init failure;"
		    " status code %d\n", error);

	i40e_clear_pxe_mode(hw);

	error = ixl_get_hw_capabilities(pf);
		device_printf(dev, "init: Error retrieving HW capabilities;"
		    " status code %d\n", error);

	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
		device_printf(dev, "init: LAN HMC init failed; status code %d\n",

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
		device_printf(dev, "init: LAN HMC config failed; status code %d\n",

	// XXX: possible fix for panic, but our failure recovery is still broken
	error = ixl_switch_config(pf);
		device_printf(dev, "init: ixl_switch_config() failed: %d\n",

	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
		device_printf(dev, "init: i40e_aq_set_phy_int_mask() failed: err %d,"
		    " aq_err %d\n", error, hw->aq.asq_last_status);

	error = i40e_set_fc(hw, &set_fc_err_mask, true);
		device_printf(dev, "init: setting link flow control failed; retcode %d,"
		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);

	// XXX: (Rebuild VSIs?)

	/* Firmware delay workaround */
	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {

		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
			device_printf(dev, "init: link restart failed, aq_err %d\n",
			    hw->aq.asq_last_status);
** MSIX Interrupt Handlers and Tasklets

ixl_handle_que(void *context, int pending)

	struct ixl_queue *que = context;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct tx_ring *txr = &que->txr;
	struct ifnet *ifp = vsi->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);

		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);

			taskqueue_enqueue(que->tq, &que->task);

	/* Re-enable this interrupt */
	ixl_enable_queue(hw, que->me);
/*********************************************************************

 *  Legacy Interrupt Service routine

 **********************************************************************/

	struct ixl_pf *pf = arg;
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_queue *que = vsi->queues;
	struct ifnet *ifp = vsi->ifp;
	struct tx_ring *txr = &que->txr;

	bool more_tx, more_rx;

	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)

	icr0 = rd32(hw, I40E_PFINT_ICR0);

	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
		taskqueue_enqueue(pf->tq, &pf->vflr_task);

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		taskqueue_enqueue(pf->tq, &pf->adminq);

	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {

		more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

		more_tx = ixl_txeof(que);
		if (!drbr_empty(vsi->ifp, txr->br))

	ixl_enable_intr0(hw);
/*********************************************************************

 *  MSIX VSI Interrupt Service routine

 **********************************************************************/

ixl_msix_que(void *arg)

	struct ixl_queue *que = arg;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct tx_ring *txr = &que->txr;
	bool more_tx, more_rx;

	/* Protect against spurious interrupts */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	more_tx = ixl_txeof(que);

	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	if (!drbr_empty(vsi->ifp, txr->br))

	ixl_set_queue_rx_itr(que);
	ixl_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);

	ixl_enable_queue(hw, que->me);
/*********************************************************************

 *  MSIX Admin Queue Interrupt Service routine

 **********************************************************************/

ixl_msix_adminq(void *arg)

	struct ixl_pf *pf = arg;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u32 reg, mask, rstat_reg;
	bool do_task = FALSE;

	reg = rd32(hw, I40E_PFINT_ICR0);
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
		mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK;

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ixl_handle_mdd_event(pf);
		mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK;

	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
		device_printf(dev, "Reset Requested!\n");
		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		device_printf(dev, "Reset type: ");

		/* These others might be handled similarly to an EMPR reset */
		case I40E_RESET_CORER:
		case I40E_RESET_GLOBR:
		case I40E_RESET_EMPR:
			atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);

		/* overload admin queue task to check reset progress */

	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) {
		device_printf(dev, "ECC Error detected!\n");

	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		reg = rd32(hw, I40E_PFHMC_ERRORINFO);
		if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
			device_printf(dev, "HMC Error detected!\n");
			device_printf(dev, "INFO 0x%08x\n", reg);
			reg = rd32(hw, I40E_PFHMC_ERRORDATA);
			device_printf(dev, "DATA 0x%08x\n", reg);
			wr32(hw, I40E_PFHMC_ERRORINFO, 0);

	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) {
		device_printf(dev, "PCI Exception detected!\n");

	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		taskqueue_enqueue(pf->tq, &pf->vflr_task);

		taskqueue_enqueue(pf->tq, &pf->adminq);

	ixl_enable_intr0(hw);
ixl_set_promisc(struct ixl_vsi *vsi)

	struct ifnet *ifp = vsi->ifp;
	struct i40e_hw *hw = vsi->hw;
	bool uni = FALSE, multi = FALSE;

	if (ifp->if_flags & IFF_ALLMULTI)
	else { /* Need to count the multicast addresses */
		struct ifmultiaddr *ifma;

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
			if (mcnt == MAX_MULTICAST_ADDR)

		if_maddr_runlock(ifp);

	if (mcnt >= MAX_MULTICAST_ADDR)
	if (ifp->if_flags & IFF_PROMISC)

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL, TRUE);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
/*********************************************************************

 *  Routines for multicast and vlan filter management.

 *********************************************************************/

ixl_add_multi(struct ixl_vsi *vsi)

	struct ifmultiaddr *ifma;
	struct ifnet *ifp = vsi->ifp;
	struct i40e_hw *hw = vsi->hw;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	** First just get a count, to decide if
	** we simply use multicast promiscuous.
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)

	if_maddr_runlock(ifp);

	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete existing MC filters */
		ixl_del_hw_filters(vsi, mcnt);
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
		ixl_add_mc_filter(vsi,
		    (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr));

	if_maddr_runlock(ifp);

	flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
	ixl_add_hw_filters(vsi, flags, mcnt);

	IOCTL_DEBUGOUT("ixl_add_multi: end");
ixl_del_multi(struct ixl_vsi *vsi)

	struct ifnet *ifp = vsi->ifp;
	struct ifmultiaddr *ifma;
	struct ixl_mac_filter *f;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	/* Search for removed multicast addresses */
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {

			if (match == FALSE) {
				f->flags |= IXL_FILTER_DEL;

	if_maddr_runlock(ifp);

	ixl_del_hw_filters(vsi, mcnt);
/*********************************************************************

 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.

 *  Only runs when the driver is configured UP and RUNNING.

 **********************************************************************/

ixl_local_timer(void *arg)

	struct ixl_pf *pf = arg;
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_queue *que = vsi->queues;
	device_t dev = pf->dev;

	s32 timer, new_timer;

	IXL_PF_LOCK_ASSERT(pf);

	/* Fire off the adminq task */
	taskqueue_enqueue(pf->tq, &pf->adminq);

	ixl_update_stats_counters(pf);

	/* Check status of the queues */
	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
	    I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
	    I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);

	for (int i = 0; i < vsi->num_queues; i++, que++) {

		timer = atomic_load_acq_32(&txr->watchdog_timer);

			new_timer = timer - hz;
			if (new_timer <= 0) {
				atomic_store_rel_32(&txr->watchdog_timer, -1);
				device_printf(dev, "WARNING: queue %d "
				    "appears to be hung!\n", que->me);

			 * If this fails, that means something in the TX path
			 * has updated the watchdog, so it means the TX path
			 * is still working and the watchdog doesn't need to
			 * count down.
			atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer);
			/* Any queues with outstanding work get a sw irq */
			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);

	/* Reset when a queue appears hung */

	callout_reset(&pf->timer, hz, ixl_local_timer, pf);

	device_printf(dev, "WARNING: Resetting!\n");
	pf->watchdog_events++;
	ixl_init_locked(pf);
ixl_link_up_msg(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = pf->vsi.ifp;

	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, FEC: %s, Autoneg: %s, Flow Control: %s\n",
	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
	    (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) ?
		"Clause 74 BASE-R FEC" : (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) ?
		"Clause 108 RS-FEC" : "None",
	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
		hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
		ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[1] : ixl_fc_string[0]);
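
	/*
	 * Example of the resulting log line (interface name and values
	 * are illustrative only):
	 *
	 *   ixl0: Link is up, 10 Gbps Full Duplex, FEC: None,
	 *   Autoneg: True, Flow Control: Full
	 */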
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.

ixl_update_link_status(struct ixl_pf *pf)

	struct ixl_vsi *vsi = &pf->vsi;
	struct ifnet *ifp = vsi->ifp;
	device_t dev = pf->dev;

	if (vsi->link_active == FALSE) {
		vsi->link_active = TRUE;
		ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->link_speed);
		if_link_state_change(ifp, LINK_STATE_UP);
		ixl_link_up_msg(pf);

	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			vsi->link_active = FALSE;
/*********************************************************************

 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.

 **********************************************************************/

ixl_stop_locked(struct ixl_pf *pf)

	struct ixl_vsi *vsi = &pf->vsi;
	struct ifnet *ifp = vsi->ifp;

	INIT_DEBUGOUT("ixl_stop: begin\n");

	IXL_PF_LOCK_ASSERT(pf);

	/* Stop iWARP device */
	if (ixl_enable_iwarp && pf->iw_enabled)

	/* Stop the local timer */
	callout_stop(&pf->timer);

	ixl_disable_rings_intr(vsi);
	ixl_disable_rings(vsi);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);

ixl_stop(struct ixl_pf *pf)

	ixl_stop_locked(pf);
/*********************************************************************

 *  Setup legacy/MSI interrupt resources and a handler for the PF

 **********************************************************************/

ixl_setup_legacy(struct ixl_pf *pf)

	device_t dev = pf->dev;

	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (pf->res == NULL) {
		device_printf(dev, "bus_alloc_resource_any() for"
		    " legacy/msi interrupt failed\n");

	/* Set the handler function */
	error = bus_setup_intr(dev, pf->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixl_intr, pf, &pf->tag);

		device_printf(dev, "bus_setup_intr() for legacy/msi"
		    " interrupt handler failed, error %d\n", error);

	error = bus_describe_intr(dev, pf->res, pf->tag, "irq");

		device_printf(dev, "bus_describe_intr() for Admin Queue"
		    " interrupt name failed, error %d\n", error);
ixl_setup_adminq_tq(struct ixl_pf *pf)

	device_t dev = pf->dev;

	/* Tasklet for Admin Queue interrupts */
	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);

	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);

	/* Create and start Admin Queue taskqueue */
	pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT,
	    taskqueue_thread_enqueue, &pf->tq);
		device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n");

	error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq",
	    device_get_nameunit(dev));
		device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n",
		taskqueue_free(pf->tq);
ixl_setup_queue_tqs(struct ixl_vsi *vsi)

	struct ixl_queue *que = vsi->queues;
	device_t dev = vsi->dev;

	/* Create queue tasks and start queue taskqueues */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
		TASK_INIT(&que->task, 0, ixl_handle_que, que);
		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
		CPU_SETOF(cpu_id, &cpu_mask);
		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
		    &cpu_mask, "%s (bucket %d)",
		    device_get_nameunit(dev), cpu_id);
#else
		taskqueue_start_threads(&que->tq, 1, PI_NET,
		    "%s (que %d)", device_get_nameunit(dev), que->me);
#endif

ixl_free_adminq_tq(struct ixl_pf *pf)

		taskqueue_free(pf->tq);

ixl_free_queue_tqs(struct ixl_vsi *vsi)

	struct ixl_queue *que = vsi->queues;

	for (int i = 0; i < vsi->num_queues; i++, que++) {

			taskqueue_free(que->tq);
ixl_setup_adminq_msix(struct ixl_pf *pf)

	device_t dev = pf->dev;

	/* Admin IRQ rid is 1, vector is 0 */

	/* Get interrupt resource from bus */
	pf->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
		device_printf(dev, "bus_alloc_resource_any() for Admin Queue"
		    " interrupt failed [rid=%d]\n", rid);

	/* Then associate interrupt with handler */
	error = bus_setup_intr(dev, pf->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixl_msix_adminq, pf, &pf->tag);

		device_printf(dev, "bus_setup_intr() for Admin Queue"
		    " interrupt handler failed, error %d\n", error);

	error = bus_describe_intr(dev, pf->res, pf->tag, "aq");

		device_printf(dev, "bus_describe_intr() for Admin Queue"
		    " interrupt name failed, error %d\n", error);
 * Allocate interrupt resources from bus and associate an interrupt handler
 * to those for the VSI's queues.

ixl_setup_queue_msix(struct ixl_vsi *vsi)

	device_t dev = vsi->dev;
	struct ixl_queue *que = vsi->queues;
	struct tx_ring *txr;
	int error, rid, vector = 1;

	/* Queue interrupt vector numbers start at 1 (adminq intr is 0) */
	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {

		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
			device_printf(dev, "bus_alloc_resource_any() for"
			    " Queue %d interrupt failed [rid=%d]\n",

		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixl_msix_que, que, &que->tag);
			device_printf(dev, "bus_setup_intr() for Queue %d"
			    " interrupt handler failed, error %d\n",
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);

		error = bus_describe_intr(dev, que->res, que->tag, "q%d", i);
			device_printf(dev, "bus_describe_intr() for Queue %d"
			    " interrupt name failed, error %d\n",

		/* Bind the vector to a CPU */
		cpu_id = rss_getcpu(i % rss_getnumbuckets());

		error = bus_bind_intr(dev, que->res, cpu_id);
			device_printf(dev, "bus_bind_intr() for Queue %d"
			    " to CPU %d failed, error %d\n",
			    que->me, cpu_id, error);
 * When used in a virtualized environment, the PCI BUSMASTER capability may
 * not be set, so explicitly set it here and rewrite the ENABLE bit in the
 * MSIX control register at this point to allow the host to successfully
 * initialize us.

ixl_set_busmaster(device_t dev)

	pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);

 * rewrite the ENABLE bit in the MSIX control register
 * so the host can successfully initialize us.

ixl_set_msix_enable(device_t dev)

	pci_find_cap(dev, PCIY_MSIX, &rid);
	rid += PCIR_MSIX_CTRL;
	msix_ctrl = pci_read_config(dev, rid, 2);
	msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(dev, rid, msix_ctrl, 2);
 * Allocate MSI/X vectors from the OS.
 * Returns 0 for legacy, 1 for MSI, >1 for MSIX.

ixl_init_msix(struct ixl_pf *pf)

	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	int auto_max_queues;
	int rid, want, vectors, queues, available;

	int iw_want, iw_vectors;

	/* Override by tunable */
	if (!pf->enable_msix)

	/* Ensure proper operation in virtualized environment */
	ixl_set_busmaster(dev);

	/* First try MSI/X */
	rid = PCIR_BAR(IXL_MSIX_BAR);
	pf->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!pf->msix_mem) {
		/* May not be enabled */
		device_printf(pf->dev,
		    "Unable to map MSIX table\n");

	available = pci_msix_count(dev);
	if (available < 2) {
		/* system has msix disabled (0), or only one vector (1) */
		bus_release_resource(dev, SYS_RES_MEMORY,
		pf->msix_mem = NULL;
	/* Clamp max number of queues based on:
	 * - # of MSI-X vectors available
	 * - # of cpus available
	 * - # of queues that can be assigned to the LAN VSI
	 */
	auto_max_queues = min(mp_ncpus, available - 1);
	if (hw->mac.type == I40E_MAC_X722)
		auto_max_queues = min(auto_max_queues, 128);
	else
		auto_max_queues = min(auto_max_queues, 64);

	/* Override with tunable value if tunable is less than autoconfig count */
	if ((pf->max_queues != 0) && (pf->max_queues <= auto_max_queues))
		queues = pf->max_queues;
	/* Use autoconfig amount if that's lower */
	else if ((pf->max_queues != 0) && (pf->max_queues > auto_max_queues)) {
		device_printf(dev, "ixl_max_queues (%d) is too large, using "
		    "autoconfig amount (%d)...\n",
		    pf->max_queues, auto_max_queues);
		queues = auto_max_queues;
	}
	/* Limit maximum auto-configured queues to 8 if no user value is set */
	else
		queues = min(auto_max_queues, 8);

	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
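
	/*
	 * Worked example (hypothetical machine): with mp_ncpus = 16 and
	 * 32 MSI-X vectors reported by pci_msix_count(), auto_max_queues
	 * = min(16, 31) = 16; with no ixl_max_queues tunable set, queues
	 * = min(16, 8) = 8, so 8 + 1 = 9 vectors are wanted (one per
	 * queue pair plus one for the admin queue).
	 */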
	** Want one vector (RX/TX pair) per queue
	** plus an additional for the admin queue.
	if (want <= available)	/* Have enough */

		device_printf(pf->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors available but %d wanted!\n",
		pf->msix_mem = NULL;
		goto no_msix; /* Will go to Legacy setup */

	if (ixl_enable_iwarp) {
		/* iWARP wants additional vector for CQP */
		iw_want = mp_ncpus + 1;
		available -= vectors;
		if (available > 0) {
			iw_vectors = (available >= iw_want) ?
			    iw_want : available;
			vectors += iw_vectors;
	ixl_set_msix_enable(dev);
	if (pci_alloc_msix(dev, &vectors) == 0) {
		device_printf(pf->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);

		if (ixl_enable_iwarp)
			pf->iw_msix = iw_vectors;

		pf->vsi.num_queues = queues;

		 * If we're doing RSS, the number of queues needs to
		 * match the number of RSS buckets that are configured.
		 *
		 * + If there's more queues than RSS buckets, we'll end
		 *   up with queues that get no traffic.
		 *
		 * + If there's more RSS buckets than queues, we'll end
		 *   up having multiple RSS buckets map to the same queue,
		 *   so there'll be some contention.
		if (queues != rss_getnumbuckets()) {
			    "%s: queues (%d) != RSS buckets (%d)"
			    "; performance will be impacted.\n",
			    __func__, queues, rss_getnumbuckets());

	vectors = pci_msi_count(dev);
	pf->vsi.num_queues = 1;

	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
		device_printf(pf->dev, "Using an MSI interrupt\n");

		device_printf(pf->dev, "Using a Legacy interrupt\n");
 * Configure admin queue/misc interrupt cause registers in hardware.

ixl_configure_intr0_msix(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);	/* disable all */
	rd32(hw, I40E_PFINT_ICR0);		/* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
 * Configure queue interrupt cause registers in hardware.

ixl_configure_queue_intr_msix(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;

	for (int i = 0; i < vsi->num_queues; i++, vector++) {
		wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
		/* First queue type is RX / 0 */
		wr32(hw, I40E_PFINT_LNKLSTN(i), i);

		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
		    (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
		    (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		    (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_RQCTL(i), reg);

		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		    (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		    (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		    (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_TQCTL(i), reg);
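
		/*
		 * Net effect per vector: LNKLSTN points the vector's
		 * cause list at RX queue i; the RQCTL cause then chains
		 * to TX queue i (NEXTQ_INDX = i, type TX), and the TQCTL
		 * cause ends the list (NEXTQ_INDX = IXL_QUEUE_EOL).
		 */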
 * Configure for MSI single vector operation

ixl_configure_legacy(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_queue *que = vsi->queues;
	struct rx_ring *rxr = &que->rxr;
	struct tx_ring *txr = &que->txr;

	vsi->tx_itr_setting = pf->tx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
	    vsi->tx_itr_setting);
	txr->itr = vsi->tx_itr_setting;

	vsi->rx_itr_setting = pf->rx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
	    vsi->rx_itr_setting);
	rxr->itr = vsi->rx_itr_setting;

	/* Setup "other" causes */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
	    | I40E_PFINT_ICR0_ENA_GRST_MASK
	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/* No ITR for non-queue interrupts */
	wr32(hw, I40E_PFINT_STAT_CTL0,
	    IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the q int */
	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
	wr32(hw, I40E_QINT_TQCTL(0), reg);
ixl_allocate_pci_resources(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,

	if (!(pf->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	pf->osdep.mem_bus_space_tag =
	    rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
	    rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->hw.hw_addr = (u8 *)&pf->osdep.mem_bus_space_handle;

	pf->hw.back = &pf->osdep;
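
	/*
	 * Note: hw_addr is not a mapped BAR pointer here; it stores the
	 * address of the bus space handle, and the osdep wr32()/rd32()
	 * wrappers use the mem_bus_space_tag/_handle saved above to
	 * perform the actual register access.
	 */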
 * Teardown and release the admin queue/misc vector

ixl_teardown_adminq_msix(struct ixl_pf *pf)

	device_t dev = pf->dev;

	if (pf->admvec) /* we are doing MSIX */
		rid = pf->admvec + 1;
	else
		rid = (pf->msix != 0) ? 1 : 0;

	if (pf->tag != NULL) {
		bus_teardown_intr(dev, pf->res, pf->tag);
			device_printf(dev, "bus_teardown_intr() for"
			    " interrupt 0 failed\n");

	if (pf->res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
			device_printf(dev, "bus_release_resource() for"
			    " interrupt 0 failed [rid=%d]\n", rid);
ixl_teardown_queue_msix(struct ixl_vsi *vsi)

	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
	struct ixl_queue *que = vsi->queues;
	device_t dev = vsi->dev;

	/* We may get here before stations are set up */
	if ((pf->msix < 2) || (que == NULL))

	/* Release all MSIX queue resources */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			error = bus_teardown_intr(dev, que->res, que->tag);
				device_printf(dev, "bus_teardown_intr() for"
				    " Queue %d interrupt failed\n",

		if (que->res != NULL) {
			error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
				device_printf(dev, "bus_release_resource() for"
				    " Queue %d interrupt failed [rid=%d]\n",
ixl_free_pci_resources(struct ixl_pf *pf)

	device_t dev = pf->dev;

	ixl_teardown_queue_msix(&pf->vsi);
	ixl_teardown_adminq_msix(pf);

	pci_release_msi(dev);

	memrid = PCIR_BAR(IXL_MSIX_BAR);

	if (pf->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, pf->msix_mem);

	if (pf->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), pf->pci_mem);
ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)

	/* Display supported media types */
	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_UNKNOWN, 0, NULL);
/*********************************************************************

 *  Setup networking device structure and register an interface.

 **********************************************************************/

ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)

	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;

	struct i40e_hw *hw = vsi->hw;
	struct ixl_queue *que = vsi->queues;
	struct i40e_aq_get_phy_abilities_resp abilities;
	enum i40e_status_code aq_error = 0;

	INIT_DEBUGOUT("ixl_setup_interface: begin");

	ifp = vsi->ifp = if_alloc(IFT_ETHER);
		device_printf(dev, "can not allocate ifnet structure\n");

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = ixl_init;
	ifp->if_softc = vsi;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixl_ioctl;
#if __FreeBSD_version >= 1100036
	if_setgetcounterfn(ifp, ixl_get_counter);
#endif

	ifp->if_transmit = ixl_mq_start;

	ifp->if_qflush = ixl_qflush;

	ifp->if_snd.ifq_maxlen = que->num_desc - 2;

	vsi->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	/* Set TSO limits */
	ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
	ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
	ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
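
	/*
	 * Arithmetic note: IP_MAXPACKET is 65535, so if_hw_tsomax works
	 * out to 65535 - (14 + 4) = 65517 bytes per TSO burst once the
	 * Ethernet header and CRC are subtracted.
	 */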
	 * Tell the upper layer(s) we support long frames.
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_TSO;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_LRO;

	/* VLAN capabilities */
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
	    | IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	** Don't turn this on by default: if VLANs are created on another
	** pseudo device (e.g. lagg), VLAN events are not passed through,
	** breaking operation; with HW FILTER off it works. If using VLANs
	** directly on the ixl driver you can enable this and get full
	** hardware tag filtering.
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,

	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, TRUE, &abilities, NULL);
	/* May need delay to detect fiber correctly */
	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
		i40e_msec_delay(200);
		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
		    TRUE, &abilities, NULL);

	if (aq_error == I40E_ERR_UNKNOWN_PHY)
		device_printf(dev, "Unknown PHY type detected!\n");
		    "Error getting supported media types, err %d,"
		    " AQ error %d\n", aq_error, hw->aq.asq_last_status);

	pf->supported_speeds = abilities.link_speed;
	ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds);

	ixl_add_ifmedia(vsi, hw->phy.phy_types);

	/* Use autoselect media by default */
	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);

	ether_ifattach(ifp, hw->mac.addr);
** Run when the Admin Queue gets a link state change interrupt.

ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)

	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct i40e_aqc_get_link_status *status =
	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;

	/* Request link status from adapter */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);

	/* Print out message if an unqualified module is found */
	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
	    (!(status->link_info & I40E_AQ_LINK_UP)))
		device_printf(dev, "Link failed because "
		    "an unqualified module was detected!\n");

	/* Update OS link info */
	ixl_update_link_status(pf);
/*********************************************************************

 *  Get Firmware Switch configuration
 *	- this will need to be more robust when more complex
 *	  switch configurations are enabled.

 **********************************************************************/

ixl_switch_config(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = vsi->dev;
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u8 aq_buf[I40E_AQ_LARGE_BUF];

	memset(&aq_buf, 0, sizeof(aq_buf));
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	ret = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
		device_printf(dev, "aq_get_switch_config() failed, error %d,"
		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);

	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
		    "Switch config: header reported: %d in structure, %d total\n",
		    sw_config->header.num_reported, sw_config->header.num_total);
		for (int i = 0; i < sw_config->header.num_reported; i++) {
			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
			    sw_config->element[i].element_type,
			    sw_config->element[i].seid,
			    sw_config->element[i].uplink_seid,
			    sw_config->element[i].downlink_seid);

	/* Simplified due to a single VSI */
	vsi->uplink_seid = sw_config->element[0].uplink_seid;
	vsi->downlink_seid = sw_config->element[0].downlink_seid;
	vsi->seid = sw_config->element[0].seid;
/*********************************************************************

 *  Initialize the VSI: this handles contexts, which means things
 *  like the number of descriptors and buffer size; the rings are
 *  also initialized through this function.

 **********************************************************************/

ixl_initialize_vsi(struct ixl_vsi *vsi)

	struct ixl_pf *pf = vsi->back;
	struct ixl_queue *que = vsi->queues;
	device_t dev = vsi->dev;
	struct i40e_hw *hw = vsi->hw;
	struct i40e_vsi_context ctxt;

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = vsi->seid;
	if (pf->veb_seid != 0)
		ctxt.uplink_seid = pf->veb_seid;
	ctxt.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
		    " aq_error %d\n", err, hw->aq.asq_last_status);

	ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
	    "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
	    ctxt.uplink_seid, ctxt.vsi_number,
	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);

	** Set the queue and traffic class bits
	**  - when multiple traffic classes are supported
	**    this will need to be more robust.
	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
	/* In contig mode, que_mapping[0] is first queue index used by this VSI */
	ctxt.info.queue_mapping[0] = 0;

	 * This VSI will only use traffic class 0; start traffic class 0's
	 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
	 * the driver may not use all of them).
	tc_queues = bsrl(pf->qtag.num_allocated);
	ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
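
	/*
	 * Worked example: if qtag.num_allocated is 8, bsrl(8) returns 3
	 * (the index of the most significant set bit), so TC0 advertises
	 * 2^3 = 8 queues starting at offset 0 of the contiguous mapping.
	 */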
	/* Set VLAN receive stripping mode */
	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
	else
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
	/* Set TCP Enable for iWARP capable VSI */
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ctxt.info.valid_sections |=
		    htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;

	/* Save VSI number and info for use later */
	vsi->vsi_num = ctxt.vsi_number;
	bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));

	/* Reset VSI statistics */
	ixl_vsi_reset_stats(vsi);
	vsi->hw_filters_add = 0;
	vsi->hw_filters_del = 0;

	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);

	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
		    " aq_error %d\n", err, hw->aq.asq_last_status);
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		struct rx_ring *rxr = &que->rxr;
		struct i40e_hmc_obj_txq tctx;
		struct i40e_hmc_obj_rxq rctx;

		/* Setup the HMC TX Context */
		size = que->num_desc * sizeof(struct i40e_tx_desc);
		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
		tctx.new_context = 1;
		tctx.base = (txr->dma.pa / IXL_TX_CTX_BASE_UNITS);
		tctx.qlen = que->num_desc;

		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
		/* Enable HEAD writeback */
		tctx.head_wb_ena = 1;
		tctx.head_wb_addr = txr->dma.pa +
		    (que->num_desc * sizeof(struct i40e_tx_desc));
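		/*
		 * The write-back slot lands just past the last descriptor;
		 * ixl_setup_queue() below sizes the descriptor DMA area
		 * with an extra sizeof(u32) (see its roundup2() call) so
		 * the head index has room at the end of the ring.
		 */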
		tctx.rdylist_act = 0;
		err = i40e_clear_lan_tx_queue_context(hw, i);
			device_printf(dev, "Unable to clear TX context\n");

		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
			device_printf(dev, "Unable to set TX context\n");

		/* Associate the ring with this PF */
		txctl = I40E_QTX_CTL_PF_QUEUE;
		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
		wr32(hw, I40E_QTX_CTL(i), txctl);

		/* Do ring (re)init */
		ixl_init_tx_ring(que);

		/* Next setup the HMC RX Context */
		if (vsi->max_frame_size <= MCLBYTES)
			rxr->mbuf_sz = MCLBYTES;
		else
			rxr->mbuf_sz = MJUMPAGESIZE;

		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;

		/* Set up an RX context for the HMC */
		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
		/* ignore header split for now */
		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
		    vsi->max_frame_size : max_rxmax;

		rctx.dsize = 1;		/* do 32-byte descriptors */
		rctx.hsplit_0 = 0;	/* no HDR split initially */
		rctx.base = (rxr->dma.pa / IXL_RX_CTX_BASE_UNITS);
		rctx.qlen = que->num_desc;
		rctx.tphrdesc_ena = 1;
		rctx.tphwdesc_ena = 1;
		rctx.tphdata_ena = 0;
		rctx.tphhead_ena = 0;
		rctx.lrxqthresh = 2;

		err = i40e_clear_lan_rx_queue_context(hw, i);
			    "Unable to clear RX context %d\n", i);

		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
			device_printf(dev, "Unable to set RX context %d\n", i);

		err = ixl_init_rx_ring(que);
			device_printf(dev, "Fail in init_rx_ring %d\n", i);
2240 /* preserve queue */
2241 if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2242 struct netmap_adapter *na = NA(vsi->ifp);
2243 struct netmap_kring *kring = &na->rx_rings[i];
2244 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2245 wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2247 #endif /* DEV_NETMAP */
2248 wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
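/*
 * Editorial note: outside netmap mode the RX tail is parked at the
 * last descriptor index, handing the entire ring to the hardware.
 */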
2254 /*********************************************************************
2256 * Free all VSI structs.
2258 **********************************************************************/
2260 ixl_free_vsi(struct ixl_vsi *vsi)
2262 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2263 struct ixl_queue *que = vsi->queues;
2265 /* Free station queues */
2269 for (int i = 0; i < vsi->num_queues; i++, que++) {
2270 struct tx_ring *txr = &que->txr;
2271 struct rx_ring *rxr = &que->rxr;
2273 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2276 ixl_free_que_tx(que);
2278 i40e_free_dma_mem(&pf->hw, &txr->dma);
2280 IXL_TX_LOCK_DESTROY(txr);
2282 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2285 ixl_free_que_rx(que);
2287 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2289 IXL_RX_LOCK_DESTROY(rxr);
2291 free(vsi->queues, M_DEVBUF);
2294 /* Free VSI filter list */
2295 ixl_free_mac_filters(vsi);
2299 ixl_free_mac_filters(struct ixl_vsi *vsi)
2301 struct ixl_mac_filter *f;
2303 while (!SLIST_EMPTY(&vsi->ftl)) {
2304 f = SLIST_FIRST(&vsi->ftl);
2305 SLIST_REMOVE_HEAD(&vsi->ftl, next);
3311 * Fill out fields in the queue struct and set up tx/rx memory and structs
2314 ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
2316 device_t dev = pf->dev;
2317 struct i40e_hw *hw = &pf->hw;
2318 struct ixl_vsi *vsi = &pf->vsi;
2319 struct tx_ring *txr = &que->txr;
2320 struct rx_ring *rxr = &que->rxr;
2324 que->num_desc = pf->ringsz;
2329 txr->tail = I40E_QTX_TAIL(que->me);
2331 /* Initialize the TX lock */
2332 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2333 device_get_nameunit(dev), que->me);
2334 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2335 /* Create the TX descriptor ring */
2336 tsize = roundup2((que->num_desc *
2337 sizeof(struct i40e_tx_desc)) +
2338 sizeof(u32), DBA_ALIGN);
2339 if (i40e_allocate_dma_mem(hw,
2340 &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2342 "Unable to allocate TX Descriptor memory\n");
2346 txr->base = (struct i40e_tx_desc *)txr->dma.va;
2347 bzero((void *)txr->base, tsize);
2348 /* Now allocate transmit soft structs for the ring */
2349 if (ixl_allocate_tx_data(que)) {
2351 "Critical Failure setting up TX structures\n");
2355 /* Allocate a buf ring */
2356 txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF,
2357 M_NOWAIT, &txr->mtx);
2358 if (txr->br == NULL) {
2360 "Critical Failure setting up TX buf ring\n");
2365 rsize = roundup2(que->num_desc *
2366 sizeof(union i40e_rx_desc), DBA_ALIGN);
2368 rxr->tail = I40E_QRX_TAIL(que->me);
2370 /* Initialize the RX side lock */
2371 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2372 device_get_nameunit(dev), que->me);
2373 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2375 if (i40e_allocate_dma_mem(hw,
2376 &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2378 "Unable to allocate RX Descriptor memory\n");
2382 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2383 bzero((void *)rxr->base, rsize);
2384 /* Allocate receive soft structs for the ring*/
2385 if (ixl_allocate_rx_data(que)) {
2387 "Critical Failure setting up receive structs\n");
2395 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2396 if (mtx_initialized(&rxr->mtx))
2397 mtx_destroy(&rxr->mtx);
2399 buf_ring_free(txr->br, M_DEVBUF);
2403 i40e_free_dma_mem(&pf->hw, &txr->dma);
2404 if (mtx_initialized(&txr->mtx))
2405 mtx_destroy(&txr->mtx);
2410 /*********************************************************************
2412 * Allocate memory for the VSI (virtual station interface) and its
2413 * associated queues, rings, and the descriptors for each;
2414 * called only once at attach.
2416 **********************************************************************/
2418 ixl_setup_stations(struct ixl_pf *pf)
2420 device_t dev = pf->dev;
2421 struct ixl_vsi *vsi;
2422 struct ixl_queue *que;
2426 vsi->back = (void *)pf;
2432 /* Get memory for the station queues */
2434 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2435 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2436 device_printf(dev, "Unable to allocate queue memory\n");
2441 /* Then setup each queue */
2442 for (int i = 0; i < vsi->num_queues; i++) {
2443 que = &vsi->queues[i];
2444 error = ixl_setup_queue(que, pf, i);
2453 ** Provide an update to the queue RX
2454 ** interrupt moderation value.
2457 ixl_set_queue_rx_itr(struct ixl_queue *que)
2459 struct ixl_vsi *vsi = que->vsi;
2460 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2461 struct i40e_hw *hw = vsi->hw;
2462 struct rx_ring *rxr = &que->rxr;
2467 /* Idle, do nothing */
2468 if (rxr->bytes == 0)
2471 if (pf->dynamic_rx_itr) {
2472 rx_bytes = rxr->bytes/rxr->itr;
2475 /* Adjust latency range */
2476 switch (rxr->latency) {
2477 case IXL_LOW_LATENCY:
2478 if (rx_bytes > 10) {
2479 rx_latency = IXL_AVE_LATENCY;
2480 rx_itr = IXL_ITR_20K;
2483 case IXL_AVE_LATENCY:
2484 if (rx_bytes > 20) {
2485 rx_latency = IXL_BULK_LATENCY;
2486 rx_itr = IXL_ITR_8K;
2487 } else if (rx_bytes <= 10) {
2488 rx_latency = IXL_LOW_LATENCY;
2489 rx_itr = IXL_ITR_100K;
2492 case IXL_BULK_LATENCY:
2493 if (rx_bytes <= 20) {
2494 rx_latency = IXL_AVE_LATENCY;
2495 rx_itr = IXL_ITR_20K;
2500 rxr->latency = rx_latency;
2502 if (rx_itr != rxr->itr) {
2503 /* do an exponential smoothing */
2504 rx_itr = (10 * rx_itr * rxr->itr) /
2505 ((9 * rx_itr) + rxr->itr);
2506 rxr->itr = min(rx_itr, IXL_MAX_ITR);
2507 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2508 que->me), rxr->itr);
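/*
 * Editorial worked example: the smoothing above is a weighted harmonic
 * mean biased toward the previous interval. Stepping from an old ITR
 * of 100 toward a target of 20 gives
 * (10 * 20 * 100) / ((9 * 20) + 100) = 20000 / 280 = 71,
 * so the interval converges over several updates instead of jumping.
 */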
2510 } else { /* We may have toggled to non-dynamic */
2511 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2512 vsi->rx_itr_setting = pf->rx_itr;
2513 /* Update the hardware if needed */
2514 if (rxr->itr != vsi->rx_itr_setting) {
2515 rxr->itr = vsi->rx_itr_setting;
2516 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2517 que->me), rxr->itr);
2527 ** Provide an update to the queue TX
2528 ** interrupt moderation value.
2531 ixl_set_queue_tx_itr(struct ixl_queue *que)
2533 struct ixl_vsi *vsi = que->vsi;
2534 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2535 struct i40e_hw *hw = vsi->hw;
2536 struct tx_ring *txr = &que->txr;
2542 /* Idle, do nothing */
2543 if (txr->bytes == 0)
2546 if (pf->dynamic_tx_itr) {
2547 tx_bytes = txr->bytes/txr->itr;
2550 switch (txr->latency) {
2551 case IXL_LOW_LATENCY:
2552 if (tx_bytes > 10) {
2553 tx_latency = IXL_AVE_LATENCY;
2554 tx_itr = IXL_ITR_20K;
2557 case IXL_AVE_LATENCY:
2558 if (tx_bytes > 20) {
2559 tx_latency = IXL_BULK_LATENCY;
2560 tx_itr = IXL_ITR_8K;
2561 } else if (tx_bytes <= 10) {
2562 tx_latency = IXL_LOW_LATENCY;
2563 tx_itr = IXL_ITR_100K;
2566 case IXL_BULK_LATENCY:
2567 if (tx_bytes <= 20) {
2568 tx_latency = IXL_AVE_LATENCY;
2569 tx_itr = IXL_ITR_20K;
2574 txr->latency = tx_latency;
2576 if (tx_itr != txr->itr) {
2577 /* do an exponential smoothing */
2578 tx_itr = (10 * tx_itr * txr->itr) /
2579 ((9 * tx_itr) + txr->itr);
2580 txr->itr = min(tx_itr, IXL_MAX_ITR);
2581 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2582 que->me), txr->itr);
2585 } else { /* We may have toggled to non-dynamic */
2586 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2587 vsi->tx_itr_setting = pf->tx_itr;
2588 /* Update the hardware if needed */
2589 if (txr->itr != vsi->tx_itr_setting) {
2590 txr->itr = vsi->tx_itr_setting;
2591 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2592 que->me), txr->itr);
2601 ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
2602 struct sysctl_ctx_list *ctx, const char *sysctl_name)
2604 struct sysctl_oid *tree;
2605 struct sysctl_oid_list *child;
2606 struct sysctl_oid_list *vsi_list;
2608 tree = device_get_sysctl_tree(pf->dev);
2609 child = SYSCTL_CHILDREN(tree);
2610 vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
2611 CTLFLAG_RD, NULL, "VSI Number");
2612 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
2614 ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
2619 * ixl_sysctl_qtx_tail_handler
2620 * Retrieves I40E_QTX_TAIL value from hardware
2624 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
2626 struct ixl_queue *que;
2630 que = ((struct ixl_queue *)oidp->oid_arg1);
2633 val = rd32(que->vsi->hw, que->txr.tail);
2634 error = sysctl_handle_int(oidp, &val, 0, req);
2635 if (error || !req->newptr)
2641 * ixl_sysctl_qrx_tail_handler
2642 * Retrieves I40E_QRX_TAIL value from hardware
2646 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
2648 struct ixl_queue *que;
2652 que = ((struct ixl_queue *)oidp->oid_arg1);
2655 val = rd32(que->vsi->hw, que->rxr.tail);
2656 error = sysctl_handle_int(oidp, &val, 0, req);
2657 if (error || !req->newptr)
2664 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
2665 * Writes to the ITR registers immediately.
2668 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
2670 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2671 device_t dev = pf->dev;
2673 int requested_tx_itr;
2675 requested_tx_itr = pf->tx_itr;
2676 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
2677 if ((error) || (req->newptr == NULL))
2679 if (pf->dynamic_tx_itr) {
2681 "Cannot set TX itr value while dynamic TX itr is enabled\n");
2684 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
2686 "Invalid TX itr value; value must be between 0 and %d\n",
2691 pf->tx_itr = requested_tx_itr;
2692 ixl_configure_tx_itr(pf);
2698 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
2699 * Writes to the ITR registers immediately.
2702 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
2704 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2705 device_t dev = pf->dev;
2707 int requested_rx_itr;
2709 requested_rx_itr = pf->rx_itr;
2710 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
2711 if ((error) || (req->newptr == NULL))
2713 if (pf->dynamic_rx_itr) {
2715 "Cannot set RX itr value while dynamic RX itr is enabled\n");
2718 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
2720 "Invalid RX itr value; value must be between 0 and %d\n",
2725 pf->rx_itr = requested_rx_itr;
2726 ixl_configure_rx_itr(pf);
2732 ixl_add_hw_stats(struct ixl_pf *pf)
2734 device_t dev = pf->dev;
2735 struct ixl_vsi *vsi = &pf->vsi;
2736 struct ixl_queue *queues = vsi->queues;
2737 struct i40e_hw_port_stats *pf_stats = &pf->stats;
2739 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2740 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2741 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2742 struct sysctl_oid_list *vsi_list;
2744 struct sysctl_oid *queue_node;
2745 struct sysctl_oid_list *queue_list;
2747 struct tx_ring *txr;
2748 struct rx_ring *rxr;
2749 char queue_namebuf[QUEUE_NAME_LEN];
2751 /* Driver statistics */
2752 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2753 CTLFLAG_RD, &pf->watchdog_events,
2754 "Watchdog timeouts");
2755 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2756 CTLFLAG_RD, &pf->admin_irq,
2757 "Admin Queue IRQ Handled");
2759 ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
2760 vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
2762 /* Queue statistics */
2763 for (int q = 0; q < vsi->num_queues; q++) {
2764 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2765 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
2766 OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
2767 queue_list = SYSCTL_CHILDREN(queue_node);
2769 txr = &(queues[q].txr);
2770 rxr = &(queues[q].rxr);
2772 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2773 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2774 "m_defrag() failed");
2775 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2776 CTLFLAG_RD, &(queues[q].irqs),
2777 "irqs on this queue");
2778 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2779 CTLFLAG_RD, &(queues[q].tso),
2781 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
2782 CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
2783 "Driver tx dma failure in xmit");
2784 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
2785 CTLFLAG_RD, &(queues[q].mss_too_small),
2786 "TSO sends with an MSS less than 64");
2787 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2788 CTLFLAG_RD, &(txr->no_desc),
2789 "Queue No Descriptor Available");
2790 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2791 CTLFLAG_RD, &(txr->total_packets),
2792 "Queue Packets Transmitted");
2793 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2794 CTLFLAG_RD, &(txr->tx_bytes),
2795 "Queue Bytes Transmitted");
2796 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2797 CTLFLAG_RD, &(rxr->rx_packets),
2798 "Queue Packets Received");
2799 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2800 CTLFLAG_RD, &(rxr->rx_bytes),
2801 "Queue Bytes Received");
2802 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err",
2803 CTLFLAG_RD, &(rxr->desc_errs),
2804 "Queue Rx Descriptor Errors");
2805 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
2806 CTLFLAG_RD, &(rxr->itr), 0,
2807 "Queue Rx ITR Interval");
2808 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
2809 CTLFLAG_RD, &(txr->itr), 0,
2810 "Queue Tx ITR Interval");
2812 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done",
2813 CTLFLAG_RD, &(rxr->not_done),
2814 "Queue Rx Descriptors not Done");
2815 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh",
2816 CTLFLAG_RD, &(rxr->next_refresh), 0,
2817 "Queue Rx Descriptors not Done");
2818 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check",
2819 CTLFLAG_RD, &(rxr->next_check), 0,
2820 "Queue Rx Descriptors not Done");
2821 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
2822 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2823 sizeof(struct ixl_queue),
2824 ixl_sysctl_qtx_tail_handler, "IU",
2825 "Queue Transmit Descriptor Tail");
2826 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
2827 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2828 sizeof(struct ixl_queue),
2829 ixl_sysctl_qrx_tail_handler, "IU",
2830 "Queue Receive Descriptor Tail");
2835 ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
2839 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
2840 struct sysctl_oid_list *child,
2841 struct i40e_eth_stats *eth_stats)
2843 struct ixl_sysctl_info ctls[] =
2845 {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2846 {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
2847 "Unicast Packets Received"},
2848 {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
2849 "Multicast Packets Received"},
2850 {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
2851 "Broadcast Packets Received"},
2852 {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
2853 {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2854 {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2855 {&eth_stats->tx_multicast, "mcast_pkts_txd",
2856 "Multicast Packets Transmitted"},
2857 {&eth_stats->tx_broadcast, "bcast_pkts_txd",
2858 "Broadcast Packets Transmitted"},
2863 struct ixl_sysctl_info *entry = ctls;
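/*
 * Editorial note: the loop below relies on ctls[] being terminated by
 * a zeroed sentinel entry (elided in this listing), stopping when
 * entry->stat is NULL.
 */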
2864 while (entry->stat != 0)
2866 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
2867 CTLFLAG_RD, entry->stat,
2868 entry->description);
2874 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
2875 struct sysctl_oid_list *child,
2876 struct i40e_hw_port_stats *stats)
2878 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
2879 CTLFLAG_RD, NULL, "Mac Statistics");
2880 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
2882 struct i40e_eth_stats *eth_stats = &stats->eth;
2883 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
2885 struct ixl_sysctl_info ctls[] =
2887 {&stats->crc_errors, "crc_errors", "CRC Errors"},
2888 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
2889 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
2890 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
2891 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
2892 /* Packet Reception Stats */
2893 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
2894 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
2895 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
2896 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
2897 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
2898 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
2899 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
2900 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
2901 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
2902 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
2903 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
2904 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
2905 /* Packet Transmission Stats */
2906 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
2907 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
2908 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
2909 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
2910 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
2911 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
2912 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
2914 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
2915 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
2916 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
2917 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
2922 struct ixl_sysctl_info *entry = ctls;
2923 while (entry->stat != 0)
2925 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
2926 CTLFLAG_RD, entry->stat,
2927 entry->description);
2933 ixl_set_rss_key(struct ixl_pf *pf)
2935 struct i40e_hw *hw = &pf->hw;
2936 struct ixl_vsi *vsi = &pf->vsi;
2937 device_t dev = pf->dev;
2938 enum i40e_status_code status;
2940 u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
2942 u32 rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687,
2943 0x183cfd8c, 0xce880440, 0x580cbc3c,
2944 0x35897377, 0x328b25e1, 0x4fa98922,
2945 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1,
2950 /* Fetch the configured RSS key */
2951 rss_getkey((uint8_t *) &rss_seed);
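/*
 * Editorial note: under the RSS kernel option the key configured by
 * the stack is fetched here; otherwise the static seed initializer
 * above is used. Either way the key fills IXL_RSS_KEY_SIZE_REG
 * 32-bit words.
 */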
2953 /* Fill out hash function seed */
2954 if (hw->mac.type == I40E_MAC_X722) {
2955 struct i40e_aqc_get_set_rss_key_data key_data;
2956 bcopy(rss_seed, key_data.standard_rss_key, 40);
2957 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
2959 device_printf(dev, "i40e_aq_set_rss_key status %s, error %s\n",
2960 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
2962 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
2963 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
2968 * Configure enabled PCTYPES for RSS.
2971 ixl_set_rss_pctypes(struct ixl_pf *pf)
2973 struct i40e_hw *hw = &pf->hw;
2974 u64 set_hena = 0, hena;
2977 u32 rss_hash_config;
2979 rss_hash_config = rss_gethashconfig();
2980 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2981 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2982 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2983 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2984 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2985 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2986 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2987 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2988 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2989 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2990 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2991 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2992 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2993 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
2995 if (hw->mac.type == I40E_MAC_X722)
2996 set_hena = IXL_DEFAULT_RSS_HENA_X722;
2998 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
3000 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
3001 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
3003 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
3004 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
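/*
 * Editorial note: HENA is a 64-bit enable mask with one bit per
 * hardware packet classifier type (PCTYPE), split across the two
 * 32-bit I40E_PFQF_HENA registers written above.
 */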
3009 ixl_set_rss_hlut(struct ixl_pf *pf)
3011 struct i40e_hw *hw = &pf->hw;
3012 device_t dev = pf->dev;
3013 struct ixl_vsi *vsi = &pf->vsi;
3015 int lut_entry_width;
3017 enum i40e_status_code status;
3019 if (hw->mac.type == I40E_MAC_X722)
3020 lut_entry_width = 7;
3022 lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
3024 /* Populate the LUT with the max number of queues in round-robin fashion */
3026 for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
3029 * Fetch the RSS bucket id for the given indirection entry.
3030 * Cap it at the number of configured buckets (which is
3033 que_id = rss_get_indirection_to_bucket(i);
3034 que_id = que_id % vsi->num_queues;
3036 que_id = i % vsi->num_queues;
3038 lut = (que_id & ((0x1 << lut_entry_width) - 1));
3042 if (hw->mac.type == I40E_MAC_X722) {
3043 status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
3045 device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
3046 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
3048 for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
3049 wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
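/*
 * Editorial note: in this register-based fallback each I40E_PFQF_HLUT
 * register packs four 8-bit LUT entries, hence the table_size >> 2
 * loop bound above.
 */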
3055 ** Set up the PF's RSS parameters.
3058 ixl_config_rss(struct ixl_pf *pf)
3060 ixl_set_rss_key(pf);
3061 ixl_set_rss_pctypes(pf);
3062 ixl_set_rss_hlut(pf);
3066 ** This routine is run via a vlan config EVENT;
3067 ** it enables us to use the HW Filter table since
3068 ** we can get the vlan id. This just creates the
3069 ** entry in the soft version of the VFTA; init will
3070 ** repopulate the real table.
3073 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3075 struct ixl_vsi *vsi = ifp->if_softc;
3076 struct i40e_hw *hw = vsi->hw;
3077 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3079 if (ifp->if_softc != arg) /* Not our event */
3082 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3087 ixl_add_filter(vsi, hw->mac.addr, vtag);
3092 ** This routine is run via a vlan
3093 ** unconfig EVENT; it removes our entry
3094 ** from the soft vfta.
3097 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3099 struct ixl_vsi *vsi = ifp->if_softc;
3100 struct i40e_hw *hw = vsi->hw;
3101 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3103 if (ifp->if_softc != arg)
3106 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3111 ixl_del_filter(vsi, hw->mac.addr, vtag);
3116 ** This routine updates vlan filters; called by init,
3117 ** it scans the filter table and then updates the hw
3118 ** after a soft reset.
3121 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3123 struct ixl_mac_filter *f;
3126 if (vsi->num_vlans == 0)
3129 ** Scan the filter list for vlan entries,
3130 ** mark them for addition and then call
3131 ** for the AQ update.
3133 SLIST_FOREACH(f, &vsi->ftl, next) {
3134 if (f->flags & IXL_FILTER_VLAN) {
3142 printf("setup vlan: no filters found!\n");
3145 flags = IXL_FILTER_VLAN;
3146 flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3147 ixl_add_hw_filters(vsi, flags, cnt);
3152 ** Initialize filter list and add filters that the hardware
3153 ** needs to know about.
3155 ** Requires VSI's filter list & seid to be set before calling.
3158 ixl_init_filters(struct ixl_vsi *vsi)
3160 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3162 /* Add broadcast address */
3163 ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3166 * Prevent Tx flow control frames from being sent out by
3167 * non-firmware transmitters.
3168 * This affects every VSI in the PF.
3170 if (pf->enable_tx_fc_filter)
3171 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
3175 ** This routine adds multicast filters
3178 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3180 struct ixl_mac_filter *f;
3182 /* Does one already exist */
3183 f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3187 f = ixl_get_filter(vsi);
3189 printf("WARNING: no filter available!!\n");
3192 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3193 f->vlan = IXL_VLAN_ANY;
3194 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3201 ixl_reconfigure_filters(struct ixl_vsi *vsi)
3203 ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3207 ** This routine adds macvlan filters
3210 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
3212 struct ixl_mac_filter *f, *tmp;
3216 DEBUGOUT("ixl_add_filter: begin");
3221 /* Does one already exist */
3222 f = ixl_find_filter(vsi, macaddr, vlan);
3226 ** Is this the first vlan being registered, if so we
3227 ** need to remove the ANY filter that indicates we are
3228 ** not in a vlan, and replace that with a 0 filter.
3230 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3231 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3233 ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3234 ixl_add_filter(vsi, macaddr, 0);
3238 f = ixl_get_filter(vsi);
3240 device_printf(dev, "WARNING: no filter available!!\n");
3243 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3245 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3246 if (f->vlan != IXL_VLAN_ANY)
3247 f->flags |= IXL_FILTER_VLAN;
3251 ixl_add_hw_filters(vsi, f->flags, 1);
3256 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
3258 struct ixl_mac_filter *f;
3260 f = ixl_find_filter(vsi, macaddr, vlan);
3264 f->flags |= IXL_FILTER_DEL;
3265 ixl_del_hw_filters(vsi, 1);
3268 /* Check if this is the last vlan removal */
3269 if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3270 /* Switch back to a non-vlan filter */
3271 ixl_del_filter(vsi, macaddr, 0);
3272 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
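/*
 * Editorial note: this mirrors the add path above; once the last VLAN
 * filter is removed, the VLAN-0 filter is swapped back for the
 * catch-all IXL_VLAN_ANY filter so the MAC address still matches
 * untagged traffic.
 */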
3278 ** Find the filter with both matching mac addr and vlan id
3280 struct ixl_mac_filter *
3281 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
3283 struct ixl_mac_filter *f;
3286 SLIST_FOREACH(f, &vsi->ftl, next) {
3287 if (!cmp_etheraddr(f->macaddr, macaddr))
3289 if (f->vlan == vlan) {
3301 ** This routine takes additions to the vsi filter
3302 ** table and creates an Admin Queue call to create
3303 ** the filters in the hardware.
3306 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3308 struct i40e_aqc_add_macvlan_element_data *a, *b;
3309 struct ixl_mac_filter *f;
3318 IXL_PF_LOCK_ASSERT(pf);
3320 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3321 M_DEVBUF, M_NOWAIT | M_ZERO);
3323 device_printf(dev, "add_hw_filters failed to get memory\n");
3328 ** Scan the filter list; each time we find one we
3329 ** add it to the admin queue array and clear its ADD flag.
3332 SLIST_FOREACH(f, &vsi->ftl, next) {
3333 if (f->flags == flags) {
3334 b = &a[j]; // a pox on fvl long names :)
3335 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3336 if (f->vlan == IXL_VLAN_ANY) {
3338 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3340 b->vlan_tag = f->vlan;
3343 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3344 f->flags &= ~IXL_FILTER_ADD;
3351 err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3353 device_printf(dev, "aq_add_macvlan err %d, "
3354 "aq_error %d\n", err, hw->aq.asq_last_status);
3356 vsi->hw_filters_add += j;
3363 ** This routine takes removals in the vsi filter
3364 ** table and creates an Admin Queue call to delete
3365 ** the filters in the hardware.
3368 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3370 struct i40e_aqc_remove_macvlan_element_data *d, *e;
3374 struct ixl_mac_filter *f, *f_temp;
3377 DEBUGOUT("ixl_del_hw_filters: begin\n");
3383 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3384 M_DEVBUF, M_NOWAIT | M_ZERO);
3386 printf("del hw filter failed to get memory\n");
3390 SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3391 if (f->flags & IXL_FILTER_DEL) {
3392 e = &d[j]; // a pox on fvl long names :)
3393 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3394 e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3395 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3396 /* delete entry from vsi list */
3397 SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3405 err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3406 if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3408 for (int i = 0; i < j; i++)
3409 sc += (!d[i].error_code);
3410 vsi->hw_filters_del += sc;
3412 "Failed to remove %d/%d filters, aq error %d\n",
3413 j - sc, j, hw->aq.asq_last_status);
3415 vsi->hw_filters_del += j;
3419 DEBUGOUT("ixl_del_hw_filters: end\n");
3424 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3426 struct i40e_hw *hw = &pf->hw;
3431 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3433 ixl_dbg(pf, IXL_DBG_EN_DIS,
3434 "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
3437 i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
3439 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3440 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3441 I40E_QTX_ENA_QENA_STAT_MASK;
3442 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
3443 /* Verify the enable took */
3444 for (int j = 0; j < 10; j++) {
3445 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3446 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3448 i40e_msec_delay(10);
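/*
 * Editorial note: the loop above polls for the hardware to reflect
 * QENA_STAT after QENA_REQ was set, allowing up to ~100 ms
 * (10 polls x 10 ms) before the failure report below.
 */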
3450 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3451 device_printf(pf->dev, "TX queue %d still disabled!\n",
3460 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3462 struct i40e_hw *hw = &pf->hw;
3467 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3469 ixl_dbg(pf, IXL_DBG_EN_DIS,
3470 "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
3473 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3474 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3475 I40E_QRX_ENA_QENA_STAT_MASK;
3476 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
3477 /* Verify the enable took */
3478 for (int j = 0; j < 10; j++) {
3479 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3480 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3482 i40e_msec_delay(10);
3484 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3485 device_printf(pf->dev, "RX queue %d still disabled!\n",
3494 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3498 error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
3499 /* Called function already prints error message */
3502 error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
3506 /* For PF VSI only */
3508 ixl_enable_rings(struct ixl_vsi *vsi)
3510 struct ixl_pf *pf = vsi->back;
3513 for (int i = 0; i < vsi->num_queues; i++) {
3514 error = ixl_enable_ring(pf, &pf->qtag, i);
3523 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3525 struct i40e_hw *hw = &pf->hw;
3530 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3532 i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
3533 i40e_usec_delay(500);
3535 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3536 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3537 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
3538 /* Verify the disable took */
3539 for (int j = 0; j < 10; j++) {
3540 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3541 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3543 i40e_msec_delay(10);
3545 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3546 device_printf(pf->dev, "TX queue %d still enabled!\n",
3555 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3557 struct i40e_hw *hw = &pf->hw;
3562 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3564 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3565 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3566 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
3567 /* Verify the disable took */
3568 for (int j = 0; j < 10; j++) {
3569 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3570 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3572 i40e_msec_delay(10);
3574 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3575 device_printf(pf->dev, "RX queue %d still enabled!\n",
3584 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3588 error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
3589 /* Called function already prints error message */
3592 error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
3596 /* For PF VSI only */
3598 ixl_disable_rings(struct ixl_vsi *vsi)
3600 struct ixl_pf *pf = vsi->back;
3603 for (int i = 0; i < vsi->num_queues; i++) {
3604 error = ixl_disable_ring(pf, &pf->qtag, i);
3613 * ixl_handle_mdd_event
3615 * Called from interrupt handler to identify possibly malicious VFs
3616 * (though it also detects events from the PF)
3619 ixl_handle_mdd_event(struct ixl_pf *pf)
3621 struct i40e_hw *hw = &pf->hw;
3622 device_t dev = pf->dev;
3623 bool mdd_detected = false;
3624 bool pf_mdd_detected = false;
3627 /* find what triggered the MDD event */
3628 reg = rd32(hw, I40E_GL_MDET_TX);
3629 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3630 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3631 I40E_GL_MDET_TX_PF_NUM_SHIFT;
3632 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3633 I40E_GL_MDET_TX_EVENT_SHIFT;
3634 u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3635 I40E_GL_MDET_TX_QUEUE_SHIFT;
3637 "Malicious Driver Detection event %d"
3638 " on TX queue %d, pf number %d\n",
3639 event, queue, pf_num);
3640 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3641 mdd_detected = true;
3643 reg = rd32(hw, I40E_GL_MDET_RX);
3644 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3645 u8 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3646 I40E_GL_MDET_RX_FUNCTION_SHIFT;
3647 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3648 I40E_GL_MDET_RX_EVENT_SHIFT;
3649 u16 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3650 I40E_GL_MDET_RX_QUEUE_SHIFT;
3652 "Malicious Driver Detection event %d"
3653 " on RX queue %d, pf number %d\n",
3654 event, queue, pf_num);
3655 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3656 mdd_detected = true;
3660 reg = rd32(hw, I40E_PF_MDET_TX);
3661 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3662 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3664 "MDD TX event is for this function!");
3665 pf_mdd_detected = true;
3667 reg = rd32(hw, I40E_PF_MDET_RX);
3668 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3669 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3671 "MDD RX event is for this function!");
3672 pf_mdd_detected = true;
3676 /* re-enable mdd interrupt cause */
3677 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3678 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3679 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3684 ixl_enable_intr(struct ixl_vsi *vsi)
3686 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3687 struct i40e_hw *hw = vsi->hw;
3688 struct ixl_queue *que = vsi->queues;
3691 for (int i = 0; i < vsi->num_queues; i++, que++)
3692 ixl_enable_queue(hw, que->me);
3694 ixl_enable_intr0(hw);
3698 ixl_disable_rings_intr(struct ixl_vsi *vsi)
3700 struct i40e_hw *hw = vsi->hw;
3701 struct ixl_queue *que = vsi->queues;
3703 for (int i = 0; i < vsi->num_queues; i++, que++)
3704 ixl_disable_queue(hw, que->me);
3708 ixl_enable_intr0(struct i40e_hw *hw)
3712 /* Use IXL_ITR_NONE so ITR isn't updated here */
3713 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3714 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3715 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3716 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3720 ixl_disable_intr0(struct i40e_hw *hw)
3724 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3725 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
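/*
 * Editorial note: the enable path sets INTENA plus CLEARPBA (clearing
 * the pending-bit array); both paths pass IXL_ITR_NONE in ITR_INDX so
 * neither write disturbs the configured ITR intervals.
 */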
3730 ixl_enable_queue(struct i40e_hw *hw, int id)
3734 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3735 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3736 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3737 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3741 ixl_disable_queue(struct i40e_hw *hw, int id)
3745 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3746 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3750 ixl_update_stats_counters(struct ixl_pf *pf)
3752 struct i40e_hw *hw = &pf->hw;
3753 struct ixl_vsi *vsi = &pf->vsi;
3756 struct i40e_hw_port_stats *nsd = &pf->stats;
3757 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3759 /* Update hw stats */
3760 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3761 pf->stat_offsets_loaded,
3762 &osd->crc_errors, &nsd->crc_errors);
3763 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3764 pf->stat_offsets_loaded,
3765 &osd->illegal_bytes, &nsd->illegal_bytes);
3766 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3767 I40E_GLPRT_GORCL(hw->port),
3768 pf->stat_offsets_loaded,
3769 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3770 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3771 I40E_GLPRT_GOTCL(hw->port),
3772 pf->stat_offsets_loaded,
3773 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3774 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3775 pf->stat_offsets_loaded,
3776 &osd->eth.rx_discards,
3777 &nsd->eth.rx_discards);
3778 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3779 I40E_GLPRT_UPRCL(hw->port),
3780 pf->stat_offsets_loaded,
3781 &osd->eth.rx_unicast,
3782 &nsd->eth.rx_unicast);
3783 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3784 I40E_GLPRT_UPTCL(hw->port),
3785 pf->stat_offsets_loaded,
3786 &osd->eth.tx_unicast,
3787 &nsd->eth.tx_unicast);
3788 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3789 I40E_GLPRT_MPRCL(hw->port),
3790 pf->stat_offsets_loaded,
3791 &osd->eth.rx_multicast,
3792 &nsd->eth.rx_multicast);
3793 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3794 I40E_GLPRT_MPTCL(hw->port),
3795 pf->stat_offsets_loaded,
3796 &osd->eth.tx_multicast,
3797 &nsd->eth.tx_multicast);
3798 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3799 I40E_GLPRT_BPRCL(hw->port),
3800 pf->stat_offsets_loaded,
3801 &osd->eth.rx_broadcast,
3802 &nsd->eth.rx_broadcast);
3803 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3804 I40E_GLPRT_BPTCL(hw->port),
3805 pf->stat_offsets_loaded,
3806 &osd->eth.tx_broadcast,
3807 &nsd->eth.tx_broadcast);
3809 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3810 pf->stat_offsets_loaded,
3811 &osd->tx_dropped_link_down,
3812 &nsd->tx_dropped_link_down);
3813 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3814 pf->stat_offsets_loaded,
3815 &osd->mac_local_faults,
3816 &nsd->mac_local_faults);
3817 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3818 pf->stat_offsets_loaded,
3819 &osd->mac_remote_faults,
3820 &nsd->mac_remote_faults);
3821 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3822 pf->stat_offsets_loaded,
3823 &osd->rx_length_errors,
3824 &nsd->rx_length_errors);
3826 /* Flow control (LFC) stats */
3827 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3828 pf->stat_offsets_loaded,
3829 &osd->link_xon_rx, &nsd->link_xon_rx);
3830 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3831 pf->stat_offsets_loaded,
3832 &osd->link_xon_tx, &nsd->link_xon_tx);
3833 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3834 pf->stat_offsets_loaded,
3835 &osd->link_xoff_rx, &nsd->link_xoff_rx);
3836 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3837 pf->stat_offsets_loaded,
3838 &osd->link_xoff_tx, &nsd->link_xoff_tx);
3840 /* Packet size stats rx */
3841 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3842 I40E_GLPRT_PRC64L(hw->port),
3843 pf->stat_offsets_loaded,
3844 &osd->rx_size_64, &nsd->rx_size_64);
3845 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3846 I40E_GLPRT_PRC127L(hw->port),
3847 pf->stat_offsets_loaded,
3848 &osd->rx_size_127, &nsd->rx_size_127);
3849 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3850 I40E_GLPRT_PRC255L(hw->port),
3851 pf->stat_offsets_loaded,
3852 &osd->rx_size_255, &nsd->rx_size_255);
3853 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3854 I40E_GLPRT_PRC511L(hw->port),
3855 pf->stat_offsets_loaded,
3856 &osd->rx_size_511, &nsd->rx_size_511);
3857 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3858 I40E_GLPRT_PRC1023L(hw->port),
3859 pf->stat_offsets_loaded,
3860 &osd->rx_size_1023, &nsd->rx_size_1023);
3861 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3862 I40E_GLPRT_PRC1522L(hw->port),
3863 pf->stat_offsets_loaded,
3864 &osd->rx_size_1522, &nsd->rx_size_1522);
3865 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3866 I40E_GLPRT_PRC9522L(hw->port),
3867 pf->stat_offsets_loaded,
3868 &osd->rx_size_big, &nsd->rx_size_big);
3870 /* Packet size stats tx */
3871 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3872 I40E_GLPRT_PTC64L(hw->port),
3873 pf->stat_offsets_loaded,
3874 &osd->tx_size_64, &nsd->tx_size_64);
3875 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3876 I40E_GLPRT_PTC127L(hw->port),
3877 pf->stat_offsets_loaded,
3878 &osd->tx_size_127, &nsd->tx_size_127);
3879 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3880 I40E_GLPRT_PTC255L(hw->port),
3881 pf->stat_offsets_loaded,
3882 &osd->tx_size_255, &nsd->tx_size_255);
3883 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3884 I40E_GLPRT_PTC511L(hw->port),
3885 pf->stat_offsets_loaded,
3886 &osd->tx_size_511, &nsd->tx_size_511);
3887 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3888 I40E_GLPRT_PTC1023L(hw->port),
3889 pf->stat_offsets_loaded,
3890 &osd->tx_size_1023, &nsd->tx_size_1023);
3891 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3892 I40E_GLPRT_PTC1522L(hw->port),
3893 pf->stat_offsets_loaded,
3894 &osd->tx_size_1522, &nsd->tx_size_1522);
3895 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3896 I40E_GLPRT_PTC9522L(hw->port),
3897 pf->stat_offsets_loaded,
3898 &osd->tx_size_big, &nsd->tx_size_big);
3900 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
3901 pf->stat_offsets_loaded,
3902 &osd->rx_undersize, &nsd->rx_undersize);
3903 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
3904 pf->stat_offsets_loaded,
3905 &osd->rx_fragments, &nsd->rx_fragments);
3906 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
3907 pf->stat_offsets_loaded,
3908 &osd->rx_oversize, &nsd->rx_oversize);
3909 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
3910 pf->stat_offsets_loaded,
3911 &osd->rx_jabber, &nsd->rx_jabber);
3912 pf->stat_offsets_loaded = true;
3915 /* Update vsi stats */
3916 ixl_update_vsi_stats(vsi);
3918 for (int i = 0; i < pf->num_vfs; i++) {
3919 struct ixl_vf *vf = &pf->vfs[i];
3920 if (vf->vf_flags & VF_FLAG_ENABLED)
3921 ixl_update_eth_stats(&pf->vfs[i].vsi);
3926 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
3928 struct i40e_hw *hw = &pf->hw;
3929 struct ixl_vsi *vsi = &pf->vsi;
3930 device_t dev = pf->dev;
3934 is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
3939 error = i40e_shutdown_lan_hmc(hw);
3942 "Shutdown LAN HMC failed with code %d\n", error);
3943 ixl_disable_intr0(hw);
3944 ixl_teardown_adminq_msix(pf);
3945 error = i40e_shutdown_adminq(hw);
3948 "Shutdown Admin queue failed with code %d\n", error);
3951 error = i40e_init_adminq(hw);
3952 if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
3953 device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
3956 error = ixl_setup_adminq_msix(pf);
3958 device_printf(dev, "ixl_setup_adminq_msix error: %d\n",
3961 ixl_configure_intr0_msix(pf);
3962 ixl_enable_intr0(hw);
3963 error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
3964 hw->func_caps.num_rx_qp, 0, 0);
3966 device_printf(dev, "init_lan_hmc failed: %d\n", error);
3968 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
3970 device_printf(dev, "configure_lan_hmc failed: %d\n", error);
3979 ixl_handle_empr_reset(struct ixl_pf *pf)
3981 struct i40e_hw *hw = &pf->hw;
3982 device_t dev = pf->dev;
3986 /* Typically finishes within 3-4 seconds */
3987 while (count++ < 100) {
3988 reg = rd32(hw, I40E_GLGEN_RSTAT)
3989 & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
3991 i40e_msec_delay(100);
3995 ixl_dbg(pf, IXL_DBG_INFO,
3996 "EMPR reset wait count: %d\n", count);
3998 device_printf(dev, "Rebuilding driver state...\n");
3999 ixl_rebuild_hw_structs_after_reset(pf);
4000 device_printf(dev, "Rebuilding driver state done.\n");
4002 atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
4006 ** Tasklet handler for MSIX Adminq interrupts
4007 ** - done outside the interrupt context since it might sleep
4010 ixl_do_adminq(void *context, int pending)
4012 struct ixl_pf *pf = context;
4013 struct i40e_hw *hw = &pf->hw;
4014 struct i40e_arq_event_info event;
4016 device_t dev = pf->dev;
4020 if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
4021 /* Flag cleared at end of this function */
4022 ixl_handle_empr_reset(pf);
4026 /* Admin Queue handling */
4027 event.buf_len = IXL_AQ_BUF_SZ;
4028 event.msg_buf = malloc(event.buf_len,
4029 M_DEVBUF, M_NOWAIT | M_ZERO);
4030 if (!event.msg_buf) {
4031 device_printf(dev, "%s: Unable to allocate memory for Admin"
4032 " Queue event!\n", __func__);
4037 /* clean and process any events */
4039 ret = i40e_clean_arq_element(hw, &event, &result);
4042 opcode = LE16_TO_CPU(event.desc.opcode);
4043 ixl_dbg(pf, IXL_DBG_AQ,
4044 "Admin Queue event: %#06x\n", opcode);
4046 case i40e_aqc_opc_get_link_status:
4047 ixl_link_event(pf, &event);
4049 case i40e_aqc_opc_send_msg_to_pf:
4051 ixl_handle_vf_msg(pf, &event);
4054 case i40e_aqc_opc_event_lan_overflow:
4059 } while (result && (loop++ < IXL_ADM_LIMIT));
4061 free(event.msg_buf, M_DEVBUF);
4064 * If there are still messages to process, reschedule ourselves.
4065 * Otherwise, re-enable our interrupt.
4068 taskqueue_enqueue(pf->tq, &pf->adminq);
4070 ixl_enable_intr0(hw);
4076 * Update VSI-specific ethernet statistics counters.
4079 ixl_update_eth_stats(struct ixl_vsi *vsi)
4081 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4082 struct i40e_hw *hw = &pf->hw;
4083 struct i40e_eth_stats *es;
4084 struct i40e_eth_stats *oes;
4085 struct i40e_hw_port_stats *nsd;
4086 u16 stat_idx = vsi->info.stat_counter_idx;
4088 es = &vsi->eth_stats;
4089 oes = &vsi->eth_stats_offsets;
4092 /* Gather up the stats that the hw collects */
4093 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4094 vsi->stat_offsets_loaded,
4095 &oes->tx_errors, &es->tx_errors);
4096 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4097 vsi->stat_offsets_loaded,
4098 &oes->rx_discards, &es->rx_discards);
4100 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4101 I40E_GLV_GORCL(stat_idx),
4102 vsi->stat_offsets_loaded,
4103 &oes->rx_bytes, &es->rx_bytes);
4104 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4105 I40E_GLV_UPRCL(stat_idx),
4106 vsi->stat_offsets_loaded,
4107 &oes->rx_unicast, &es->rx_unicast);
4108 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4109 I40E_GLV_MPRCL(stat_idx),
4110 vsi->stat_offsets_loaded,
4111 &oes->rx_multicast, &es->rx_multicast);
4112 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4113 I40E_GLV_BPRCL(stat_idx),
4114 vsi->stat_offsets_loaded,
4115 &oes->rx_broadcast, &es->rx_broadcast);
4117 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4118 I40E_GLV_GOTCL(stat_idx),
4119 vsi->stat_offsets_loaded,
4120 &oes->tx_bytes, &es->tx_bytes);
4121 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4122 I40E_GLV_UPTCL(stat_idx),
4123 vsi->stat_offsets_loaded,
4124 &oes->tx_unicast, &es->tx_unicast);
4125 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4126 I40E_GLV_MPTCL(stat_idx),
4127 vsi->stat_offsets_loaded,
4128 &oes->tx_multicast, &es->tx_multicast);
4129 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4130 I40E_GLV_BPTCL(stat_idx),
4131 vsi->stat_offsets_loaded,
4132 &oes->tx_broadcast, &es->tx_broadcast);
4133 vsi->stat_offsets_loaded = true;
4137 ixl_update_vsi_stats(struct ixl_vsi *vsi)
4141 struct i40e_eth_stats *es;
4144 struct i40e_hw_port_stats *nsd;
4148 es = &vsi->eth_stats;
4151 ixl_update_eth_stats(vsi);
4153 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4154 for (int i = 0; i < vsi->num_queues; i++)
4155 tx_discards += vsi->queues[i].txr.br->br_drops;
4157 /* Update ifnet stats */
4158 IXL_SET_IPACKETS(vsi, es->rx_unicast +
4161 IXL_SET_OPACKETS(vsi, es->tx_unicast +
4164 IXL_SET_IBYTES(vsi, es->rx_bytes);
4165 IXL_SET_OBYTES(vsi, es->tx_bytes);
4166 IXL_SET_IMCASTS(vsi, es->rx_multicast);
4167 IXL_SET_OMCASTS(vsi, es->tx_multicast);
4169 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4170 nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4172 IXL_SET_OERRORS(vsi, es->tx_errors);
4173 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4174 IXL_SET_OQDROPS(vsi, tx_discards);
4175 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4176 IXL_SET_COLLISIONS(vsi, 0);
4180 * Reset all of the stats for the given pf
4183 ixl_pf_reset_stats(struct ixl_pf *pf)
4185 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4186 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4187 pf->stat_offsets_loaded = false;
4191 * Resets all stats of the given vsi
4194 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4196 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4197 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4198 vsi->stat_offsets_loaded = false;
4202 * Read and update a 48 bit stat from the hw
4204 * Since the device stats are not reset at PFReset, they likely will not
4205 * be zeroed when the driver starts. We'll save the first values read
4206 * and use them as offsets to be subtracted from the raw values in order
4207 * to report stats that count from zero.
4210 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4211 bool offset_loaded, u64 *offset, u64 *stat)
4215 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4216 new_data = rd64(hw, loreg);
4219 * Use two rd32's instead of one rd64; FreeBSD versions before
4220 * 10 don't support 64-bit bus reads/writes.
4222 new_data = rd32(hw, loreg);
4223 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4228 if (new_data >= *offset)
4229 *stat = new_data - *offset;
4231 *stat = (new_data + ((u64)1 << 48)) - *offset;
4232 *stat &= 0xFFFFFFFFFFFFULL;
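/*
 * Editorial worked example: if the 48-bit counter wrapped from an
 * offset of 0xFFFFFFFFFFF0 to a reading of 0x10, new_data < *offset,
 * so the branch above computes (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20;
 * the final mask keeps the result within 48 bits.
 */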
4236 * Read and update a 32 bit stat from the hw
4239 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4240 bool offset_loaded, u64 *offset, u64 *stat)
4244 new_data = rd32(hw, reg);
4247 if (new_data >= *offset)
4248 *stat = (u32)(new_data - *offset);
4250 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4254 ixl_add_device_sysctls(struct ixl_pf *pf)
4256 device_t dev = pf->dev;
4257 struct i40e_hw *hw = &pf->hw;
4259 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4260 struct sysctl_oid_list *ctx_list =
4261 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
4263 struct sysctl_oid *debug_node;
4264 struct sysctl_oid_list *debug_list;
4266 struct sysctl_oid *fec_node;
4267 struct sysctl_oid_list *fec_list;
4269 /* Set up sysctls */
4270 SYSCTL_ADD_PROC(ctx, ctx_list,
4271 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
4272 pf, 0, ixl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
4274 SYSCTL_ADD_PROC(ctx, ctx_list,
4275 OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
4276 pf, 0, ixl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
4278 SYSCTL_ADD_PROC(ctx, ctx_list,
4279 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
4280 pf, 0, ixl_current_speed, "A", "Current Port Speed");
4282 SYSCTL_ADD_PROC(ctx, ctx_list,
4283 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
4284 pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
4286 SYSCTL_ADD_PROC(ctx, ctx_list,
4287 OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
4288 pf, 0, ixl_sysctl_unallocated_queues, "I",
4289 "Queues not allocated to a PF or VF");
4291 SYSCTL_ADD_PROC(ctx, ctx_list,
4292 OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
4293 pf, 0, ixl_sysctl_pf_tx_itr, "I",
4294 "Immediately set TX ITR value for all queues");
4296 SYSCTL_ADD_PROC(ctx, ctx_list,
4297 OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
4298 pf, 0, ixl_sysctl_pf_rx_itr, "I",
4299 "Immediately set RX ITR value for all queues");
4301 SYSCTL_ADD_INT(ctx, ctx_list,
4302 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
4303 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
4305 SYSCTL_ADD_INT(ctx, ctx_list,
4306 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
4307 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
4309 /* Add FEC sysctls for 25G adapters */
4311 * XXX: These settings can be changed, but that isn't supported,
4312 * so these are read-only for now.
4314 if (hw->device_id == I40E_DEV_ID_25G_B
4315 || hw->device_id == I40E_DEV_ID_25G_SFP28) {
4316 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
4317 OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
4318 fec_list = SYSCTL_CHILDREN(fec_node);
4320 SYSCTL_ADD_PROC(ctx, fec_list,
4321 OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RD,
4322 pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
4324 SYSCTL_ADD_PROC(ctx, fec_list,
4325 OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RD,
4326 pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
4328 SYSCTL_ADD_PROC(ctx, fec_list,
4329 OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RD,
4330 pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
4332 SYSCTL_ADD_PROC(ctx, fec_list,
4333 OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RD,
4334 pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
4336 SYSCTL_ADD_PROC(ctx, fec_list,
4337 OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RD,
4338 pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
4341 /* Add sysctls meant to print debug information, but don't list them
4342 * in "sysctl -a" output. */
4343 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
4344 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
4345 debug_list = SYSCTL_CHILDREN(debug_node);
4347 SYSCTL_ADD_UINT(ctx, debug_list,
4348 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
4349 &pf->hw.debug_mask, 0, "Shared code debug message level");
4351 SYSCTL_ADD_UINT(ctx, debug_list,
4352 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
4353 &pf->dbg_mask, 0, "Non-shared code debug message level");
4355 SYSCTL_ADD_PROC(ctx, debug_list,
4356 OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
4357 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
4359 SYSCTL_ADD_PROC(ctx, debug_list,
4360 OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
4361 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
4363 SYSCTL_ADD_PROC(ctx, debug_list,
4364 OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
4365 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
4367 SYSCTL_ADD_PROC(ctx, debug_list,
4368 OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
4369 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
4371 SYSCTL_ADD_PROC(ctx, debug_list,
4372 OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
4373 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
4375 SYSCTL_ADD_PROC(ctx, debug_list,
4376 OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
4377 pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
4379 SYSCTL_ADD_PROC(ctx, debug_list,
4380 OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
4381 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
4383 SYSCTL_ADD_PROC(ctx, debug_list,
4384 OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
4385 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
4387 SYSCTL_ADD_PROC(ctx, debug_list,
4388 OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
4389 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
4392 SYSCTL_ADD_PROC(ctx, debug_list,
4393 OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
4394 pf, 0, ixl_sysctl_read_i2c_byte, "I", "Read byte from I2C bus");
4396 SYSCTL_ADD_PROC(ctx, debug_list,
4397 OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
4398 pf, 0, ixl_sysctl_write_i2c_byte, "I", "Write byte to I2C bus");
4402 SYSCTL_ADD_UINT(ctx, debug_list,
4403 OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
4404 0, "PF/VF Virtual Channel debug level");
4409 * Primarily for finding out how many queues can be assigned to VFs at runtime.
4413 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
4415 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4419 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
4422 return sysctl_handle_int(oidp, NULL, queues, req);
4426 ** Set flow control using sysctl: 0 - off, 1 - rx pause, 2 - tx pause, 3 - full
4433 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4435 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4436 struct i40e_hw *hw = &pf->hw;
4437 device_t dev = pf->dev;
4438 int requested_fc, error = 0;
4439 enum i40e_status_code aq_error = 0;
4443 requested_fc = pf->fc;
4444 error = sysctl_handle_int(oidp, &requested_fc, 0, req);
4445 if ((error) || (req->newptr == NULL))
4447 if (requested_fc < 0 || requested_fc > 3) {
4449 "Invalid fc mode; valid modes are 0 through 3\n");
4453 /* Set fc ability for port */
4454 hw->fc.requested_mode = requested_fc;
4455 aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4458 "%s: Error setting new fc mode %d; fc_err %#x\n",
4459 __func__, aq_error, fc_aq_err);
4462 pf->fc = requested_fc;
4464 /* Get new link state */
4465 i40e_msec_delay(250);
4466 hw->phy.get_link_info = TRUE;
4467 i40e_get_link_status(hw, &pf->link_up);
4473 ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
4487 switch (link_speed) {
4488 case I40E_LINK_SPEED_100MB:
4491 case I40E_LINK_SPEED_1GB:
4494 case I40E_LINK_SPEED_10GB:
4497 case I40E_LINK_SPEED_40GB:
4500 case I40E_LINK_SPEED_20GB:
4503 case I40E_LINK_SPEED_25GB:
4506 case I40E_LINK_SPEED_UNKNOWN:
4512 return speeds[index];
4516 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4518 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4519 struct i40e_hw *hw = &pf->hw;
4522 ixl_update_link_status(pf);
4524 error = sysctl_handle_string(oidp,
4525 ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
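/*
 * Each speedmap entry below packs an AQ link-speed bit in its low byte and
 * the matching sysctl flag in its high byte, so one table serves both
 * directions. A worked example (sketch):
 *
 *   ixl_convert_sysctl_aq_link_speed(0x4, true)  -> I40E_LINK_SPEED_10GB
 *   ixl_convert_sysctl_aq_link_speed(I40E_LINK_SPEED_10GB, false) -> 0x4
 */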
4531 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
4533 static u16 speedmap[6] = {
4534 (I40E_LINK_SPEED_100MB | (0x1 << 8)),
4535 (I40E_LINK_SPEED_1GB | (0x2 << 8)),
4536 (I40E_LINK_SPEED_10GB | (0x4 << 8)),
4537 (I40E_LINK_SPEED_20GB | (0x8 << 8)),
4538 (I40E_LINK_SPEED_25GB | (0x10 << 8)),
4539 (I40E_LINK_SPEED_40GB | (0x20 << 8))
4543 for (int i = 0; i < 6; i++) {
4545 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
4547 retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
4554 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4556 struct i40e_hw *hw = &pf->hw;
4557 device_t dev = pf->dev;
4558 struct i40e_aq_get_phy_abilities_resp abilities;
4559 struct i40e_aq_set_phy_config config;
4560 enum i40e_status_code aq_error = 0;
4562 /* Get current capability information */
4563 aq_error = i40e_aq_get_phy_capabilities(hw,
4564 FALSE, FALSE, &abilities, NULL);
4567 "%s: Error getting phy capabilities %d,"
4568 " aq error: %d\n", __func__, aq_error,
4569 hw->aq.asq_last_status);
4573 /* Prepare new config */
4574 bzero(&config, sizeof(config));
4575 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
4576 config.phy_type = abilities.phy_type;
4577 config.phy_type_ext = abilities.phy_type_ext;
4578 config.abilities = abilities.abilities
4579 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4580 config.eee_capability = abilities.eee_capability;
4581 config.eeer = abilities.eeer_val;
4582 config.low_power_ctrl = abilities.d3_lpan;
4584 /* Do aq command & restart link */
4585 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4588 "%s: Error setting new phy config %d,"
4589 " aq error: %d\n", __func__, aq_error,
4590 hw->aq.asq_last_status);
4598 ** Control link advertise speed:
4600 ** 0x1 - advertise 100 Mb
4601 ** 0x2 - advertise 1G
4602 ** 0x4 - advertise 10G
4603 ** 0x8 - advertise 20G
4604 ** 0x10 - advertise 25G
4605 ** 0x20 - advertise 40G
4607 ** Set to 0 to disable link
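/*
 * Flags may be ORed together; e.g. 0x6 advertises both 1G and 10G. A
 * likely invocation, assuming the PF attached as unit 0:
 *
 *   sysctl dev.ixl.0.advertise_speed=0x6
 */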
4610 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4612 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4613 struct i40e_hw *hw = &pf->hw;
4614 device_t dev = pf->dev;
4615 u8 converted_speeds;
4616 int requested_ls = 0;
4619 /* Read in new mode */
4620 requested_ls = pf->advertised_speed;
4621 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4622 if ((error) || (req->newptr == NULL))
4624 /* Check if changing speeds is supported */
4625 switch (hw->device_id) {
4626 case I40E_DEV_ID_25G_B:
4627 case I40E_DEV_ID_25G_SFP28:
4628 device_printf(dev, "Changing advertised speeds not supported"
4629 " on this device.\n");
4632 if (requested_ls < 0 || requested_ls > 0xff) {
4635 /* Check for valid value */
4636 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
4637 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
4638 device_printf(dev, "Invalid advertised speed; "
4639 "valid flags are: 0x%02x\n",
4640 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
4644 error = ixl_set_advertised_speeds(pf, requested_ls);
4648 pf->advertised_speed = requested_ls;
4649 ixl_update_link_status(pf);
4654 * Input: bitmap of enum i40e_aq_link_speed
4657 ixl_max_aq_speed_to_value(u8 link_speeds)
4659 if (link_speeds & I40E_LINK_SPEED_40GB)
4661 if (link_speeds & I40E_LINK_SPEED_25GB)
4663 if (link_speeds & I40E_LINK_SPEED_20GB)
4665 if (link_speeds & I40E_LINK_SPEED_10GB)
4667 if (link_speeds & I40E_LINK_SPEED_1GB)
4669 if (link_speeds & I40E_LINK_SPEED_100MB)
4670 return IF_Mbps(100);
4672 /* Minimum supported link speed */
4673 return IF_Mbps(100);
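/*
 * Example: a bitmap of (I40E_LINK_SPEED_10GB | I40E_LINK_SPEED_1GB) yields
 * the 10G value, since the checks above run from fastest to slowest and
 * return on the first bit that is set.
 */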
4677 ** Get the width and transaction speed of
4678 ** the bus this adapter is plugged into.
4681 ixl_get_bus_info(struct ixl_pf *pf)
4683 struct i40e_hw *hw = &pf->hw;
4684 device_t dev = pf->dev;
4686 u32 offset, num_ports;
4689 /* Some devices don't use PCIe */
4690 if (hw->mac.type == I40E_MAC_X722)
4693 /* Read PCI Express Capabilities Link Status Register */
4694 pci_find_cap(dev, PCIY_EXPRESS, &offset);
4695 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4697 /* Fill out hw struct with PCIE info */
4698 i40e_set_pci_config_data(hw, link);
4700 /* Use info to print out bandwidth messages */
4701 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4702 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4703 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4704 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4705 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4706 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4707 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
4708 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4712 * If the adapter is in a slot already running at its maximum supported
4713 * PCIe speed and width, no warning needs to be printed.
4715 if (hw->bus.speed >= i40e_bus_speed_8000
4716 && hw->bus.width >= i40e_bus_width_pcie_x8)
4719 num_ports = bitcount32(hw->func_caps.valid_functions);
4720 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
4722 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
4723 device_printf(dev, "PCI-Express bandwidth available"
4724 " for this device may be insufficient for"
4725 " optimal performance.\n");
4726 device_printf(dev, "Please move the device to a different"
4727 " PCI-e link with more lanes and/or higher"
4728 " transfer rate.\n");
4733 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4735 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4736 struct i40e_hw *hw = &pf->hw;
4739 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4740 ixl_nvm_version_str(hw, sbuf);
4748 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
4750 if ((nvma->command == I40E_NVM_READ) &&
4751 ((nvma->config & 0xFF) == 0xF) &&
4752 (((nvma->config & 0xF00) >> 8) == 0xF) &&
4753 (nvma->offset == 0) &&
4754 (nvma->data_size == 1)) {
4755 // device_printf(dev, "- Get Driver Status Command\n");
4757 else if (nvma->command == I40E_NVM_READ) {
4761 switch (nvma->command) {
4763 device_printf(dev, "- command: I40E_NVM_READ\n");
4766 device_printf(dev, "- command: I40E_NVM_WRITE\n");
4769 device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
4773 device_printf(dev, "- config (ptr) : 0x%02x\n", nvma->config & 0xFF);
4774 device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
4775 device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
4776 device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
4781 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
4783 struct i40e_hw *hw = &pf->hw;
4784 struct i40e_nvm_access *nvma;
4785 device_t dev = pf->dev;
4786 enum i40e_status_code status = 0;
4789 DEBUGFUNC("ixl_handle_nvmupd_cmd");
4792 if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
4793 ifd->ifd_data == NULL) {
4794 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
4796 device_printf(dev, "%s: ifdrv length: %lu, sizeof(struct i40e_nvm_access): %lu\n",
4797 __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access));
4798 device_printf(dev, "%s: data pointer: %p\n", __func__,
4803 nvma = (struct i40e_nvm_access *)ifd->ifd_data;
4805 if (pf->dbg_mask & IXL_DBG_NVMUPD)
4806 ixl_print_nvm_cmd(dev, nvma);
4808 if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
4810 while (count++ < 100) {
4811 i40e_msec_delay(100);
4812 if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING))
4817 if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) {
4819 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
4826 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
4827 i40e_stat_str(hw, status), perrno);
4830 * -EPERM has the same value as ERESTART, which the kernel interprets
4831 * as a request to restart this ioctl. Return -EACCES in its place instead.
4833 if (perrno == -EPERM)
4839 /*********************************************************************
4841 * Media Ioctl callback
4843 * This routine is called whenever the user queries the status of
4844 * the interface using ifconfig.
4846 **********************************************************************/
4848 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
4850 struct ixl_vsi *vsi = ifp->if_softc;
4851 struct ixl_pf *pf = vsi->back;
4852 struct i40e_hw *hw = &pf->hw;
4854 INIT_DEBUGOUT("ixl_media_status: begin");
4857 hw->phy.get_link_info = TRUE;
4858 i40e_get_link_status(hw, &pf->link_up);
4859 ixl_update_link_status(pf);
4861 ifmr->ifm_status = IFM_AVALID;
4862 ifmr->ifm_active = IFM_ETHER;
4869 ifmr->ifm_status |= IFM_ACTIVE;
4871 /* Hardware always does full-duplex */
4872 ifmr->ifm_active |= IFM_FDX;
4874 switch (hw->phy.link_info.phy_type) {
4876 case I40E_PHY_TYPE_100BASE_TX:
4877 ifmr->ifm_active |= IFM_100_TX;
4880 case I40E_PHY_TYPE_1000BASE_T:
4881 ifmr->ifm_active |= IFM_1000_T;
4883 case I40E_PHY_TYPE_1000BASE_SX:
4884 ifmr->ifm_active |= IFM_1000_SX;
4886 case I40E_PHY_TYPE_1000BASE_LX:
4887 ifmr->ifm_active |= IFM_1000_LX;
4889 case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
4890 ifmr->ifm_active |= IFM_OTHER;
4893 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
4894 ifmr->ifm_active |= IFM_10G_TWINAX;
4896 case I40E_PHY_TYPE_10GBASE_SR:
4897 ifmr->ifm_active |= IFM_10G_SR;
4899 case I40E_PHY_TYPE_10GBASE_LR:
4900 ifmr->ifm_active |= IFM_10G_LR;
4902 case I40E_PHY_TYPE_10GBASE_T:
4903 ifmr->ifm_active |= IFM_10G_T;
4905 case I40E_PHY_TYPE_XAUI:
4906 case I40E_PHY_TYPE_XFI:
4907 case I40E_PHY_TYPE_10GBASE_AOC:
4908 ifmr->ifm_active |= IFM_OTHER;
4911 case I40E_PHY_TYPE_25GBASE_KR:
4912 ifmr->ifm_active |= IFM_25G_KR;
4914 case I40E_PHY_TYPE_25GBASE_CR:
4915 ifmr->ifm_active |= IFM_25G_CR;
4917 case I40E_PHY_TYPE_25GBASE_SR:
4918 ifmr->ifm_active |= IFM_25G_SR;
4920 case I40E_PHY_TYPE_25GBASE_LR:
4921 ifmr->ifm_active |= IFM_UNKNOWN;
4924 case I40E_PHY_TYPE_40GBASE_CR4:
4925 case I40E_PHY_TYPE_40GBASE_CR4_CU:
4926 ifmr->ifm_active |= IFM_40G_CR4;
4928 case I40E_PHY_TYPE_40GBASE_SR4:
4929 ifmr->ifm_active |= IFM_40G_SR4;
4931 case I40E_PHY_TYPE_40GBASE_LR4:
4932 ifmr->ifm_active |= IFM_40G_LR4;
4934 case I40E_PHY_TYPE_XLAUI:
4935 ifmr->ifm_active |= IFM_OTHER;
4937 case I40E_PHY_TYPE_1000BASE_KX:
4938 ifmr->ifm_active |= IFM_1000_KX;
4940 case I40E_PHY_TYPE_SGMII:
4941 ifmr->ifm_active |= IFM_1000_SGMII;
4943 /* Both 10GBASE-CR1 variants (with and without _CU) map to IFM_10G_CR1 */
4944 case I40E_PHY_TYPE_10GBASE_CR1_CU:
4945 case I40E_PHY_TYPE_10GBASE_CR1:
4946 ifmr->ifm_active |= IFM_10G_CR1;
4948 case I40E_PHY_TYPE_10GBASE_KX4:
4949 ifmr->ifm_active |= IFM_10G_KX4;
4951 case I40E_PHY_TYPE_10GBASE_KR:
4952 ifmr->ifm_active |= IFM_10G_KR;
4954 case I40E_PHY_TYPE_SFI:
4955 ifmr->ifm_active |= IFM_10G_SFI;
4957 /* Our single 20G media type */
4958 case I40E_PHY_TYPE_20GBASE_KR2:
4959 ifmr->ifm_active |= IFM_20G_KR2;
4961 case I40E_PHY_TYPE_40GBASE_KR4:
4962 ifmr->ifm_active |= IFM_40G_KR4;
4964 case I40E_PHY_TYPE_XLPPI:
4965 case I40E_PHY_TYPE_40GBASE_AOC:
4966 ifmr->ifm_active |= IFM_40G_XLPPI;
4968 /* Unknown to driver */
4970 ifmr->ifm_active |= IFM_UNKNOWN;
4973 /* Report flow control status as well */
4974 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
4975 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
4976 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
4977 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
4985 struct ixl_pf *pf = arg;
4988 ixl_init_locked(pf);
4993 * NOTE: Fortville does not support forcing media speeds. Instead,
4994 * use the set_advertise sysctl to set the speeds Fortville
4995 * will advertise or be allowed to operate at.
4998 ixl_media_change(struct ifnet * ifp)
5000 struct ixl_vsi *vsi = ifp->if_softc;
5001 struct ifmedia *ifm = &vsi->media;
5003 INIT_DEBUGOUT("ixl_media_change: begin");
5005 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
5008 if_printf(ifp, "Use 'advertise_speed' sysctl to change advertised speeds\n");
5013 /*********************************************************************
5016 * ixl_ioctl is called when the user wants to configure the
5019 * return 0 on success, positive on failure
5020 **********************************************************************/
5023 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
5025 struct ixl_vsi *vsi = ifp->if_softc;
5026 struct ixl_pf *pf = vsi->back;
5027 struct ifreq *ifr = (struct ifreq *)data;
5028 struct ifdrv *ifd = (struct ifdrv *)data;
5029 #if defined(INET) || defined(INET6)
5030 struct ifaddr *ifa = (struct ifaddr *)data;
5031 bool avoid_reset = FALSE;
5038 IOCTL_DEBUGOUT("ioctl: SIOCSIFADDR (Set Interface Address)");
5040 if (ifa->ifa_addr->sa_family == AF_INET)
5044 if (ifa->ifa_addr->sa_family == AF_INET6)
5047 #if defined(INET) || defined(INET6)
5049 ** Calling init results in link renegotiation,
5050 ** so we avoid doing it when possible.
5053 ifp->if_flags |= IFF_UP;
5054 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
5057 if (!(ifp->if_flags & IFF_NOARP))
5058 arp_ifinit(ifp, ifa);
5061 error = ether_ioctl(ifp, command, data);
5065 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
5066 if (ifr->ifr_mtu > IXL_MAX_FRAME -
5067 ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
5071 ifp->if_mtu = ifr->ifr_mtu;
5072 vsi->max_frame_size =
5073 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
5074 + ETHER_VLAN_ENCAP_LEN;
5075 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5076 ixl_init_locked(pf);
5081 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
5083 if (ifp->if_flags & IFF_UP) {
5084 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5085 if ((ifp->if_flags ^ pf->if_flags) &
5086 (IFF_PROMISC | IFF_ALLMULTI)) {
5087 ixl_set_promisc(vsi);
5095 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5096 ixl_stop_locked(pf);
5099 pf->if_flags = ifp->if_flags;
5104 IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
5107 /* NVM update command */
5108 if (ifd->ifd_cmd == I40E_NVM_ACCESS)
5109 error = ixl_handle_nvmupd_cmd(pf, ifd);
5114 IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
5115 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5117 ixl_disable_rings_intr(vsi);
5119 ixl_enable_intr(vsi);
5124 IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
5125 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5127 ixl_disable_rings_intr(vsi);
5129 ixl_enable_intr(vsi);
5136 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
5137 error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
5141 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5142 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
5144 ixl_cap_txcsum_tso(vsi, ifp, mask);
5146 if (mask & IFCAP_RXCSUM)
5147 ifp->if_capenable ^= IFCAP_RXCSUM;
5148 if (mask & IFCAP_RXCSUM_IPV6)
5149 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
5150 if (mask & IFCAP_LRO)
5151 ifp->if_capenable ^= IFCAP_LRO;
5152 if (mask & IFCAP_VLAN_HWTAGGING)
5153 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5154 if (mask & IFCAP_VLAN_HWFILTER)
5155 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
5156 if (mask & IFCAP_VLAN_HWTSO)
5157 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5158 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5160 ixl_init_locked(pf);
5163 VLAN_CAPABILITIES(ifp);
5167 #if __FreeBSD_version >= 1003000
5170 struct ifi2creq i2c;
5173 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
5177 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
5180 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
5184 if (i2c.len > sizeof(i2c.data)) {
5189 for (i = 0; i < i2c.len; i++)
5190 if (ixl_read_i2c_byte(pf, i2c.offset + i,
5191 i2c.dev_addr, &i2c.data[i]))
5194 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
5199 IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
5200 error = ether_ioctl(ifp, command, data);
5208 ixl_find_i2c_interface(struct ixl_pf *pf)
5210 struct i40e_hw *hw = &pf->hw;
5211 bool i2c_en, port_matched;
5214 for (int i = 0; i < 4; i++) {
5215 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
5216 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
5217 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
5218 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
5220 if (i2c_en && port_matched)
5228 ixl_phy_type_string(u32 bit_pos, bool ext)
5230 static char * phy_types_str[32] = {
5260 "1000BASE-T Optical",
5264 static char * ext_phy_types_str[4] = {
5271 if (ext && bit_pos > 3) return "Invalid_Ext";
5272 if (bit_pos > 31) return "Invalid";
5274 return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
5278 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
5280 device_t dev = pf->dev;
5281 struct i40e_hw *hw = &pf->hw;
5282 struct i40e_aq_desc desc;
5283 enum i40e_status_code status;
5285 struct i40e_aqc_get_link_status *aq_link_status =
5286 (struct i40e_aqc_get_link_status *)&desc.params.raw;
5288 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
5289 link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
5290 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
5293 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
5294 __func__, i40e_stat_str(hw, status),
5295 i40e_aq_str(hw, hw->aq.asq_last_status));
5299 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
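/*
 * In the AQ link status response, the extended (25G) PHY types appear to
 * start at 0x1F, so values at or above that base index the extended string
 * table after subtracting the offset.
 */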
5304 ixl_phy_type_string_ls(u8 val)
5307 return ixl_phy_type_string(val - 0x1F, true);
5309 return ixl_phy_type_string(val, false);
5313 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
5315 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5316 device_t dev = pf->dev;
5320 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5322 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5326 struct i40e_aqc_get_link_status link_status;
5327 error = ixl_aq_get_link_status(pf, &link_status);
5333 /* TODO: Add 25G types */
5334 sbuf_printf(buf, "\n"
5335 "PHY Type : 0x%02x<%s>\n"
5337 "Link info: 0x%02x\n"
5338 "AN info : 0x%02x\n"
5339 "Ext info : 0x%02x\n"
5340 "Loopback : 0x%02x\n"
5344 link_status.phy_type,
5345 ixl_phy_type_string_ls(link_status.phy_type),
5346 link_status.link_speed,
5347 link_status.link_info,
5348 link_status.an_info,
5349 link_status.ext_info,
5350 link_status.loopback,
5351 link_status.max_frame_size,
5353 link_status.power_desc);
5355 error = sbuf_finish(buf);
5357 device_printf(dev, "Error finishing sbuf: %d\n", error);
5364 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
5366 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5367 struct i40e_hw *hw = &pf->hw;
5368 device_t dev = pf->dev;
5369 enum i40e_status_code status;
5370 struct i40e_aq_get_phy_abilities_resp abilities;
5374 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5376 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5380 status = i40e_aq_get_phy_capabilities(hw,
5381 FALSE, FALSE, &abilities, NULL);
5384 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
5385 __func__, i40e_stat_str(hw, status),
5386 i40e_aq_str(hw, hw->aq.asq_last_status));
5391 sbuf_printf(buf, "\n"
5393 abilities.phy_type);
5395 if (abilities.phy_type != 0) {
5396 sbuf_printf(buf, "<");
5397 for (int i = 0; i < 32; i++)
5398 if ((1 << i) & abilities.phy_type)
5399 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
5400 sbuf_printf(buf, ">\n");
5403 sbuf_printf(buf, "PHY Ext : %02x",
5404 abilities.phy_type_ext);
5406 if (abilities.phy_type_ext != 0) {
5407 sbuf_printf(buf, "<");
5408 for (int i = 0; i < 4; i++)
5409 if ((1 << i) & abilities.phy_type_ext)
5410 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
5411 sbuf_printf(buf, ">");
5413 sbuf_printf(buf, "\n");
5421 "ID : %02x %02x %02x %02x\n"
5422 "ModType : %02x %02x %02x\n"
5426 abilities.link_speed,
5427 abilities.abilities, abilities.eee_capability,
5428 abilities.eeer_val, abilities.d3_lpan,
5429 abilities.phy_id[0], abilities.phy_id[1],
5430 abilities.phy_id[2], abilities.phy_id[3],
5431 abilities.module_type[0], abilities.module_type[1],
5432 abilities.module_type[2], abilities.phy_type_ext >> 5,
5433 abilities.phy_type_ext & 0x1F,
5434 abilities.ext_comp_code);
5436 error = sbuf_finish(buf);
5438 device_printf(dev, "Error finishing sbuf: %d\n", error);
5445 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5447 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5448 struct ixl_vsi *vsi = &pf->vsi;
5449 struct ixl_mac_filter *f;
5454 int ftl_counter = 0;
5458 SLIST_FOREACH(f, &vsi->ftl, next) {
5463 sysctl_handle_string(oidp, "(none)", 6, req);
5467 buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5468 buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
5470 sprintf(buf_i++, "\n");
5471 SLIST_FOREACH(f, &vsi->ftl, next) {
5473 MAC_FORMAT ", vlan %4d, flags %#06x",
5474 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5476 /* don't print '\n' for last entry */
5477 if (++ftl_counter != ftl_len) {
5478 sprintf(buf_i, "\n");
5483 error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5485 printf("sysctl error: %d\n", error);
5486 free(buf, M_DEVBUF);
5490 #define IXL_SW_RES_SIZE 0x14
5492 ixl_res_alloc_cmp(const void *a, const void *b)
5494 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5495 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5496 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5498 return ((int)one->resource_type - (int)two->resource_type);
5502 * Longest string length: 25
5505 ixl_switch_res_type_string(u8 type)
5507 static char * ixl_switch_res_type_strings[0x14] = {
5510 "Perfect Match MAC address",
5513 "Multicast hash entry",
5514 "Unicast hash entry",
5518 "VLAN Statistic Pool",
5521 "Inner VLAN Forward filter",
5531 return ixl_switch_res_type_strings[type];
5533 return "(Reserved)";
5537 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5539 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5540 struct i40e_hw *hw = &pf->hw;
5541 device_t dev = pf->dev;
5543 enum i40e_status_code status;
5547 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5549 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5551 device_printf(dev, "Could not allocate sbuf for output.\n");
5555 bzero(resp, sizeof(resp));
5556 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5562 "%s: get_switch_resource_alloc() error %s, aq error %s\n",
5563 __func__, i40e_stat_str(hw, status),
5564 i40e_aq_str(hw, hw->aq.asq_last_status));
5569 /* Sort entries by type for display */
5570 qsort(resp, num_entries,
5571 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5572 &ixl_res_alloc_cmp);
5574 sbuf_cat(buf, "\n");
5575 sbuf_printf(buf, "# of entries: %d\n", num_entries);
5577 " Type | Guaranteed | Total | Used | Un-allocated\n"
5578 " | (this) | (all) | (this) | (all) \n");
5579 for (int i = 0; i < num_entries; i++) {
5581 "%25s | %10d %5d %6d %12d",
5582 ixl_switch_res_type_string(resp[i].resource_type),
5586 resp[i].total_unalloced);
5587 if (i < num_entries - 1)
5588 sbuf_cat(buf, "\n");
5591 error = sbuf_finish(buf);
5593 device_printf(dev, "Error finishing sbuf: %d\n", error);
5600 ** Caller must init and delete sbuf; this function will clear and
5601 ** finish it for caller.
5603 ** XXX: Cannot use the SEID for this, since there is no longer a
5604 ** fixed mapping between SEID and element type.
5607 ixl_switch_element_string(struct sbuf *s,
5608 struct i40e_aqc_switch_config_element_resp *element)
5612 switch (element->element_type) {
5613 case I40E_AQ_SW_ELEM_TYPE_MAC:
5614 sbuf_printf(s, "MAC %3d", element->element_info);
5616 case I40E_AQ_SW_ELEM_TYPE_PF:
5617 sbuf_printf(s, "PF %3d", element->element_info);
5619 case I40E_AQ_SW_ELEM_TYPE_VF:
5620 sbuf_printf(s, "VF %3d", element->element_info);
5622 case I40E_AQ_SW_ELEM_TYPE_EMP:
5625 case I40E_AQ_SW_ELEM_TYPE_BMC:
5628 case I40E_AQ_SW_ELEM_TYPE_PV:
5631 case I40E_AQ_SW_ELEM_TYPE_VEB:
5634 case I40E_AQ_SW_ELEM_TYPE_PA:
5637 case I40E_AQ_SW_ELEM_TYPE_VSI:
5638 sbuf_printf(s, "VSI %3d", element->element_info);
5646 return sbuf_data(s);
5650 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5652 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5653 struct i40e_hw *hw = &pf->hw;
5654 device_t dev = pf->dev;
5657 enum i40e_status_code status;
5660 u8 aq_buf[I40E_AQ_LARGE_BUF];
5662 struct i40e_aqc_get_switch_config_resp *sw_config;
5663 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5665 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5667 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5671 status = i40e_aq_get_switch_config(hw, sw_config,
5672 sizeof(aq_buf), &next, NULL);
5675 "%s: aq_get_switch_config() error %s, aq error %s\n",
5676 __func__, i40e_stat_str(hw, status),
5677 i40e_aq_str(hw, hw->aq.asq_last_status));
5682 device_printf(dev, "%s: TODO: get more config with SEID %d\n",
5685 nmbuf = sbuf_new_auto();
5687 device_printf(dev, "Could not allocate sbuf for name output.\n");
5692 sbuf_cat(buf, "\n");
5693 /* Assuming <= 255 elements in switch */
5694 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
5695 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
5697 ** Revision -- all elements are revision 1 for now
5700 "SEID ( Name ) | Uplink | Downlink | Conn Type\n"
5701 " | | | (uplink)\n");
5702 for (int i = 0; i < sw_config->header.num_reported; i++) {
5703 // "%4d (%8s) | %8s %8s %#8x",
5704 sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5706 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5707 &sw_config->element[i]));
5708 sbuf_cat(buf, " | ");
5709 sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
5711 sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
5713 sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
5714 if (i < sw_config->header.num_reported - 1)
5715 sbuf_cat(buf, "\n");
5719 error = sbuf_finish(buf);
5721 device_printf(dev, "Error finishing sbuf: %d\n", error);
5729 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
5731 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5732 struct i40e_hw *hw = &pf->hw;
5733 device_t dev = pf->dev;
5736 enum i40e_status_code status;
5739 struct i40e_aqc_get_set_rss_key_data key_data;
5741 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5743 device_printf(dev, "Could not allocate sbuf for output.\n");
5747 sbuf_cat(buf, "\n");
5748 if (hw->mac.type == I40E_MAC_X722) {
5749 bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
5750 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
5752 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
5753 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
5754 sbuf_printf(buf, "%40D", (u_char *)key_data.standard_rss_key, "");
5756 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
5757 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
5758 sbuf_printf(buf, "%4D", (u_char *)&reg, "");
5762 error = sbuf_finish(buf);
5764 device_printf(dev, "Error finishing sbuf: %d\n", error);
5771 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
5773 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5774 struct i40e_hw *hw = &pf->hw;
5775 device_t dev = pf->dev;
5778 enum i40e_status_code status;
5782 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5784 device_printf(dev, "Could not allocate sbuf for output.\n");
5788 sbuf_cat(buf, "\n");
5789 if (hw->mac.type == I40E_MAC_X722) {
5790 bzero(hlut, sizeof(hlut));
5791 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
5793 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
5794 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
5795 sbuf_printf(buf, "%512D", (u_char *)hlut, "");
5797 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
5798 reg = rd32(hw, I40E_PFQF_HLUT(i));
5799 sbuf_printf(buf, "%4D", (u_char *)&reg, "");
5803 error = sbuf_finish(buf);
5805 device_printf(dev, "Error finishing sbuf: %d\n", error);
5812 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
5814 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5815 struct i40e_hw *hw = &pf->hw;
5818 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
5819 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
5821 return sysctl_handle_long(oidp, NULL, hena, req);
5825 * Sysctl to disable firmware's link management
5827 * 1 - Disable link management on this port
5828 * 0 - Re-enable link management
5830 * On normal NVMs, firmware manages link by default.
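/*
 * A likely invocation, assuming the PF attached as unit 0 (the handler is
 * registered write-only under the hidden "debug" node):
 *
 *   sysctl dev.ixl.0.debug.disable_fw_link_management=1
 */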
5833 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
5835 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5836 struct i40e_hw *hw = &pf->hw;
5837 device_t dev = pf->dev;
5838 int requested_mode = -1;
5839 enum i40e_status_code status = 0;
5842 /* Read in new mode */
5843 error = sysctl_handle_int(oidp, &requested_mode, 0, req);
5844 if ((error) || (req->newptr == NULL))
5846 /* Check for sane value */
5847 if (requested_mode < 0 || requested_mode > 1) {
5848 device_printf(dev, "Valid modes are 0 or 1\n");
5853 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
5856 "%s: Error setting new phy debug mode %s,"
5857 " aq error: %s\n", __func__, i40e_stat_str(hw, status),
5858 i40e_aq_str(hw, hw->aq.asq_last_status));
5866 * Sysctl to read a byte from I2C bus.
5868 * Input: 32-bit value:
5869 * bits 0-7: device address (0xA0 or 0xA2)
5870 * bits 8-15: offset (0-255)
5871 * bits 16-31: unused
5872 * Output: 8-bit value read
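/*
 * Example, assuming the PF attached as unit 0: reading offset 0x08 of
 * device 0xA0 encodes as (0x08 << 8) | 0xA0 = 0x08A0:
 *
 *   sysctl dev.ixl.0.debug.read_i2c_byte=0x08A0
 */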
5875 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
5877 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5878 device_t dev = pf->dev;
5879 int input = -1, error = 0;
5881 device_printf(dev, "%s: start\n", __func__);
5883 u8 dev_addr, offset, output;
5885 /* Read in I2C read parameters */
5886 error = sysctl_handle_int(oidp, &input, 0, req);
5887 if ((error) || (req->newptr == NULL))
5889 /* Validate device address */
5890 dev_addr = input & 0xFF;
5891 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
5894 offset = (input >> 8) & 0xFF;
5896 error = ixl_read_i2c_byte(pf, offset, dev_addr, &output);
5900 device_printf(dev, "%02X\n", output);
5905 * Sysctl to write a byte to the I2C bus.
5907 * Input: 32-bit value:
5908 * bits 0-7: device address (0xA0 or 0xA2)
5909 * bits 8-15: offset (0-255)
5910 * bits 16-23: value to write
5911 * bits 24-31: unused
5912 * Output: 8-bit value written
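/*
 * Example, assuming the PF attached as unit 0: writing 0x55 to offset 0x08
 * of device 0xA0 encodes as (0x55 << 16) | (0x08 << 8) | 0xA0 = 0x5508A0:
 *
 *   sysctl dev.ixl.0.debug.write_i2c_byte=0x5508A0
 */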
5915 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
5917 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5918 device_t dev = pf->dev;
5919 int input = -1, error = 0;
5921 u8 dev_addr, offset, value;
5923 /* Read in I2C write parameters */
5924 error = sysctl_handle_int(oidp, &input, 0, req);
5925 if ((error) || (req->newptr == NULL))
5927 /* Validate device address */
5928 dev_addr = input & 0xFF;
5929 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
5932 offset = (input >> 8) & 0xFF;
5933 value = (input >> 16) & 0xFF;
5935 error = ixl_write_i2c_byte(pf, offset, dev_addr, value);
5939 device_printf(dev, "%02X written\n", value);
5944 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
5945 u8 bit_pos, int *is_set)
5947 device_t dev = pf->dev;
5948 struct i40e_hw *hw = &pf->hw;
5949 enum i40e_status_code status;
5951 status = i40e_aq_get_phy_capabilities(hw,
5952 FALSE, FALSE, abilities, NULL);
5955 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
5956 __func__, i40e_stat_str(hw, status),
5957 i40e_aq_str(hw, hw->aq.asq_last_status));
5961 *is_set = !!(abilities->phy_type_ext & bit_pos);
5966 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
5967 u8 bit_pos, int set)
5969 device_t dev = pf->dev;
5970 struct i40e_hw *hw = &pf->hw;
5971 struct i40e_aq_set_phy_config config;
5972 enum i40e_status_code status;
5974 /* Set new PHY config */
5975 memset(&config, 0, sizeof(config));
5976 config.fec_config = abilities->phy_type_ext & ~(bit_pos);
5978 config.fec_config |= bit_pos;
5979 if (config.fec_config != abilities->phy_type_ext) {
5980 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
5981 config.phy_type = abilities->phy_type;
5982 config.phy_type_ext = abilities->phy_type_ext;
5983 config.link_speed = abilities->link_speed;
5984 config.eee_capability = abilities->eee_capability;
5985 config.eeer = abilities->eeer_val;
5986 config.low_power_ctrl = abilities->d3_lpan;
5987 status = i40e_aq_set_phy_config(hw, &config, NULL);
5991 "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
5992 __func__, i40e_stat_str(hw, status),
5993 i40e_aq_str(hw, hw->aq.asq_last_status));
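/*
 * The five FEC sysctl handlers below share one pattern: fetch the current
 * PHY abilities, report whether the given I40E_AQ_SET_FEC_* bit is set,
 * and on a write toggle just that bit via ixl_set_fec_config().
 */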
6002 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
6004 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6005 int mode, error = 0;
6007 struct i40e_aq_get_phy_abilities_resp abilities;
6008 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, &mode);
6011 /* Read in new mode */
6012 error = sysctl_handle_int(oidp, &mode, 0, req);
6013 if ((error) || (req->newptr == NULL))
6016 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
6020 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
6022 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6023 int mode, error = 0;
6025 struct i40e_aq_get_phy_abilities_resp abilities;
6026 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, &mode);
6029 /* Read in new mode */
6030 error = sysctl_handle_int(oidp, &mode, 0, req);
6031 if ((error) || (req->newptr == NULL))
6034 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
6038 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
6040 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6041 int mode, error = 0;
6043 struct i40e_aq_get_phy_abilities_resp abilities;
6044 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, &mode);
6047 /* Read in new mode */
6048 error = sysctl_handle_int(oidp, &mode, 0, req);
6049 if ((error) || (req->newptr == NULL))
6052 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
6056 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
6058 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6059 int mode, error = 0;
6061 struct i40e_aq_get_phy_abilities_resp abilities;
6062 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, &mode);
6065 /* Read in new mode */
6066 error = sysctl_handle_int(oidp, &mode, 0, req);
6067 if ((error) || (req->newptr == NULL))
6070 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
6074 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
6076 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6077 int mode, error = 0;
6079 struct i40e_aq_get_phy_abilities_resp abilities;
6080 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, &mode);
6083 /* Read in new mode */
6084 error = sysctl_handle_int(oidp, &mode, 0, req);
6085 if ((error) || (req->newptr == NULL))
6088 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));