/******************************************************************************

  Copyright (c) 2013-2017, Intel Corporation

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include "ixl_pf_iov.h"

#include "ixl_iw_int.h"

#ifdef DEV_NETMAP
#include <net/netmap.h>
#include <sys/selinfo.h>
#include <dev/netmap/netmap_kern.h>
#endif /* DEV_NETMAP */
static int	ixl_vsi_setup_queue(struct ixl_vsi *, struct ixl_queue *, int);
static u64	ixl_max_aq_speed_to_value(u8);
static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);

static int	ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

static int	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);

static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);

extern int ixl_enable_iwarp;
extern int ixl_limit_iwarp_msix;

const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
const char * const ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

static char *ixl_fec_string[3] = {
	"CL108 RS-FEC",
	"CL74 FC-FEC/BASE-R",
	"None"
};

MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
void
ixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)
{
	va_list args;

	if (!(mask & pf->dbg_mask))
		return;

	/* Re-implement device_printf() */
	device_print_prettyname(pf->dev);
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}
/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

	sbuf_printf(buf,
	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
	    IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
	    IXL_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack,
	    oem_ver, oem_build, oem_patch);
}

void
ixl_print_nvm_version(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *sbuf;

	sbuf = sbuf_new_auto();
	ixl_nvm_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	device_printf(dev, "%s\n", sbuf_data(sbuf));
	sbuf_delete(sbuf);
}
void
ixl_configure_tx_itr(struct ixl_pf *pf)
{
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;

	vsi->tx_itr_setting = pf->tx_itr;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring	*txr = &que->txr;

		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
		    vsi->tx_itr_setting);
		txr->itr = vsi->tx_itr_setting;
		txr->latency = IXL_AVE_LATENCY;
	}
}
void
ixl_configure_rx_itr(struct ixl_pf *pf)
{
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;

	vsi->rx_itr_setting = pf->rx_itr;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct rx_ring	*rxr = &que->rxr;

		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
		    vsi->rx_itr_setting);
		rxr->itr = vsi->rx_itr_setting;
		rxr->latency = IXL_AVE_LATENCY;
	}
}
/*
 * Write PF ITR values to queue ITR registers.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
	ixl_configure_tx_itr(pf);
	ixl_configure_rx_itr(pf);
}
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  the init entry point in the network interface structure. It is
 *  also used by the driver as a hw/sw initialization routine to
 *  get to a consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
void
ixl_init_locked(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;
	device_t	dev = pf->dev;
	struct i40e_filter_control_settings	filter;
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	INIT_DEBUGOUT("ixl_init_locked: begin");
	IXL_PF_LOCK_ASSERT(pf);
	/*
	 * If the AQ is dead here, it probably means something outside of the
	 * driver did something to the adapter, like a PF reset.
	 * So, rebuild the driver's state here if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
		ixl_reset(pf);
	}

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
	    ETHER_ADDR_LEN);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		device_printf(dev, "ixl_init_locked: reconfigure MAC addr\n");
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		bcopy(tmpaddr, hw->mac.addr,
		    ETHER_ADDR_LEN);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address change failed!\n");
			return;
		}
	}

	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = FALSE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
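	/*
	 * Note: the 512-entry size above selects the PF-sized RSS lookup
	 * table on these devices; VFs use a smaller 64-entry LUT.
	 */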
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");

	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!\n");
		return;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Setup vlan's if needed */
	ixl_setup_vlan_filters(vsi);

	/* Set up MSI/X routing and the ITR settings */
	if (pf->msix > 1) {
		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	ixl_reconfigure_filters(vsi);

	/* And now turn on interrupts */
	ixl_enable_intr(vsi);

	/* Get link info */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

	/* Start the local timer */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);
		if (ret)
			device_printf(dev,
			    "initialize iwarp failed, code %d\n", ret);
	}
#endif
}
/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/
int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	bool		again = TRUE;
	int		error, len;
	u16		needed;

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	error = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_DEVBUF);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
		device_printf(dev, "capability discovery failed: %d\n",
		    pf->hw.aq.asq_last_status);
		return (ENODEV);
	}

	/* Capture this PF's starting queue pair */
	pf->qbase = hw->func_caps.base_queue;
	device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
	    hw->pf_id, hw->func_caps.num_vfs,
	    hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf,
	    hw->func_caps.fd_filters_guaranteed,
	    hw->func_caps.fd_filters_best_effort,
	    hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp,
	    hw->func_caps.base_queue);

	struct i40e_osdep *osdep = (struct i40e_osdep *)hw->back;
	osdep->i2c_intfc_num = ixl_find_i2c_interface(pf);
	if (osdep->i2c_intfc_num != -1)
		pf->has_i2c = true;

	/* Print a subset of the capability information. */
	device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n",
	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
	    "MDIO shared");

	return (error);
}
static void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	device_t dev = vsi->dev;

	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			device_printf(dev,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			device_printf(dev,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			device_printf(dev,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			device_printf(dev,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}
/* For the set_advertise sysctl */
void
ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int err;

	/* Make sure to initialize the device to the complete list of
	 * supported speeds on driver load, to ensure unloading and
	 * reloading the driver will restore this value.
	 */
	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
	if (err) {
		/* Non-fatal error */
		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
		    __func__, err);
		return;
	}

	pf->advertised_speed =
	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
}
void
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status)
			device_printf(dev,
			    "init: LAN HMC shutdown failure; status %d\n", status);
	}

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "init: Admin Queue shutdown failure; status %d\n", status);
}
int
ixl_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u8 set_fc_err_mask;
	int error = 0;

	// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev, "init: PF reset failure\n");
		error = EIO;
		goto err_out;
	}

	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "init: Admin queue init failure;"
		    " status code %d\n", error);
		error = EIO;
		goto err_out;
	}

	i40e_clear_pxe_mode(hw);

	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "init: Error retrieving HW capabilities;"
		    " status code %d\n", error);
		goto err_out;
	}

	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (error) {
		device_printf(dev, "init: LAN HMC init failed; status code %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "init: LAN HMC config failed; status code %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	// XXX: possible fix for panic, but our failure recovery is still broken
	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "init: ixl_switch_config() failed: %d\n",
		    error);
		goto err_out;
	}

	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (error) {
		device_printf(dev, "init: i40e_aq_set_phy_int_mask() failed: err %d,"
		    " aq_err %d\n", error, hw->aq.asq_last_status);
		error = EIO;
		goto err_out;
	}

	error = i40e_set_fc(hw, &set_fc_err_mask, true);
	if (error) {
		device_printf(dev, "init: setting link flow control failed; retcode %d,"
		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
		goto err_out;
	}

	// XXX: (Rebuild VSIs?)

	/* Firmware delay workaround */
	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error) {
			device_printf(dev, "init: link restart failed, aq_err %d\n",
			    hw->aq.asq_last_status);
			goto err_out;
		}
	}

	/* Re-enable admin queue interrupt */
	if (pf->msix > 1) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

err_out:
	return (error);
}
/*
** MSIX Interrupt Handlers and Tasklets
*/
void
ixl_handle_que(void *context, int pending)
{
	struct ixl_queue *que = context;
	struct ixl_vsi	*vsi = que->vsi;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	struct ifnet	*ifp = vsi->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);
		IXL_TX_LOCK(txr);
		ixl_txeof(que);
		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		IXL_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->task);
			return;
		}
	}

	/* Re-enable queue interrupt */
	if (pf->msix > 1)
		ixl_enable_queue(hw, que->me);
	else
		ixl_enable_intr0(hw);
}
/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/
void
ixl_intr(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	struct tx_ring		*txr = &que->txr;
	u32			icr0;
	bool			more;

	/* Clear PBA at start of ISR if using legacy interrupts */
	if (pf->msix == 0)
		wr32(hw, I40E_PFINT_DYN_CTL0,
		    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
		    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));

	icr0 = rd32(hw, I40E_PFINT_ICR0);

#ifdef PCI_IOV
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
		taskqueue_enqueue(pf->tq, &pf->vflr_task);
#endif

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
		taskqueue_enqueue(pf->tq, &pf->adminq);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		++que->irqs;

		more = ixl_rxeof(que, IXL_RX_LIMIT);

		IXL_TX_LOCK(txr);
		ixl_txeof(que);
		if (!drbr_empty(vsi->ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		IXL_TX_UNLOCK(txr);

		if (more)
			taskqueue_enqueue(que->tq, &que->task);
	}

	ixl_enable_intr0(hw);
}
/*********************************************************************
 *
 *  MSIX VSI Interrupt Service routine
 *
 **********************************************************************/
void
ixl_msix_que(void *arg)
{
	struct ixl_queue *que = arg;
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	bool		more_tx, more_rx;

	/* Protect against spurious interrupts */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	++que->irqs;

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	ixl_set_queue_rx_itr(que);
	ixl_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
	else
		ixl_enable_queue(hw, que->me);

	return;
}
/*********************************************************************
 *
 *  MSIX Admin Queue Interrupt Service routine
 *
 **********************************************************************/
void
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	u32		reg, mask, rstat_reg;
	bool		do_task = FALSE;

	++pf->admin_irq;

	reg = rd32(hw, I40E_PFINT_ICR0);
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
		mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK;
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ixl_handle_mdd_event(pf);
		mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK;
	}

	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
		device_printf(dev, "Reset Requested!\n");
		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		device_printf(dev, "Reset type: ");
		switch (rstat_reg) {
		/* These others might be handled similarly to an EMPR reset */
		case I40E_RESET_CORER:
			printf("CORER\n");
			break;
		case I40E_RESET_GLOBR:
			printf("GLOBR\n");
			break;
		case I40E_RESET_EMPR:
			printf("EMPR\n");
			atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
			break;
		default:
			printf("POR\n");
			break;
		}
		/* overload admin queue task to check reset progress */
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) {
		device_printf(dev, "ECC Error detected!\n");
	}

	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		reg = rd32(hw, I40E_PFHMC_ERRORINFO);
		if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
			device_printf(dev, "HMC Error detected!\n");
			device_printf(dev, "INFO 0x%08x\n", reg);
			reg = rd32(hw, I40E_PFHMC_ERRORDATA);
			device_printf(dev, "DATA 0x%08x\n", reg);
			wr32(hw, I40E_PFHMC_ERRORINFO, 0);
		}
	}

	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) {
		device_printf(dev, "PCI Exception detected!\n");
	}

#ifdef PCI_IOV
	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		taskqueue_enqueue(pf->tq, &pf->vflr_task);
	}
#endif

	if (do_task)
		taskqueue_enqueue(pf->tq, &pf->adminq);
	else
		ixl_enable_intr0(hw);
}
void
ixl_set_promisc(struct ixl_vsi *vsi)
{
	struct ifnet	*ifp = vsi->ifp;
	struct i40e_hw	*hw = vsi->hw;
	int		err, mcnt = 0;
	bool		uni = FALSE, multi = FALSE;

	if (ifp->if_flags & IFF_PROMISC)
		uni = multi = TRUE;
	else if (ifp->if_flags & IFF_ALLMULTI)
		multi = TRUE;
	else { /* Need to count the multicast addresses */
		struct ifmultiaddr *ifma;
		if_maddr_rlock(ifp);
		CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_MULTICAST_ADDR) {
				multi = TRUE;
				break;
			}
			mcnt++;
		}
		if_maddr_runlock(ifp);
	}

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL, TRUE);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
}
/*********************************************************************
 *
 *  Routines for multicast and vlan filter management.
 *
 *********************************************************************/
void
ixl_add_multi(struct ixl_vsi *vsi)
{
	struct ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct i40e_hw		*hw = vsi->hw;
	int			mcnt = 0, flags;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	if_maddr_rlock(ifp);
	/*
	** First just get a count, to decide if we
	** simply use multicast promiscuous.
	*/
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete existing MC filters */
		ixl_del_hw_filters(vsi, mcnt);
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}

	mcnt = 0;
	if_maddr_rlock(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		ixl_add_mc_filter(vsi,
		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0) {
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	IOCTL_DEBUGOUT("ixl_add_multi: end");
}
void
ixl_del_multi(struct ixl_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct ifmultiaddr	*ifma;
	struct ixl_mac_filter	*f;
	int			mcnt = 0;
	bool			match = FALSE;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
			match = FALSE;
			CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			if (match == FALSE) {
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
			}
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		ixl_del_hw_filters(vsi, mcnt);
}
/*********************************************************************
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 *  Only runs when the driver is configured UP and RUNNING.
 *
 **********************************************************************/
void
ixl_local_timer(void *arg)
{
	struct ixl_pf	*pf = arg;

	IXL_PF_LOCK_ASSERT(pf);

	/* Fire off the adminq task */
	taskqueue_enqueue(pf->tq, &pf->adminq);

	/* Update stats */
	ixl_update_stats_counters(pf);

	/* Increment stat when a queue shows hung */
	if (ixl_queue_hang_check(&pf->vsi))
		pf->watchdog_events++;

	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
}
void
ixl_link_up_msg(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = pf->vsi.ifp;
	char *req_fec_string, *neg_fec_string;
	u8 fec_abilities;

	fec_abilities = hw->phy.link_info.req_fec_info;
	/* If both RS and KR are requested, only show RS */
	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
		req_fec_string = ixl_fec_string[0];
	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
		req_fec_string = ixl_fec_string[1];
	else
		req_fec_string = ixl_fec_string[2];

	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
		neg_fec_string = ixl_fec_string[0];
	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
		neg_fec_string = ixl_fec_string[1];
	else
		neg_fec_string = ixl_fec_string[2];

	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
	    ifp->if_xname,
	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
	    req_fec_string, neg_fec_string,
	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
	    hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
	    ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
	    ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
	    ixl_fc_string[1] : ixl_fc_string[0]);
}
/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
void
ixl_update_link_status(struct ixl_pf *pf)
{
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;
	device_t	dev = pf->dev;

	if (pf->link_up) {
		if (vsi->link_active == FALSE) {
			vsi->link_active = TRUE;
#if __FreeBSD_version >= 1100000
			ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->link_speed);
#else
			if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->link_speed));
#endif
			if_link_state_change(ifp, LINK_STATE_UP);
			ixl_link_up_msg(pf);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			vsi->link_active = FALSE;
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	}
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/
void
ixl_stop_locked(struct ixl_pf *pf)
{
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;

	INIT_DEBUGOUT("ixl_stop: begin\n");

	IXL_PF_LOCK_ASSERT(pf);

#ifdef IXL_IW
	/* Stop iWARP device */
	if (ixl_enable_iwarp && pf->iw_enabled)
		ixl_iw_pf_stop(pf);
#endif

	/* Stop the local timer */
	callout_stop(&pf->timer);

	ixl_disable_rings_intr(vsi);
	ixl_disable_rings(vsi);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
}

void
ixl_stop(struct ixl_pf *pf)
{
	IXL_PF_LOCK(pf);
	ixl_stop_locked(pf);
	IXL_PF_UNLOCK(pf);
}
/*********************************************************************
 *
 *  Setup the legacy or MSI interrupt handler
 *
 **********************************************************************/
int
ixl_setup_legacy(struct ixl_pf *pf)
{
	device_t	dev = pf->dev;
	int		error, rid = 0;

	if (pf->msix == 1)
		rid = 1;
	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (pf->res == NULL) {
		device_printf(dev, "bus_alloc_resource_any() for"
		    " legacy/msi interrupt failed\n");
		return (ENXIO);
	}

	/* Set the handler function */
	error = bus_setup_intr(dev, pf->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixl_intr, pf, &pf->tag);
	if (error) {
		device_printf(dev, "bus_setup_intr() for legacy/msi"
		    " interrupt handler failed, error %d\n", error);
		return (ENXIO);
	}
	error = bus_describe_intr(dev, pf->res, pf->tag, "irq");
	if (error) {
		device_printf(dev, "bus_describe_intr() for Admin Queue"
		    " interrupt name failed, error %d\n", error);
	}

	return (0);
}
int
ixl_setup_adminq_tq(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int error = 0;

	/* Tasklet for Admin Queue interrupts */
	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
#ifdef PCI_IOV
	/* VFLR Tasklet */
	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
#endif
	/* Create and start Admin Queue taskqueue */
	pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT,
	    taskqueue_thread_enqueue, &pf->tq);
	if (!pf->tq) {
		device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n");
		return (ENOMEM);
	}
	error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq",
	    device_get_nameunit(dev));
	if (error) {
		device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n",
		    error);
		taskqueue_free(pf->tq);
		return (error);
	}

	return (0);
}
void
ixl_setup_queue_tqs(struct ixl_vsi *vsi)
{
	struct ixl_queue *que = vsi->queues;
	device_t	dev = vsi->dev;
#ifdef RSS
	int		cpu_id = 0;
	cpuset_t	cpu_mask;
#endif

	/* Create queue tasks and start queue taskqueues */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
		TASK_INIT(&que->task, 0, ixl_handle_que, que);
		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
		CPU_SETOF(cpu_id, &cpu_mask);
		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
		    &cpu_mask, "%s (bucket %d)",
		    device_get_nameunit(dev), cpu_id);
#else
		taskqueue_start_threads(&que->tq, 1, PI_NET,
		    "%s (que %d)", device_get_nameunit(dev), que->me);
#endif
	}
}
void
ixl_free_adminq_tq(struct ixl_pf *pf)
{
	if (pf->tq) {
		taskqueue_free(pf->tq);
		pf->tq = NULL;
	}
}

void
ixl_free_queue_tqs(struct ixl_vsi *vsi)
{
	struct ixl_queue *que = vsi->queues;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_free(que->tq);
			que->tq = NULL;
		}
	}
}
int
ixl_setup_adminq_msix(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int rid, error = 0;

	/* Admin IRQ rid is 1, vector is 0 */
	rid = 1;
	/* Get interrupt resource from bus */
	pf->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (!(pf->res)) {
		device_printf(dev, "bus_alloc_resource_any() for Admin Queue"
		    " interrupt failed [rid=%d]\n", rid);
		return (ENXIO);
	}
	/* Then associate interrupt with handler */
	error = bus_setup_intr(dev, pf->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixl_msix_adminq, pf, &pf->tag);
	if (error) {
		device_printf(dev, "bus_setup_intr() for Admin Queue"
		    " interrupt handler failed, error %d\n", error);
		return (ENXIO);
	}
	error = bus_describe_intr(dev, pf->res, pf->tag, "aq");
	if (error) {
		device_printf(dev, "bus_describe_intr() for Admin Queue"
		    " interrupt name failed, error %d\n", error);
	}

	return (0);
}
/*
 * Allocate interrupt resources from the bus and associate an interrupt
 * handler to those for the VSI's queues.
 */
int
ixl_setup_queue_msix(struct ixl_vsi *vsi)
{
	device_t	dev = vsi->dev;
	struct ixl_queue *que = vsi->queues;
	struct tx_ring	*txr;
	int		error, rid, vector = 1;

	/* Queue interrupt vector numbers start at 1 (adminq intr is 0) */
	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
		int cpu_id = i;
		rid = vector + 1;
		txr = &que->txr;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (!que->res) {
			device_printf(dev, "bus_alloc_resource_any() for"
			    " Queue %d interrupt failed [rid=%d]\n",
			    que->me, rid);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixl_msix_que, que, &que->tag);
		if (error) {
			device_printf(dev, "bus_setup_intr() for Queue %d"
			    " interrupt handler failed, error %d\n",
			    que->me, error);
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
			return (ENXIO);
		}
		error = bus_describe_intr(dev, que->res, que->tag, "q%d", i);
		if (error) {
			device_printf(dev, "bus_describe_intr() for Queue %d"
			    " interrupt name failed, error %d\n",
			    que->me, error);
		}
		/* Bind the vector to a CPU */
#ifdef RSS
		cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
		error = bus_bind_intr(dev, que->res, cpu_id);
		if (error) {
			device_printf(dev, "bus_bind_intr() for Queue %d"
			    " to CPU %d failed, error %d\n",
			    que->me, cpu_id, error);
		}
		que->msix = vector;
	}

	return (0);
}
/*
 * Allocate MSI/X vectors from the OS.
 * Returns 0 for legacy, 1 for MSI, >1 for MSIX.
 */
int
ixl_init_msix(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
#ifdef IXL_IW
#if __FreeBSD_version >= 1100000
	cpuset_t cpu_set;
#endif
#endif
	int auto_max_queues;
	int rid, want, vectors, queues, available;
#ifdef IXL_IW
	int iw_want = 0, iw_vectors;
#endif

	/* Override by tuneable */
	if (!pf->enable_msix)
		goto no_msix;

	/* First try MSI/X */
	rid = PCIR_BAR(IXL_MSIX_BAR);
	pf->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!pf->msix_mem) {
		/* May not be enabled */
		device_printf(pf->dev,
		    "Unable to map MSIX table\n");
		goto no_msix;
	}

	available = pci_msix_count(dev);
	if (available < 2) {
		/* system has msix disabled (0), or only one vector (1) */
		device_printf(pf->dev, "Less than two MSI-X vectors available\n");
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, pf->msix_mem);
		pf->msix_mem = NULL;
		goto no_msix;
	}

	/* Clamp max number of queues based on:
	 * - # of MSI-X vectors available
	 * - # of cpus available
	 * - # of queues that can be assigned to the LAN VSI
	 */
	auto_max_queues = min(mp_ncpus, available - 1);
	if (hw->mac.type == I40E_MAC_X722)
		auto_max_queues = min(auto_max_queues, 128);
	else
		auto_max_queues = min(auto_max_queues, 64);

	/* Override with tunable value if tunable is less than autoconfig count */
	if ((pf->max_queues != 0) && (pf->max_queues <= auto_max_queues))
		queues = pf->max_queues;
	/* Use autoconfig amount if that's lower */
	else if ((pf->max_queues != 0) && (pf->max_queues > auto_max_queues)) {
		device_printf(dev, "ixl_max_queues (%d) is too large, using "
		    "autoconfig amount (%d)...\n",
		    pf->max_queues, auto_max_queues);
		queues = auto_max_queues;
	}
	/* Limit maximum auto-configured queues to 8 if no user value is set */
	else
		queues = min(auto_max_queues, 8);

#ifdef RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional one for the admin queue.
	*/
	want = queues + 1;
	if (want <= available)	/* Have enough */
		vectors = want;
	else {
		device_printf(pf->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors available but %d wanted!\n",
		    available, want);
		pf->msix_mem = NULL;
		goto no_msix; /* Will go to Legacy setup */
	}

#ifdef IXL_IW
	if (ixl_enable_iwarp && hw->func_caps.iwarp) {
#if __FreeBSD_version >= 1100000
		if (bus_get_cpus(dev, INTR_CPUS, sizeof(cpu_set), &cpu_set) == 0)
			iw_want = min(CPU_COUNT(&cpu_set), IXL_IW_MAX_MSIX);
#endif
		if (!iw_want)
			iw_want = min(mp_ncpus, IXL_IW_MAX_MSIX);
		if (ixl_limit_iwarp_msix > 0)
			iw_want = min(iw_want, ixl_limit_iwarp_msix);
		else
			iw_want = min(iw_want, 1);

		available -= vectors;
		if (available > 0) {
			iw_vectors = (available >= iw_want) ?
			    iw_want : available;
			vectors += iw_vectors;
		} else
			iw_vectors = 0;
	}
#endif

	ixl_set_msix_enable(dev);
	if (pci_alloc_msix(dev, &vectors) == 0) {
		device_printf(pf->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		pf->msix = vectors;
#ifdef IXL_IW
		if (ixl_enable_iwarp && hw->func_caps.iwarp) {
			pf->iw_msix = iw_vectors;
			device_printf(pf->dev,
			    "Reserving %d MSIX interrupts for iWARP CEQ and AEQ\n",
			    iw_vectors);
		}
#endif

		pf->vsi.num_queues = queues;
#ifdef RSS
		/*
		 * If we're doing RSS, the number of queues needs to
		 * match the number of RSS buckets that are configured.
		 *
		 * + If there's more queues than RSS buckets, we'll end
		 *   up with queues that get no traffic.
		 *
		 * + If there's more RSS buckets than queues, we'll end
		 *   up having multiple RSS buckets map to the same queue,
		 *   so there'll be some contention.
		 */
		if (queues != rss_getnumbuckets()) {
			device_printf(dev,
			    "%s: queues (%d) != RSS buckets (%d)"
			    "; performance will be impacted.\n",
			    __func__, queues, rss_getnumbuckets());
		}
#endif
		return (vectors);
	}
no_msix:
	vectors = pci_msi_count(dev);
	pf->vsi.num_queues = 1;
	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
		device_printf(pf->dev, "Using an MSI interrupt\n");
	else {
		vectors = 0;
		device_printf(pf->dev, "Using a Legacy interrupt\n");
	}
	return (vectors);
}
/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 */
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 * in MSIX mode.
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
/*
 * Configure queue interrupt cause registers in hardware.
 */
void
ixl_configure_queue_intr_msix(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	u32		reg;
	u16		vector = 1;

	for (int i = 0; i < vsi->num_queues; i++, vector++) {
		wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
		/* First queue type is RX / 0 */
		wr32(hw, I40E_PFINT_LNKLSTN(i), i);
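		/*
		 * Note: each vector's interrupt linked list is
		 * RX queue i -> TX queue i -> end of list: RQCTL's
		 * NEXTQ below points at the TX queue, and TQCTL then
		 * terminates the list with IXL_QUEUE_EOL.
		 */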
		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
		    (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
		    (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		    (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_RQCTL(i), reg);

		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		    (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		    (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		    (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_TQCTL(i), reg);
	}
}
/*
 * Configure for MSI single vector operation
 */
void
ixl_configure_legacy(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ixl_queue *que = vsi->queues;
	struct rx_ring	*rxr = &que->rxr;
	struct tx_ring	*txr = &que->txr;
	u32		reg;

	vsi->tx_itr_setting = pf->tx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
	    vsi->tx_itr_setting);
	txr->itr = vsi->tx_itr_setting;

	vsi->rx_itr_setting = pf->rx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
	    vsi->rx_itr_setting);
	rxr->itr = vsi->rx_itr_setting;

	/* Setup "other" causes */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
	    | I40E_PFINT_ICR0_ENA_GRST_MASK
	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
	    ;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/* No ITR for non-queue interrupts */
	wr32(hw, I40E_PFINT_STAT_CTL0,
	    IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the q int */
	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
	wr32(hw, I40E_QINT_TQCTL(0), reg);
}
int
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
	int		rid;
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;

	/* Map BAR0 */
	rid = PCIR_BAR(0);
	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(pf->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
		return (ENXIO);
	}
	/* Ensure proper PCI device operation */
	ixl_set_busmaster(dev);

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	pf->osdep.mem_bus_space_tag =
	    rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
	    rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
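	/*
	 * Note: hw_addr above stores a pointer to the bus space handle,
	 * not a direct register mapping; the osdep register access
	 * macros resolve it through the tag/handle saved above.
	 */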

	pf->hw.back = &pf->osdep;

	return (0);
}
/*
 * Teardown and release the admin queue/misc vector
 * interrupt.
 */
int
ixl_teardown_adminq_msix(struct ixl_pf *pf)
{
	device_t	dev = pf->dev;
	int		rid, error = 0;

	if (pf->admvec) /* we are doing MSIX */
		rid = pf->admvec + 1;
	else
		rid = (pf->msix != 0) ? 1 : 0;
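	/*
	 * Note: FreeBSD SYS_RES_IRQ resource IDs for MSI/MSI-X vectors
	 * start at 1 (hence vector + 1); rid 0 is the legacy INTx line.
	 */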

	if (pf->tag != NULL) {
		error = bus_teardown_intr(dev, pf->res, pf->tag);
		if (error) {
			device_printf(dev, "bus_teardown_intr() for"
			    " interrupt 0 failed\n");
			return (error);
		}
		pf->tag = NULL;
	}
	if (pf->res != NULL) {
		error = bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
		if (error) {
			device_printf(dev, "bus_release_resource() for"
			    " interrupt 0 failed [rid=%d]\n", rid);
			return (error);
		}
		pf->res = NULL;
	}

	return (0);
}
int
ixl_teardown_queue_msix(struct ixl_vsi *vsi)
{
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct ixl_queue *que = vsi->queues;
	device_t	dev = vsi->dev;
	int		rid, error = 0;

	/* We may get here before stations are setup */
	if ((pf->msix < 2) || (que == NULL))
		return (0);

	/* Release all MSIX queue resources */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			error = bus_teardown_intr(dev, que->res, que->tag);
			if (error) {
				device_printf(dev, "bus_teardown_intr() for"
				    " Queue %d interrupt failed\n",
				    que->me);
			}
			que->tag = NULL;
		}
		if (que->res != NULL) {
			error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
			if (error) {
				device_printf(dev, "bus_release_resource() for"
				    " Queue %d interrupt failed [rid=%d]\n",
				    que->me, rid);
			}
			que->res = NULL;
		}
	}

	return (0);
}
void
ixl_free_pci_resources(struct ixl_pf *pf)
{
	device_t	dev = pf->dev;
	int		memrid;

	ixl_teardown_queue_msix(&pf->vsi);
	ixl_teardown_adminq_msix(pf);

	if (pf->msix > 0)
		pci_release_msi(dev);

	memrid = PCIR_BAR(IXL_MSIX_BAR);

	if (pf->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, pf->msix_mem);

	if (pf->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), pf->pci_mem);
}
void
ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
{
	/* Display supported media types */
	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
}
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
int
ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
{
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct ifnet	*ifp;
	struct i40e_hw	*hw = vsi->hw;
	struct ixl_queue *que = vsi->queues;
	struct i40e_aq_get_phy_abilities_resp abilities;
	enum i40e_status_code aq_error = 0;

	INIT_DEBUGOUT("ixl_setup_interface: begin");

	ifp = vsi->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		return (-1);
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = ixl_init;
	ifp->if_softc = vsi;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixl_ioctl;

#if __FreeBSD_version >= 1100036
	if_setgetcounterfn(ifp, ixl_get_counter);
#endif

	ifp->if_transmit = ixl_mq_start;

	ifp->if_qflush = ixl_qflush;

	ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2;

	vsi->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;
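	/* i.e. 1500 + 14 + 4 + 4 = 1522 bytes with the default MTU */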

	/* Set TSO limits */
	ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
	ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
	ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_TSO;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_LRO;

	/* VLAN capabilities */
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
	    | IFCAP_VLAN_HWTSO
	    | IFCAP_VLAN_MTU
	    | IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	** Don't turn this on by default: if vlans are
	** created on another pseudo device (e.g. lagg)
	** then vlan events are not passed through, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the ixl driver you can
	** enable this and get full hardware tag filtering.
	*/
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
	    ixl_media_status);

	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, TRUE, &abilities, NULL);
	/* May need delay to detect fiber correctly */
	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
		i40e_msec_delay(200);
		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
		    TRUE, &abilities, NULL);
	}
	if (aq_error) {
		if (aq_error == I40E_ERR_UNKNOWN_PHY)
			device_printf(dev, "Unknown PHY type detected!\n");
		else
			device_printf(dev,
			    "Error getting supported media types, err %d,"
			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
	} else {
		pf->supported_speeds = abilities.link_speed;
#if __FreeBSD_version >= 1100000
		ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds);
#else
		if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
#endif

		ixl_add_ifmedia(vsi, hw->phy.phy_types);
	}

	/* Use autoselect media by default */
	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);

	ether_ifattach(ifp, hw->mac.addr);

	return (0);
}
/*
** Run when the Admin Queue gets a link state change interrupt.
*/
void
ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	struct i40e_aqc_get_link_status *status =
	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;

	/* Request link status from adapter */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);

	/* Print out message if an unqualified module is found */
	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    (pf->advertised_speed) &&
	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
	    (!(status->link_info & I40E_AQ_LINK_UP)))
		device_printf(dev, "Link failed because "
		    "an unqualified module was detected!\n");

	/* Update OS link info */
	ixl_update_link_status(pf);
}
/*********************************************************************
 *
 *  Get Firmware Switch configuration
 *	- this will need to be more robust when more complex
 *	  switch configurations are enabled.
 *
 **********************************************************************/
int
ixl_switch_config(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	device_t	dev = vsi->dev;
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u8	aq_buf[I40E_AQ_LARGE_BUF];
	int	ret;
	u16	next = 0;

	memset(&aq_buf, 0, sizeof(aq_buf));
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	ret = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (ret) {
		device_printf(dev, "aq_get_switch_config() failed, error %d,"
		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
		return (ret);
	}
	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
		device_printf(dev,
		    "Switch config: header reported: %d in structure, %d total\n",
		    sw_config->header.num_reported, sw_config->header.num_total);
		for (int i = 0; i < sw_config->header.num_reported; i++) {
			device_printf(dev,
			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
			    sw_config->element[i].element_type,
			    sw_config->element[i].seid,
			    sw_config->element[i].uplink_seid,
			    sw_config->element[i].downlink_seid);
		}
	}
	/* Simplified due to a single VSI */
	vsi->uplink_seid = sw_config->element[0].uplink_seid;
	vsi->downlink_seid = sw_config->element[0].downlink_seid;
	vsi->seid = sw_config->element[0].seid;

	return (ret);
}
/*********************************************************************
 *
 *  Initialize the VSI:  this handles contexts, which means things
 *  like the number of descriptors and buffer size; we also
 *  initialize the rings through this function.
 *
 **********************************************************************/
int
ixl_initialize_vsi(struct ixl_vsi *vsi)
{
	struct ixl_pf	*pf = vsi->back;
	struct ixl_queue *que = vsi->queues;
	device_t	dev = vsi->dev;
	struct i40e_hw	*hw = vsi->hw;
	struct i40e_vsi_context	ctxt;
	int		tc_queues;
	int		err = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = vsi->seid;
	if (pf->veb_seid != 0)
		ctxt.uplink_seid = pf->veb_seid;
	ctxt.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
		    " aq_error %d\n", err, hw->aq.asq_last_status);
		return (err);
	}
	ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
	    "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
	    ctxt.uplink_seid, ctxt.vsi_number,
	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
	/*
	** Set the queue and traffic class bits
	**  - when multiple traffic classes are supported
	**    this will need to be more robust.
	*/
	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
	/* In contig mode, que_mapping[0] is first queue index used by this VSI */
	ctxt.info.queue_mapping[0] = 0;
	/*
	 * This VSI will only use traffic class 0; start traffic class 0's
	 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
	 * the driver may not use all of them).
	 */
	tc_queues = bsrl(pf->qtag.num_allocated);
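	/*
	 * e.g. with 8 queue pairs allocated, bsrl(8) = 3, so TC0 is
	 * sized for 2^3 = 8 queues in the mapping below.
	 */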
	ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);

	/* Set VLAN receive stripping mode */
	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
	else
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

#ifdef IXL_IW
	/* Set TCP Enable for iWARP capable VSI */
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ctxt.info.valid_sections |=
		    htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}
#endif
	/* Save VSI number and info for use later */
	vsi->vsi_num = ctxt.vsi_number;
	bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));

	/* Reset VSI statistics */
	ixl_vsi_reset_stats(vsi);
	vsi->hw_filters_add = 0;
	vsi->hw_filters_del = 0;

	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);

	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
		    " aq_error %d\n", err, hw->aq.asq_last_status);
		return (err);
	}
2153 for (int i = 0; i < vsi->num_queues; i++, que++) {
2154 struct tx_ring *txr = &que->txr;
2155 struct rx_ring *rxr = &que->rxr;
2156 struct i40e_hmc_obj_txq tctx;
2157 struct i40e_hmc_obj_rxq rctx;
2161 /* Setup the HMC TX Context */
2162 size = que->num_tx_desc * sizeof(struct i40e_tx_desc);
2163 bzero(&tctx, sizeof(tctx));
2164 tctx.new_context = 1;
2165 tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2166 tctx.qlen = que->num_tx_desc;
2167 tctx.fc_ena = 0; /* Disable FCoE */
2169 * This value needs to pulled from the VSI that this queue
2170 * is assigned to. Index into array is traffic class.
2172 tctx.rdylist = vsi->info.qs_handle[0];
2174 * Set these to enable Head Writeback
2175 * - Address is last entry in TX ring (reserved for HWB index)
2176 * Leave these as 0 for Descriptor Writeback
2178 if (vsi->enable_head_writeback) {
2179 tctx.head_wb_ena = 1;
2180 tctx.head_wb_addr = txr->dma.pa +
2181 (que->num_tx_desc * sizeof(struct i40e_tx_desc));
2183 tctx.rdylist_act = 0;
2184 err = i40e_clear_lan_tx_queue_context(hw, i);
2186 device_printf(dev, "Unable to clear TX context\n");
2189 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2191 device_printf(dev, "Unable to set TX context\n");
2194 /* Associate the ring with this PF */
2195 txctl = I40E_QTX_CTL_PF_QUEUE;
2196 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2197 I40E_QTX_CTL_PF_INDX_MASK);
2198 wr32(hw, I40E_QTX_CTL(i), txctl);
2201 /* Do ring (re)init */
2202 ixl_init_tx_ring(que);
2204 /* Next setup the HMC RX Context */
2205 if (vsi->max_frame_size <= MCLBYTES)
2206 rxr->mbuf_sz = MCLBYTES;
2207 else
2208 rxr->mbuf_sz = MJUMPAGESIZE;
2210 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2212 /* Set up an RX context for the HMC */
2213 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2214 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2215 /* ignore header split for now */
2216 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2217 rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2218 vsi->max_frame_size : max_rxmax;
2220 rctx.dsize = 1; /* do 32byte descriptors */
2221 rctx.hsplit_0 = 0; /* no header split */
2222 rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
2223 rctx.qlen = que->num_rx_desc;
2224 rctx.tphrdesc_ena = 1;
2225 rctx.tphwdesc_ena = 1;
2226 rctx.tphdata_ena = 0; /* Header Split related */
2227 rctx.tphhead_ena = 0; /* Header Split related */
2228 rctx.lrxqthresh = 2; /* Interrupt at <128 desc avail */
2231 rctx.showiv = 1; /* Strip inner VLAN header */
2232 rctx.fc_ena = 0; /* Disable FCoE */
2233 rctx.prefena = 1; /* Prefetch descriptors */
2235 err = i40e_clear_lan_rx_queue_context(hw, i);
2238 "Unable to clear RX context %d\n", i);
2241 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2243 device_printf(dev, "Unable to set RX context %d\n", i);
2246 err = ixl_init_rx_ring(que);
2248 device_printf(dev, "Fail in init_rx_ring %d\n", i);
2251 #ifdef DEV_NETMAP
2252 /* preserve queue */
2253 if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2254 struct netmap_adapter *na = NA(vsi->ifp);
2255 struct netmap_kring *kring = na->rx_rings[i];
2256 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2257 wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2258 } else
2259 #endif /* DEV_NETMAP */
2260 wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_rx_desc - 1);
2269 ixl_vsi_free_queues(struct ixl_vsi *vsi)
2271 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2272 struct ixl_queue *que = vsi->queues;
2274 if (NULL == vsi->queues)
2277 for (int i = 0; i < vsi->num_queues; i++, que++) {
2278 struct tx_ring *txr = &que->txr;
2279 struct rx_ring *rxr = &que->rxr;
2281 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2285 buf_ring_free(txr->br, M_DEVBUF);
2286 ixl_free_que_tx(que);
2288 i40e_free_dma_mem(&pf->hw, &txr->dma);
2290 IXL_TX_LOCK_DESTROY(txr);
2292 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2295 ixl_free_que_rx(que);
2297 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2299 IXL_RX_LOCK_DESTROY(rxr);
2304 /*********************************************************************
2306 * Free all VSI structs.
2308 **********************************************************************/
2310 ixl_free_vsi(struct ixl_vsi *vsi)
2313 /* Free station queues */
2314 ixl_vsi_free_queues(vsi);
2316 free(vsi->queues, M_DEVBUF);
2318 /* Free VSI filter list */
2319 ixl_free_mac_filters(vsi);
2323 ixl_free_mac_filters(struct ixl_vsi *vsi)
2325 struct ixl_mac_filter *f;
2327 while (!SLIST_EMPTY(&vsi->ftl)) {
2328 f = SLIST_FIRST(&vsi->ftl);
2329 SLIST_REMOVE_HEAD(&vsi->ftl, next);
2335 * Fill out fields in queue struct and set up tx/rx memory and structs
2338 ixl_vsi_setup_queue(struct ixl_vsi *vsi, struct ixl_queue *que, int index)
2340 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2341 device_t dev = pf->dev;
2342 struct i40e_hw *hw = &pf->hw;
2343 struct tx_ring *txr = &que->txr;
2344 struct rx_ring *rxr = &que->rxr;
2348 que->num_tx_desc = vsi->num_tx_desc;
2349 que->num_rx_desc = vsi->num_rx_desc;
2354 txr->tail = I40E_QTX_TAIL(que->me);
2356 /* Initialize the TX lock */
2357 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2358 device_get_nameunit(dev), que->me);
2359 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2361 * Create the TX descriptor ring
2363 * In Head Writeback mode, the descriptor ring is one bigger
2364 * than the number of descriptors for space for the HW to
2365 * write back index of last completed descriptor.
2367 if (vsi->enable_head_writeback) {
2368 tsize = roundup2((que->num_tx_desc *
2369 sizeof(struct i40e_tx_desc)) +
2370 sizeof(u32), DBA_ALIGN);
2372 tsize = roundup2((que->num_tx_desc *
2373 sizeof(struct i40e_tx_desc)), DBA_ALIGN);
2375 if (i40e_allocate_dma_mem(hw,
2376 &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2378 "Unable to allocate TX Descriptor memory\n");
2380 goto err_destroy_tx_mtx;
2382 txr->base = (struct i40e_tx_desc *)txr->dma.va;
2383 bzero((void *)txr->base, tsize);
2384 /* Now allocate transmit soft structs for the ring */
2385 if (ixl_allocate_tx_data(que)) {
2387 "Critical Failure setting up TX structures\n");
2389 goto err_free_tx_dma;
2391 /* Allocate a buf ring */
2392 txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF,
2393 M_NOWAIT, &txr->mtx);
2394 if (txr->br == NULL) {
2396 "Critical Failure setting up TX buf ring\n");
2398 goto err_free_tx_data;
2401 rsize = roundup2(que->num_rx_desc *
2402 sizeof(union i40e_rx_desc), DBA_ALIGN);
2404 rxr->tail = I40E_QRX_TAIL(que->me);
2406 /* Initialize the RX side lock */
2407 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2408 device_get_nameunit(dev), que->me);
2409 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2411 if (i40e_allocate_dma_mem(hw,
2412 &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2414 "Unable to allocate RX Descriptor memory\n");
2416 goto err_destroy_rx_mtx;
2418 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2419 bzero((void *)rxr->base, rsize);
2420 /* Allocate receive soft structs for the ring*/
2421 if (ixl_allocate_rx_data(que)) {
2423 "Critical Failure setting up receive structs\n");
2425 goto err_free_rx_dma;
2431 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2433 mtx_destroy(&rxr->mtx);
2434 /* err_free_tx_buf_ring */
2435 buf_ring_free(txr->br, M_DEVBUF);
2437 ixl_free_que_tx(que);
2439 i40e_free_dma_mem(&pf->hw, &txr->dma);
2441 mtx_destroy(&txr->mtx);
2447 ixl_vsi_setup_queues(struct ixl_vsi *vsi)
2449 struct ixl_queue *que;
2452 for (int i = 0; i < vsi->num_queues; i++) {
2453 que = &vsi->queues[i];
2454 error = ixl_vsi_setup_queue(vsi, que, i);
2462 /*********************************************************************
2464 * Allocate memory for the VSI (virtual station interface) and its
2465 * associated queues, rings and the descriptors associated with each,
2466 * called only once at attach.
2468 **********************************************************************/
2470 ixl_setup_stations(struct ixl_pf *pf)
2472 device_t dev = pf->dev;
2473 struct ixl_vsi *vsi;
2477 vsi->back = (void *)pf;
2484 vsi->flags |= IXL_FLAGS_USES_MSIX;
2486 /* Get memory for the station queues */
2488 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2489 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2490 device_printf(dev, "Unable to allocate queue memory\n");
2492 goto ixl_setup_stations_err;
2495 /* Then setup each queue */
2496 error = ixl_vsi_setup_queues(vsi);
2497 ixl_setup_stations_err:
2502 ** Provide an update to the queue RX
2503 ** interrupt moderation value.
2506 ixl_set_queue_rx_itr(struct ixl_queue *que)
2508 struct ixl_vsi *vsi = que->vsi;
2509 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2510 struct i40e_hw *hw = vsi->hw;
2511 struct rx_ring *rxr = &que->rxr;
2516 /* Idle, do nothing */
2517 if (rxr->bytes == 0)
2520 if (pf->dynamic_rx_itr) {
2521 rx_bytes = rxr->bytes/rxr->itr;
2524 /* Adjust latency range */
2525 switch (rxr->latency) {
2526 case IXL_LOW_LATENCY:
2527 if (rx_bytes > 10) {
2528 rx_latency = IXL_AVE_LATENCY;
2529 rx_itr = IXL_ITR_20K;
2532 case IXL_AVE_LATENCY:
2533 if (rx_bytes > 20) {
2534 rx_latency = IXL_BULK_LATENCY;
2535 rx_itr = IXL_ITR_8K;
2536 } else if (rx_bytes <= 10) {
2537 rx_latency = IXL_LOW_LATENCY;
2538 rx_itr = IXL_ITR_100K;
2541 case IXL_BULK_LATENCY:
2542 if (rx_bytes <= 20) {
2543 rx_latency = IXL_AVE_LATENCY;
2544 rx_itr = IXL_ITR_20K;
2549 rxr->latency = rx_latency;
2551 if (rx_itr != rxr->itr) {
2552 /* do an exponential smoothing */
2553 rx_itr = (10 * rx_itr * rxr->itr) /
2554 ((9 * rx_itr) + rxr->itr);
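/*
 * Worked example (illustrative): moving from an old ITR of 100 toward
 * a target of 20 gives (10 * 20 * 100) / ((9 * 20) + 100) = 20000 / 280
 * ~= 71, roughly a third of the distance per call, so the moderation
 * rate ramps instead of jumping.
 */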
2555 rxr->itr = min(rx_itr, IXL_MAX_ITR);
2556 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2557 que->me), rxr->itr);
2559 } else { /* We may have toggled to non-dynamic */
2560 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2561 vsi->rx_itr_setting = pf->rx_itr;
2562 /* Update the hardware if needed */
2563 if (rxr->itr != vsi->rx_itr_setting) {
2564 rxr->itr = vsi->rx_itr_setting;
2565 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2566 que->me), rxr->itr);
2576 ** Provide an update to the queue TX
2577 ** interrupt moderation value.
2580 ixl_set_queue_tx_itr(struct ixl_queue *que)
2582 struct ixl_vsi *vsi = que->vsi;
2583 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2584 struct i40e_hw *hw = vsi->hw;
2585 struct tx_ring *txr = &que->txr;
2591 /* Idle, do nothing */
2592 if (txr->bytes == 0)
2595 if (pf->dynamic_tx_itr) {
2596 tx_bytes = txr->bytes/txr->itr;
2599 switch (txr->latency) {
2600 case IXL_LOW_LATENCY:
2601 if (tx_bytes > 10) {
2602 tx_latency = IXL_AVE_LATENCY;
2603 tx_itr = IXL_ITR_20K;
2606 case IXL_AVE_LATENCY:
2607 if (tx_bytes > 20) {
2608 tx_latency = IXL_BULK_LATENCY;
2609 tx_itr = IXL_ITR_8K;
2610 } else if (tx_bytes <= 10) {
2611 tx_latency = IXL_LOW_LATENCY;
2612 tx_itr = IXL_ITR_100K;
2615 case IXL_BULK_LATENCY:
2616 if (tx_bytes <= 20) {
2617 tx_latency = IXL_AVE_LATENCY;
2618 tx_itr = IXL_ITR_20K;
2623 txr->latency = tx_latency;
2625 if (tx_itr != txr->itr) {
2626 /* do an exponential smoothing */
2627 tx_itr = (10 * tx_itr * txr->itr) /
2628 ((9 * tx_itr) + txr->itr);
2629 txr->itr = min(tx_itr, IXL_MAX_ITR);
2630 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2631 que->me), txr->itr);
2634 } else { /* We may have toggled to non-dynamic */
2635 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2636 vsi->tx_itr_setting = pf->tx_itr;
2637 /* Update the hardware if needed */
2638 if (txr->itr != vsi->tx_itr_setting) {
2639 txr->itr = vsi->tx_itr_setting;
2640 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2641 que->me), txr->itr);
2650 ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
2651 struct sysctl_ctx_list *ctx, const char *sysctl_name)
2653 struct sysctl_oid *tree;
2654 struct sysctl_oid_list *child;
2655 struct sysctl_oid_list *vsi_list;
2657 tree = device_get_sysctl_tree(pf->dev);
2658 child = SYSCTL_CHILDREN(tree);
2659 vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
2660 CTLFLAG_RD, NULL, "VSI Number");
2661 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
2663 ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
2668 * ixl_sysctl_qtx_tail_handler
2669 * Retrieves I40E_QTX_TAIL value from hardware
2673 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
2675 struct ixl_queue *que;
2679 que = ((struct ixl_queue *)oidp->oid_arg1);
2682 val = rd32(que->vsi->hw, que->txr.tail);
2683 error = sysctl_handle_int(oidp, &val, 0, req);
2684 if (error || !req->newptr)
2690 * ixl_sysctl_qrx_tail_handler
2691 * Retrieves I40E_QRX_TAIL value from hardware
2695 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
2697 struct ixl_queue *que;
2701 que = ((struct ixl_queue *)oidp->oid_arg1);
2704 val = rd32(que->vsi->hw, que->rxr.tail);
2705 error = sysctl_handle_int(oidp, &val, 0, req);
2706 if (error || !req->newptr)
2713 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
2714 * Writes to the ITR registers immediately.
2717 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
2719 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2720 device_t dev = pf->dev;
2722 int requested_tx_itr;
2724 requested_tx_itr = pf->tx_itr;
2725 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
2726 if ((error) || (req->newptr == NULL))
2728 if (pf->dynamic_tx_itr) {
2730 "Cannot set TX itr value while dynamic TX itr is enabled\n");
2733 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
2735 "Invalid TX itr value; value must be between 0 and %d\n",
2740 pf->tx_itr = requested_tx_itr;
2741 ixl_configure_tx_itr(pf);
2747 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
2748 * Writes to the ITR registers immediately.
2751 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
2753 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2754 device_t dev = pf->dev;
2756 int requested_rx_itr;
2758 requested_rx_itr = pf->rx_itr;
2759 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
2760 if ((error) || (req->newptr == NULL))
2762 if (pf->dynamic_rx_itr) {
2764 "Cannot set RX itr value while dynamic RX itr is enabled\n");
2767 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
2769 "Invalid RX itr value; value must be between 0 and %d\n",
2774 pf->rx_itr = requested_rx_itr;
2775 ixl_configure_rx_itr(pf);
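/*
 * Example usage from userland (hypothetical unit number and value):
 *
 *	# sysctl dev.ixl.0.rx_itr=62
 *
 * The new value is written to the ITR registers immediately via
 * ixl_configure_rx_itr() above, and the write is rejected while
 * dynamic RX ITR is enabled.
 */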
2781 ixl_add_hw_stats(struct ixl_pf *pf)
2783 device_t dev = pf->dev;
2784 struct ixl_vsi *vsi = &pf->vsi;
2785 struct ixl_queue *queues = vsi->queues;
2786 struct i40e_hw_port_stats *pf_stats = &pf->stats;
2788 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2789 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2790 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2791 struct sysctl_oid_list *vsi_list;
2793 struct sysctl_oid *queue_node;
2794 struct sysctl_oid_list *queue_list;
2796 struct tx_ring *txr;
2797 struct rx_ring *rxr;
2798 char queue_namebuf[QUEUE_NAME_LEN];
2800 /* Driver statistics */
2801 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
2802 CTLFLAG_RD, &pf->watchdog_events,
2803 "Watchdog timeouts");
2804 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
2805 CTLFLAG_RD, &pf->admin_irq,
2806 "Admin Queue IRQ Handled");
2808 ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
2809 vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
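/*
 * Assuming device unit 0, the node registered above appears as
 * dev.ixl.0.pf, with the VSI ethernet stats beneath it (e.g.
 * dev.ixl.0.pf.good_octets_rcvd) and the per-queue nodes added below.
 */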
2811 /* Queue statistics */
2812 for (int q = 0; q < vsi->num_queues; q++) {
2813 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2814 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
2815 OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
2816 queue_list = SYSCTL_CHILDREN(queue_node);
2818 txr = &(queues[q].txr);
2819 rxr = &(queues[q].rxr);
2821 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2822 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2823 "m_defrag() failed");
2824 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2825 CTLFLAG_RD, &(queues[q].irqs),
2826 "irqs on this queue");
2827 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2828 CTLFLAG_RD, &(queues[q].tso),
2829 "TSO");
2830 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
2831 CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
2832 "Driver tx dma failure in xmit");
2833 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
2834 CTLFLAG_RD, &(queues[q].mss_too_small),
2835 "TSO sends with an MSS less than 64");
2836 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2837 CTLFLAG_RD, &(txr->no_desc),
2838 "Queue No Descriptor Available");
2839 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2840 CTLFLAG_RD, &(txr->total_packets),
2841 "Queue Packets Transmitted");
2842 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2843 CTLFLAG_RD, &(txr->tx_bytes),
2844 "Queue Bytes Transmitted");
2845 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2846 CTLFLAG_RD, &(rxr->rx_packets),
2847 "Queue Packets Received");
2848 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2849 CTLFLAG_RD, &(rxr->rx_bytes),
2850 "Queue Bytes Received");
2851 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err",
2852 CTLFLAG_RD, &(rxr->desc_errs),
2853 "Queue Rx Descriptor Errors");
2854 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
2855 CTLFLAG_RD, &(rxr->itr), 0,
2856 "Queue Rx ITR Interval");
2857 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
2858 CTLFLAG_RD, &(txr->itr), 0,
2859 "Queue Tx ITR Interval");
2861 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "txr_watchdog",
2862 CTLFLAG_RD, &(txr->watchdog_timer), 0,
2863 "Ticks before watchdog timer causes interface reinit");
2864 SYSCTL_ADD_U16(ctx, queue_list, OID_AUTO, "tx_next_avail",
2865 CTLFLAG_RD, &(txr->next_avail), 0,
2866 "Next TX descriptor to be used");
2867 SYSCTL_ADD_U16(ctx, queue_list, OID_AUTO, "tx_next_to_clean",
2868 CTLFLAG_RD, &(txr->next_to_clean), 0,
2869 "Next TX descriptor to be cleaned");
2870 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done",
2871 CTLFLAG_RD, &(rxr->not_done),
2872 "Queue Rx Descriptors not Done");
2873 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh",
2874 CTLFLAG_RD, &(rxr->next_refresh), 0,
2875 "Queue Rx Descriptors not Done");
2876 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check",
2877 CTLFLAG_RD, &(rxr->next_check), 0,
2878 "Queue Rx Descriptors not Done");
2879 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
2880 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2881 sizeof(struct ixl_queue),
2882 ixl_sysctl_qrx_tail_handler, "IU",
2883 "Queue Receive Descriptor Tail");
2884 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
2885 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2886 sizeof(struct ixl_queue),
2887 ixl_sysctl_qtx_tail_handler, "IU",
2888 "Queue Transmit Descriptor Tail");
2893 ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
2897 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
2898 struct sysctl_oid_list *child,
2899 struct i40e_eth_stats *eth_stats)
2901 struct ixl_sysctl_info ctls[] =
2903 {ð_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2904 {ð_stats->rx_unicast, "ucast_pkts_rcvd",
2905 "Unicast Packets Received"},
2906 {ð_stats->rx_multicast, "mcast_pkts_rcvd",
2907 "Multicast Packets Received"},
2908 {ð_stats->rx_broadcast, "bcast_pkts_rcvd",
2909 "Broadcast Packets Received"},
2910 {ð_stats->rx_discards, "rx_discards", "Discarded RX packets"},
2911 {ð_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2912 {ð_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2913 {ð_stats->tx_multicast, "mcast_pkts_txd",
2914 "Multicast Packets Transmitted"},
2915 {ð_stats->tx_broadcast, "bcast_pkts_txd",
2916 "Broadcast Packets Transmitted"},
2921 struct ixl_sysctl_info *entry = ctls;
2922 while (entry->stat != 0)
2924 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
2925 CTLFLAG_RD, entry->stat,
2926 entry->description);
2932 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
2933 struct sysctl_oid_list *child,
2934 struct i40e_hw_port_stats *stats)
2936 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
2937 CTLFLAG_RD, NULL, "Mac Statistics");
2938 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
2940 struct i40e_eth_stats *eth_stats = &stats->eth;
2941 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
2943 struct ixl_sysctl_info ctls[] =
2945 {&stats->crc_errors, "crc_errors", "CRC Errors"},
2946 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
2947 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
2948 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
2949 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
2950 /* Packet Reception Stats */
2951 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
2952 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
2953 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
2954 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
2955 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
2956 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
2957 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
2958 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
2959 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
2960 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
2961 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
2962 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
2963 /* Packet Transmission Stats */
2964 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
2965 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
2966 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
2967 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
2968 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
2969 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
2970 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
2972 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
2973 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
2974 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
2975 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
2980 struct ixl_sysctl_info *entry = ctls;
2981 while (entry->stat != 0)
2983 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
2984 CTLFLAG_RD, entry->stat,
2985 entry->description);
2991 ixl_set_rss_key(struct ixl_pf *pf)
2993 struct i40e_hw *hw = &pf->hw;
2994 struct ixl_vsi *vsi = &pf->vsi;
2995 device_t dev = pf->dev;
2996 u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
2997 enum i40e_status_code status;
2999 #ifdef RSS
3000 /* Fetch the configured RSS key */
3001 rss_getkey((uint8_t *) &rss_seed);
3002 #else
3003 ixl_get_default_rss_key(rss_seed);
3004 #endif /* RSS */
3005 /* Fill out hash function seed */
3006 if (hw->mac.type == I40E_MAC_X722) {
3007 struct i40e_aqc_get_set_rss_key_data key_data;
3008 bcopy(rss_seed, &key_data, 52);
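/*
 * Assumption based on the AQ structure layout: the 52 bytes copied here
 * cover the 40-byte standard RSS key plus the 12-byte extended hash key
 * fields of struct i40e_aqc_get_set_rss_key_data.
 */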
3009 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
3012 "i40e_aq_set_rss_key status %s, error %s\n",
3013 i40e_stat_str(hw, status),
3014 i40e_aq_str(hw, hw->aq.asq_last_status));
3016 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
3017 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
3022 * Configure enabled PCTYPES for RSS.
3025 ixl_set_rss_pctypes(struct ixl_pf *pf)
3027 struct i40e_hw *hw = &pf->hw;
3028 u64 set_hena = 0, hena;
3030 #ifdef RSS
3031 u32 rss_hash_config;
3033 rss_hash_config = rss_gethashconfig();
3034 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3035 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3036 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3037 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
3038 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3039 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
3040 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3041 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
3042 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3043 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
3044 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3045 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
3046 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3047 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
3048 #else
3049 if (hw->mac.type == I40E_MAC_X722)
3050 set_hena = IXL_DEFAULT_RSS_HENA_X722;
3051 else
3052 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
3053 #endif /* RSS */
3054 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
3055 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
3056 hena |= set_hena;
3057 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
3058 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
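/*
 * The two 32-bit HENA registers form one 64-bit enable mask with one
 * bit per packet classifier type (PCTYPE); the low half goes to
 * HENA(0) and the high half to HENA(1), which is why the merged hena
 * value is split across the two writes above.
 */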
3063 ixl_set_rss_hlut(struct ixl_pf *pf)
3065 struct i40e_hw *hw = &pf->hw;
3066 device_t dev = pf->dev;
3067 struct ixl_vsi *vsi = &pf->vsi;
3069 int lut_entry_width;
3071 enum i40e_status_code status;
3073 lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
3075 /* Populate the LUT with the max number of queues in round-robin fashion */
3077 for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
3078 #ifdef RSS
3079 /*
3080 * Fetch the RSS bucket id for the given indirection entry.
3081 * Cap it at the number of configured buckets (which is
3082 * num_queues).
3083 */
3084 que_id = rss_get_indirection_to_bucket(i);
3085 que_id = que_id % vsi->num_queues;
3086 #else
3087 que_id = i % vsi->num_queues;
3088 #endif /* RSS */
3089 lut = (que_id & ((0x1 << lut_entry_width) - 1));
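/*
 * Illustrative: with 4 queues and an 8-bit LUT entry width, successive
 * entries cycle 0, 1, 2, 3, 0, ... so hash buckets spread evenly across
 * the queues; the mask above just truncates que_id to the entry width
 * the hardware reports.
 */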
3093 if (hw->mac.type == I40E_MAC_X722) {
3094 status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
3096 device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
3097 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
3099 for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
3100 wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
3106 ** Setup the PF's RSS parameters.
3109 ixl_config_rss(struct ixl_pf *pf)
3111 ixl_set_rss_key(pf);
3112 ixl_set_rss_pctypes(pf);
3113 ixl_set_rss_hlut(pf);
3117 ** This routine is run via a vlan config EVENT;
3118 ** it enables us to use the HW Filter table since
3119 ** we can get the vlan id. This just creates the
3120 ** entry in the soft version of the VFTA; init will
3121 ** repopulate the real table.
3124 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3126 struct ixl_vsi *vsi = ifp->if_softc;
3127 struct i40e_hw *hw = vsi->hw;
3128 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3130 if (ifp->if_softc != arg) /* Not our event */
3133 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3138 ixl_add_filter(vsi, hw->mac.addr, vtag);
3143 ** This routine is run via a vlan
3144 ** unconfig EVENT; it removes our entry
3145 ** from the soft vfta.
3148 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3150 struct ixl_vsi *vsi = ifp->if_softc;
3151 struct i40e_hw *hw = vsi->hw;
3152 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3154 if (ifp->if_softc != arg)
3157 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3162 ixl_del_filter(vsi, hw->mac.addr, vtag);
3167 ** This routine updates vlan filters; called by init,
3168 ** it scans the filter table and then updates the hw
3169 ** after a soft reset.
3172 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3174 struct ixl_mac_filter *f;
3177 if (vsi->num_vlans == 0)
3180 ** Scan the filter list for vlan entries,
3181 ** mark them for addition and then call
3182 ** for the AQ update.
3184 SLIST_FOREACH(f, &vsi->ftl, next) {
3185 if (f->flags & IXL_FILTER_VLAN) {
3193 printf("setup vlan: no filters found!\n");
3196 flags = IXL_FILTER_VLAN;
3197 flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3198 ixl_add_hw_filters(vsi, flags, cnt);
3203 ** Initialize filter list and add filters that the hardware
3204 ** needs to know about.
3206 ** Requires VSI's filter list & seid to be set before calling.
3209 ixl_init_filters(struct ixl_vsi *vsi)
3211 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3213 /* Add broadcast address */
3214 ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3217 * Prevent Tx flow control frames from being sent out by
3218 * non-firmware transmitters.
3219 * This affects every VSI in the PF.
3221 if (pf->enable_tx_fc_filter)
3222 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
3226 ** This routine adds multicast filters
3229 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3231 struct ixl_mac_filter *f;
3233 /* Does one already exist */
3234 f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3238 f = ixl_get_filter(vsi);
3240 printf("WARNING: no filter available!!\n");
3243 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3244 f->vlan = IXL_VLAN_ANY;
3245 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3252 ixl_reconfigure_filters(struct ixl_vsi *vsi)
3254 ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3258 ** This routine adds macvlan filters
3261 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
3263 struct ixl_mac_filter *f, *tmp;
3267 DEBUGOUT("ixl_add_filter: begin");
3272 /* Does one already exist */
3273 f = ixl_find_filter(vsi, macaddr, vlan);
3277 ** Is this the first vlan being registered? If so, we
3278 ** need to remove the ANY filter that indicates we are
3279 ** not in a vlan, and replace it with a 0 filter.
3281 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3282 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3284 ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3285 ixl_add_filter(vsi, macaddr, 0);
3289 f = ixl_get_filter(vsi);
3291 device_printf(dev, "WARNING: no filter available!!\n");
3294 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3296 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3297 if (f->vlan != IXL_VLAN_ANY)
3298 f->flags |= IXL_FILTER_VLAN;
3302 ixl_add_hw_filters(vsi, f->flags, 1);
3307 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
3309 struct ixl_mac_filter *f;
3311 f = ixl_find_filter(vsi, macaddr, vlan);
3315 f->flags |= IXL_FILTER_DEL;
3316 ixl_del_hw_filters(vsi, 1);
3319 /* Check if this is the last vlan removal */
3320 if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3321 /* Switch back to a non-vlan filter */
3322 ixl_del_filter(vsi, macaddr, 0);
3323 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3329 ** Find the filter with both matching mac addr and vlan id
3331 struct ixl_mac_filter *
3332 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
3334 struct ixl_mac_filter *f;
3337 SLIST_FOREACH(f, &vsi->ftl, next) {
3338 if (!cmp_etheraddr(f->macaddr, macaddr))
3340 if (f->vlan == vlan) {
3352 ** This routine takes additions to the vsi filter
3353 ** table and creates an Admin Queue call to create
3354 ** the filters in the hardware.
3357 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3359 struct i40e_aqc_add_macvlan_element_data *a, *b;
3360 struct ixl_mac_filter *f;
3369 IXL_PF_LOCK_ASSERT(pf);
3371 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3372 M_DEVBUF, M_NOWAIT | M_ZERO);
3374 device_printf(dev, "add_hw_filters failed to get memory\n");
3379 ** Scan the filter list, each time we find one
3380 ** we add it to the admin queue array and turn off
3381 ** the ADD bit.
3382 */
3383 SLIST_FOREACH(f, &vsi->ftl, next) {
3384 if ((f->flags & flags) == flags) {
3385 b = &a[j]; // a pox on fvl long names :)
3386 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3387 if (f->vlan == IXL_VLAN_ANY) {
3389 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3391 b->vlan_tag = f->vlan;
3394 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3395 f->flags &= ~IXL_FILTER_ADD;
3402 err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3404 device_printf(dev, "aq_add_macvlan err %d, "
3405 "aq_error %d\n", err, hw->aq.asq_last_status);
3407 vsi->hw_filters_add += j;
3414 ** This routine takes removals in the vsi filter
3415 ** table and creates an Admin Queue call to delete
3416 ** the filters in the hardware.
3419 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3421 struct i40e_aqc_remove_macvlan_element_data *d, *e;
3425 struct ixl_mac_filter *f, *f_temp;
3428 DEBUGOUT("ixl_del_hw_filters: begin\n");
3434 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3435 M_DEVBUF, M_NOWAIT | M_ZERO);
3437 printf("del hw filter failed to get memory\n");
3441 SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3442 if (f->flags & IXL_FILTER_DEL) {
3443 e = &d[j]; // a pox on fvl long names :)
3444 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3445 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3446 if (f->vlan == IXL_VLAN_ANY) {
3448 e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
3450 e->vlan_tag = f->vlan;
3452 /* delete entry from vsi list */
3453 SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3461 err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3462 if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3464 for (int i = 0; i < j; i++)
3465 sc += (!d[i].error_code);
3466 vsi->hw_filters_del += sc;
3468 "Failed to remove %d/%d filters, aq error %d\n",
3469 j - sc, j, hw->aq.asq_last_status);
3471 vsi->hw_filters_del += j;
3475 DEBUGOUT("ixl_del_hw_filters: end\n");
3480 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3482 struct i40e_hw *hw = &pf->hw;
3487 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3489 ixl_dbg(pf, IXL_DBG_EN_DIS,
3490 "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
3493 i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
3495 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3496 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3497 I40E_QTX_ENA_QENA_STAT_MASK;
3498 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
3499 /* Verify the enable took */
3500 for (int j = 0; j < 10; j++) {
3501 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3502 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3504 i40e_usec_delay(10);
3506 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3507 device_printf(pf->dev, "TX queue %d still disabled!\n",
3516 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3518 struct i40e_hw *hw = &pf->hw;
3523 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3525 ixl_dbg(pf, IXL_DBG_EN_DIS,
3526 "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
3529 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3530 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3531 I40E_QRX_ENA_QENA_STAT_MASK;
3532 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
3533 /* Verify the enable took */
3534 for (int j = 0; j < 10; j++) {
3535 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3536 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3538 i40e_usec_delay(10);
3540 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3541 device_printf(pf->dev, "RX queue %d still disabled!\n",
3550 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3554 error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
3555 /* Called function already prints error message */
3558 error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
3562 /* For PF VSI only */
3564 ixl_enable_rings(struct ixl_vsi *vsi)
3566 struct ixl_pf *pf = vsi->back;
3569 for (int i = 0; i < vsi->num_queues; i++) {
3570 error = ixl_enable_ring(pf, &pf->qtag, i);
3579 * Returns error on first ring that is detected hung.
3582 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3584 struct i40e_hw *hw = &pf->hw;
3589 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3591 i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
3592 i40e_usec_delay(500);
3594 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3595 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3596 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
3597 /* Verify the disable took */
3598 for (int j = 0; j < 10; j++) {
3599 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3600 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3602 i40e_msec_delay(10);
3604 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3605 device_printf(pf->dev, "TX queue %d still enabled!\n",
3614 * Returns error on first ring that is detected hung.
3617 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3619 struct i40e_hw *hw = &pf->hw;
3624 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3626 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3627 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3628 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
3629 /* Verify the disable took */
3630 for (int j = 0; j < 10; j++) {
3631 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3632 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3634 i40e_msec_delay(10);
3636 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3637 device_printf(pf->dev, "RX queue %d still enabled!\n",
3646 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3650 error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
3651 /* Called function already prints error message */
3654 error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
3658 /* For PF VSI only */
3660 ixl_disable_rings(struct ixl_vsi *vsi)
3662 struct ixl_pf *pf = vsi->back;
3665 for (int i = 0; i < vsi->num_queues; i++) {
3666 error = ixl_disable_ring(pf, &pf->qtag, i);
3675 * ixl_handle_mdd_event
3677 * Called from interrupt handler to identify possibly malicious vfs
3678 * (it also detects events from the PF)
3681 ixl_handle_mdd_event(struct ixl_pf *pf)
3683 struct i40e_hw *hw = &pf->hw;
3684 device_t dev = pf->dev;
3685 bool mdd_detected = false;
3686 bool pf_mdd_detected = false;
3689 /* find what triggered the MDD event */
3690 reg = rd32(hw, I40E_GL_MDET_TX);
3691 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3692 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3693 I40E_GL_MDET_TX_PF_NUM_SHIFT;
3694 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3695 I40E_GL_MDET_TX_EVENT_SHIFT;
3696 u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3697 I40E_GL_MDET_TX_QUEUE_SHIFT;
3699 "Malicious Driver Detection event %d"
3700 " on TX queue %d, pf number %d\n",
3701 event, queue, pf_num);
3702 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3703 mdd_detected = true;
3705 reg = rd32(hw, I40E_GL_MDET_RX);
3706 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3707 u8 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3708 I40E_GL_MDET_RX_FUNCTION_SHIFT;
3709 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3710 I40E_GL_MDET_RX_EVENT_SHIFT;
3711 u16 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3712 I40E_GL_MDET_RX_QUEUE_SHIFT;
3714 "Malicious Driver Detection event %d"
3715 " on RX queue %d, pf number %d\n",
3716 event, queue, pf_num);
3717 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3718 mdd_detected = true;
3722 reg = rd32(hw, I40E_PF_MDET_TX);
3723 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3724 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3726 "MDD TX event is for this function!\n");
3727 pf_mdd_detected = true;
3729 reg = rd32(hw, I40E_PF_MDET_RX);
3730 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3731 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3733 "MDD RX event is for this function!\n");
3734 pf_mdd_detected = true;
3738 /* re-enable mdd interrupt cause */
3739 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3740 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3741 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3746 ixl_enable_intr(struct ixl_vsi *vsi)
3748 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3749 struct i40e_hw *hw = vsi->hw;
3750 struct ixl_queue *que = vsi->queues;
3753 for (int i = 0; i < vsi->num_queues; i++, que++)
3754 ixl_enable_queue(hw, que->me);
3756 ixl_enable_intr0(hw);
3760 ixl_disable_rings_intr(struct ixl_vsi *vsi)
3762 struct i40e_hw *hw = vsi->hw;
3763 struct ixl_queue *que = vsi->queues;
3765 for (int i = 0; i < vsi->num_queues; i++, que++)
3766 ixl_disable_queue(hw, que->me);
3770 ixl_enable_intr0(struct i40e_hw *hw)
3774 /* Use IXL_ITR_NONE so ITR isn't updated here */
3775 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3776 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3777 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3778 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3782 ixl_disable_intr0(struct i40e_hw *hw)
3786 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3787 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3792 ixl_enable_queue(struct i40e_hw *hw, int id)
3796 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3797 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3798 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3799 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3803 ixl_disable_queue(struct i40e_hw *hw, int id)
3807 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3808 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3812 ixl_update_stats_counters(struct ixl_pf *pf)
3814 struct i40e_hw *hw = &pf->hw;
3815 struct ixl_vsi *vsi = &pf->vsi;
3818 struct i40e_hw_port_stats *nsd = &pf->stats;
3819 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3821 /* Update hw stats */
3822 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3823 pf->stat_offsets_loaded,
3824 &osd->crc_errors, &nsd->crc_errors);
3825 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3826 pf->stat_offsets_loaded,
3827 &osd->illegal_bytes, &nsd->illegal_bytes);
3828 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3829 I40E_GLPRT_GORCL(hw->port),
3830 pf->stat_offsets_loaded,
3831 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3832 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3833 I40E_GLPRT_GOTCL(hw->port),
3834 pf->stat_offsets_loaded,
3835 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3836 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3837 pf->stat_offsets_loaded,
3838 &osd->eth.rx_discards,
3839 &nsd->eth.rx_discards);
3840 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3841 I40E_GLPRT_UPRCL(hw->port),
3842 pf->stat_offsets_loaded,
3843 &osd->eth.rx_unicast,
3844 &nsd->eth.rx_unicast);
3845 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3846 I40E_GLPRT_UPTCL(hw->port),
3847 pf->stat_offsets_loaded,
3848 &osd->eth.tx_unicast,
3849 &nsd->eth.tx_unicast);
3850 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3851 I40E_GLPRT_MPRCL(hw->port),
3852 pf->stat_offsets_loaded,
3853 &osd->eth.rx_multicast,
3854 &nsd->eth.rx_multicast);
3855 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3856 I40E_GLPRT_MPTCL(hw->port),
3857 pf->stat_offsets_loaded,
3858 &osd->eth.tx_multicast,
3859 &nsd->eth.tx_multicast);
3860 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3861 I40E_GLPRT_BPRCL(hw->port),
3862 pf->stat_offsets_loaded,
3863 &osd->eth.rx_broadcast,
3864 &nsd->eth.rx_broadcast);
3865 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3866 I40E_GLPRT_BPTCL(hw->port),
3867 pf->stat_offsets_loaded,
3868 &osd->eth.tx_broadcast,
3869 &nsd->eth.tx_broadcast);
3871 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3872 pf->stat_offsets_loaded,
3873 &osd->tx_dropped_link_down,
3874 &nsd->tx_dropped_link_down);
3875 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3876 pf->stat_offsets_loaded,
3877 &osd->mac_local_faults,
3878 &nsd->mac_local_faults);
3879 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3880 pf->stat_offsets_loaded,
3881 &osd->mac_remote_faults,
3882 &nsd->mac_remote_faults);
3883 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3884 pf->stat_offsets_loaded,
3885 &osd->rx_length_errors,
3886 &nsd->rx_length_errors);
3888 /* Flow control (LFC) stats */
3889 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3890 pf->stat_offsets_loaded,
3891 &osd->link_xon_rx, &nsd->link_xon_rx);
3892 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3893 pf->stat_offsets_loaded,
3894 &osd->link_xon_tx, &nsd->link_xon_tx);
3895 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3896 pf->stat_offsets_loaded,
3897 &osd->link_xoff_rx, &nsd->link_xoff_rx);
3898 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3899 pf->stat_offsets_loaded,
3900 &osd->link_xoff_tx, &nsd->link_xoff_tx);
3902 /* Packet size stats rx */
3903 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3904 I40E_GLPRT_PRC64L(hw->port),
3905 pf->stat_offsets_loaded,
3906 &osd->rx_size_64, &nsd->rx_size_64);
3907 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3908 I40E_GLPRT_PRC127L(hw->port),
3909 pf->stat_offsets_loaded,
3910 &osd->rx_size_127, &nsd->rx_size_127);
3911 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3912 I40E_GLPRT_PRC255L(hw->port),
3913 pf->stat_offsets_loaded,
3914 &osd->rx_size_255, &nsd->rx_size_255);
3915 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3916 I40E_GLPRT_PRC511L(hw->port),
3917 pf->stat_offsets_loaded,
3918 &osd->rx_size_511, &nsd->rx_size_511);
3919 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3920 I40E_GLPRT_PRC1023L(hw->port),
3921 pf->stat_offsets_loaded,
3922 &osd->rx_size_1023, &nsd->rx_size_1023);
3923 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3924 I40E_GLPRT_PRC1522L(hw->port),
3925 pf->stat_offsets_loaded,
3926 &osd->rx_size_1522, &nsd->rx_size_1522);
3927 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3928 I40E_GLPRT_PRC9522L(hw->port),
3929 pf->stat_offsets_loaded,
3930 &osd->rx_size_big, &nsd->rx_size_big);
3932 /* Packet size stats tx */
3933 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3934 I40E_GLPRT_PTC64L(hw->port),
3935 pf->stat_offsets_loaded,
3936 &osd->tx_size_64, &nsd->tx_size_64);
3937 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3938 I40E_GLPRT_PTC127L(hw->port),
3939 pf->stat_offsets_loaded,
3940 &osd->tx_size_127, &nsd->tx_size_127);
3941 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3942 I40E_GLPRT_PTC255L(hw->port),
3943 pf->stat_offsets_loaded,
3944 &osd->tx_size_255, &nsd->tx_size_255);
3945 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3946 I40E_GLPRT_PTC511L(hw->port),
3947 pf->stat_offsets_loaded,
3948 &osd->tx_size_511, &nsd->tx_size_511);
3949 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3950 I40E_GLPRT_PTC1023L(hw->port),
3951 pf->stat_offsets_loaded,
3952 &osd->tx_size_1023, &nsd->tx_size_1023);
3953 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3954 I40E_GLPRT_PTC1522L(hw->port),
3955 pf->stat_offsets_loaded,
3956 &osd->tx_size_1522, &nsd->tx_size_1522);
3957 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3958 I40E_GLPRT_PTC9522L(hw->port),
3959 pf->stat_offsets_loaded,
3960 &osd->tx_size_big, &nsd->tx_size_big);
3962 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
3963 pf->stat_offsets_loaded,
3964 &osd->rx_undersize, &nsd->rx_undersize);
3965 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
3966 pf->stat_offsets_loaded,
3967 &osd->rx_fragments, &nsd->rx_fragments);
3968 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
3969 pf->stat_offsets_loaded,
3970 &osd->rx_oversize, &nsd->rx_oversize);
3971 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
3972 pf->stat_offsets_loaded,
3973 &osd->rx_jabber, &nsd->rx_jabber);
3974 pf->stat_offsets_loaded = true;
3977 /* Update vsi stats */
3978 ixl_update_vsi_stats(vsi);
3980 for (int i = 0; i < pf->num_vfs; i++) {
3982 if (vf->vf_flags & VF_FLAG_ENABLED)
3983 ixl_update_eth_stats(&pf->vfs[i].vsi);
3988 ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
3990 struct i40e_hw *hw = &pf->hw;
3991 struct ixl_vsi *vsi = &pf->vsi;
3992 device_t dev = pf->dev;
3999 ixl_teardown_queue_msix(vsi);
4001 error = i40e_shutdown_lan_hmc(hw);
4004 "Shutdown LAN HMC failed with code %d\n", error);
4006 ixl_disable_intr0(hw);
4007 ixl_teardown_adminq_msix(pf);
4009 error = i40e_shutdown_adminq(hw);
4012 "Shutdown Admin queue failed with code %d\n", error);
4014 callout_drain(&pf->timer);
4016 /* Free ring buffers, locks and filters */
4017 ixl_vsi_free_queues(vsi);
4019 /* Free VSI filter list */
4020 ixl_free_mac_filters(vsi);
4022 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
4028 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf, bool is_up)
4030 struct i40e_hw *hw = &pf->hw;
4031 struct ixl_vsi *vsi = &pf->vsi;
4032 device_t dev = pf->dev;
4035 device_printf(dev, "Rebuilding driver state...\n");
4037 error = i40e_pf_reset(hw);
4039 device_printf(dev, "PF reset failure %s\n",
4040 i40e_stat_str(hw, error));
4041 goto ixl_rebuild_hw_structs_after_reset_err;
4045 error = i40e_init_adminq(hw);
4046 if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
4047 device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
4049 goto ixl_rebuild_hw_structs_after_reset_err;
4052 i40e_clear_pxe_mode(hw);
4054 error = ixl_get_hw_capabilities(pf);
4056 device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
4057 goto ixl_rebuild_hw_structs_after_reset_err;
4060 error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
4061 hw->func_caps.num_rx_qp, 0, 0);
4063 device_printf(dev, "init_lan_hmc failed: %d\n", error);
4064 goto ixl_rebuild_hw_structs_after_reset_err;
4067 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
4069 device_printf(dev, "configure_lan_hmc failed: %d\n", error);
4070 goto ixl_rebuild_hw_structs_after_reset_err;
4073 /* reserve a contiguous allocation for the PF's VSI */
4074 error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag);
4076 device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
4078 /* TODO: error handling */
4081 device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
4082 pf->qtag.num_allocated, pf->qtag.num_active);
4084 error = ixl_switch_config(pf);
4086 device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
4088 goto ixl_rebuild_hw_structs_after_reset_err;
4091 if (ixl_vsi_setup_queues(vsi)) {
4092 device_printf(dev, "setup queues failed!\n");
4094 goto ixl_rebuild_hw_structs_after_reset_err;
4098 error = ixl_setup_adminq_msix(pf);
4100 device_printf(dev, "ixl_setup_adminq_msix() error: %d\n",
4102 goto ixl_rebuild_hw_structs_after_reset_err;
4105 ixl_configure_intr0_msix(pf);
4106 ixl_enable_intr0(hw);
4108 error = ixl_setup_queue_msix(vsi);
4110 device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
4112 goto ixl_rebuild_hw_structs_after_reset_err;
4115 error = ixl_setup_legacy(pf);
4117 device_printf(dev, "ixl_setup_legacy() error: %d\n",
4119 goto ixl_rebuild_hw_structs_after_reset_err;
4123 /* Determine link state */
4124 if (ixl_attach_get_link_status(pf)) {
4126 /* TODO: error handling */
4129 i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
4130 ixl_get_fw_lldp_status(pf);
4135 device_printf(dev, "Rebuilding driver state done.\n");
4138 ixl_rebuild_hw_structs_after_reset_err:
4139 device_printf(dev, "Reload the driver to recover\n");
4144 ixl_handle_empr_reset(struct ixl_pf *pf)
4146 struct ixl_vsi *vsi = &pf->vsi;
4147 struct i40e_hw *hw = &pf->hw;
4148 bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
4152 ixl_prepare_for_reset(pf, is_up);
4154 /* Typically finishes within 3-4 seconds */
4155 while (count++ < 100) {
4156 reg = rd32(hw, I40E_GLGEN_RSTAT)
4157 & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
4159 i40e_msec_delay(100);
4163 ixl_dbg(pf, IXL_DBG_INFO,
4164 "EMPR reset wait count: %d\n", count);
4166 ixl_rebuild_hw_structs_after_reset(pf, is_up);
4168 atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
4172 ** Tasklet handler for MSIX Adminq interrupts
4173 ** - done outside the interrupt context since it might sleep
4176 ixl_do_adminq(void *context, int pending)
4178 struct ixl_pf *pf = context;
4179 struct i40e_hw *hw = &pf->hw;
4180 struct i40e_arq_event_info event;
4182 device_t dev = pf->dev;
4186 if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
4187 /* Flag cleared at end of this function */
4188 ixl_handle_empr_reset(pf);
4192 /* Admin Queue handling */
4193 event.buf_len = IXL_AQ_BUF_SZ;
4194 event.msg_buf = malloc(event.buf_len,
4195 M_DEVBUF, M_NOWAIT | M_ZERO);
4196 if (!event.msg_buf) {
4197 device_printf(dev, "%s: Unable to allocate memory for Admin"
4198 " Queue event!\n", __func__);
4203 /* clean and process any events */
4205 ret = i40e_clean_arq_element(hw, &event, &result);
4208 opcode = LE16_TO_CPU(event.desc.opcode);
4209 ixl_dbg(pf, IXL_DBG_AQ,
4210 "Admin Queue event: %#06x\n", opcode);
4212 case i40e_aqc_opc_get_link_status:
4213 ixl_link_event(pf, &event);
4215 case i40e_aqc_opc_send_msg_to_pf:
4217 ixl_handle_vf_msg(pf, &event);
4220 case i40e_aqc_opc_event_lan_overflow:
4225 } while (result && (loop++ < IXL_ADM_LIMIT));
4227 free(event.msg_buf, M_DEVBUF);
4230 * If there are still messages to process, reschedule ourselves.
4231 * Otherwise, re-enable our interrupt.
4234 taskqueue_enqueue(pf->tq, &pf->adminq);
4236 ixl_enable_intr0(hw);
4242 * Update VSI-specific Ethernet statistics counters.
4245 ixl_update_eth_stats(struct ixl_vsi *vsi)
4247 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4248 struct i40e_hw *hw = &pf->hw;
4249 struct i40e_eth_stats *es;
4250 struct i40e_eth_stats *oes;
4251 struct i40e_hw_port_stats *nsd;
4252 u16 stat_idx = vsi->info.stat_counter_idx;
4254 es = &vsi->eth_stats;
4255 oes = &vsi->eth_stats_offsets;
4258 /* Gather up the stats that the hw collects */
4259 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4260 vsi->stat_offsets_loaded,
4261 &oes->tx_errors, &es->tx_errors);
4262 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4263 vsi->stat_offsets_loaded,
4264 &oes->rx_discards, &es->rx_discards);
4266 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4267 I40E_GLV_GORCL(stat_idx),
4268 vsi->stat_offsets_loaded,
4269 &oes->rx_bytes, &es->rx_bytes);
4270 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4271 I40E_GLV_UPRCL(stat_idx),
4272 vsi->stat_offsets_loaded,
4273 &oes->rx_unicast, &es->rx_unicast);
4274 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4275 I40E_GLV_MPRCL(stat_idx),
4276 vsi->stat_offsets_loaded,
4277 &oes->rx_multicast, &es->rx_multicast);
4278 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4279 I40E_GLV_BPRCL(stat_idx),
4280 vsi->stat_offsets_loaded,
4281 &oes->rx_broadcast, &es->rx_broadcast);
4283 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4284 I40E_GLV_GOTCL(stat_idx),
4285 vsi->stat_offsets_loaded,
4286 &oes->tx_bytes, &es->tx_bytes);
4287 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4288 I40E_GLV_UPTCL(stat_idx),
4289 vsi->stat_offsets_loaded,
4290 &oes->tx_unicast, &es->tx_unicast);
4291 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4292 I40E_GLV_MPTCL(stat_idx),
4293 vsi->stat_offsets_loaded,
4294 &oes->tx_multicast, &es->tx_multicast);
4295 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4296 I40E_GLV_BPTCL(stat_idx),
4297 vsi->stat_offsets_loaded,
4298 &oes->tx_broadcast, &es->tx_broadcast);
4299 vsi->stat_offsets_loaded = true;
4303 ixl_update_vsi_stats(struct ixl_vsi *vsi)
4307 struct i40e_eth_stats *es;
4310 struct i40e_hw_port_stats *nsd;
4314 es = &vsi->eth_stats;
4317 ixl_update_eth_stats(vsi);
4319 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4320 for (int i = 0; i < vsi->num_queues; i++)
4321 tx_discards += vsi->queues[i].txr.br->br_drops;
4323 /* Update ifnet stats */
4324 IXL_SET_IPACKETS(vsi, es->rx_unicast +
4327 IXL_SET_OPACKETS(vsi, es->tx_unicast +
4330 IXL_SET_IBYTES(vsi, es->rx_bytes);
4331 IXL_SET_OBYTES(vsi, es->tx_bytes);
4332 IXL_SET_IMCASTS(vsi, es->rx_multicast);
4333 IXL_SET_OMCASTS(vsi, es->tx_multicast);
4335 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4336 nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4338 IXL_SET_OERRORS(vsi, es->tx_errors);
4339 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4340 IXL_SET_OQDROPS(vsi, tx_discards);
4341 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4342 IXL_SET_COLLISIONS(vsi, 0);
4346 * Reset all of the stats for the given pf
4349 ixl_pf_reset_stats(struct ixl_pf *pf)
4351 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4352 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4353 pf->stat_offsets_loaded = false;
4357 * Resets all stats of the given vsi
4360 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4362 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4363 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4364 vsi->stat_offsets_loaded = false;
4368 * Read and update a 48 bit stat from the hw
4370 * Since the device stats are not reset at PFReset, they likely will not
4371 * be zeroed when the driver starts. We'll save the first values read
4372 * and use them as offsets to be subtracted from the raw values in order
4373 * to report stats that count from zero.
4376 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4377 bool offset_loaded, u64 *offset, u64 *stat)
4381 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4382 new_data = rd64(hw, loreg);
4385 * Use two rd32's instead of one rd64; FreeBSD versions before
4386 * 10 don't support 64-bit bus reads/writes.
4388 new_data = rd32(hw, loreg);
4389 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4394 if (new_data >= *offset)
4395 *stat = new_data - *offset;
4397 *stat = (new_data + ((u64)1 << 48)) - *offset;
4398 *stat &= 0xFFFFFFFFFFFFULL;
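/*
 * Rollover example (illustrative): if *offset == 0xFFFFFFFFFFF0 and the
 * 48-bit counter wraps to new_data == 0x10, then
 * (0x10 + (1ULL << 48)) - *offset == 0x20, the true delta; the mask
 * above keeps the result within 48 bits.
 */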
4402 * Read and update a 32 bit stat from the hw
4405 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4406 bool offset_loaded, u64 *offset, u64 *stat)
4410 new_data = rd32(hw, reg);
4413 if (new_data >= *offset)
4414 *stat = (u32)(new_data - *offset);
4416 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4420 ixl_add_device_sysctls(struct ixl_pf *pf)
4422 device_t dev = pf->dev;
4423 struct i40e_hw *hw = &pf->hw;
4425 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4426 struct sysctl_oid_list *ctx_list =
4427 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
4429 struct sysctl_oid *debug_node;
4430 struct sysctl_oid_list *debug_list;
4432 struct sysctl_oid *fec_node;
4433 struct sysctl_oid_list *fec_list;
4435 /* Set up sysctls */
4436 SYSCTL_ADD_PROC(ctx, ctx_list,
4437 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
4438 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
4440 SYSCTL_ADD_PROC(ctx, ctx_list,
4441 OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
4442 pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
4444 SYSCTL_ADD_PROC(ctx, ctx_list,
4445 OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD,
4446 pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
4448 SYSCTL_ADD_PROC(ctx, ctx_list,
4449 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
4450 pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed");
4452 SYSCTL_ADD_PROC(ctx, ctx_list,
4453 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
4454 pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
4456 SYSCTL_ADD_PROC(ctx, ctx_list,
4457 OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
4458 pf, 0, ixl_sysctl_unallocated_queues, "I",
4459 "Queues not allocated to a PF or VF");
4461 SYSCTL_ADD_PROC(ctx, ctx_list,
4462 OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
4463 pf, 0, ixl_sysctl_pf_tx_itr, "I",
4464 "Immediately set TX ITR value for all queues");
4466 SYSCTL_ADD_PROC(ctx, ctx_list,
4467 OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
4468 pf, 0, ixl_sysctl_pf_rx_itr, "I",
4469 "Immediately set RX ITR value for all queues");
4471 SYSCTL_ADD_INT(ctx, ctx_list,
4472 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
4473 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
4475 SYSCTL_ADD_INT(ctx, ctx_list,
4476 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
4477 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
4479 SYSCTL_ADD_INT(ctx, ctx_list,
4480 OID_AUTO, "tx_ring_size", CTLFLAG_RD,
4481 &pf->vsi.num_tx_desc, 0, "TX ring size");
4483 SYSCTL_ADD_INT(ctx, ctx_list,
4484 OID_AUTO, "rx_ring_size", CTLFLAG_RD,
4485 &pf->vsi.num_rx_desc, 0, "RX ring size");
4487 /* Add FEC sysctls for 25G adapters */
4488 if (i40e_is_25G_device(hw->device_id)) {
4489 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
4490 OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
4491 fec_list = SYSCTL_CHILDREN(fec_node);
4493 SYSCTL_ADD_PROC(ctx, fec_list,
4494 OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW,
4495 pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
4497 SYSCTL_ADD_PROC(ctx, fec_list,
4498 OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW,
4499 pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
4501 SYSCTL_ADD_PROC(ctx, fec_list,
4502 OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW,
4503 pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
4505 SYSCTL_ADD_PROC(ctx, fec_list,
4506 OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW,
4507 pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
4509 SYSCTL_ADD_PROC(ctx, fec_list,
4510 OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW,
4511 pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
4514 SYSCTL_ADD_PROC(ctx, ctx_list,
4515 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW,
4516 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
4518 /* Add sysctls meant to print debug information, but don't list them
4519 * in "sysctl -a" output. */
4520 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
4521 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
4522 debug_list = SYSCTL_CHILDREN(debug_node);
4524 SYSCTL_ADD_UINT(ctx, debug_list,
4525 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
4526 &pf->hw.debug_mask, 0, "Shared code debug message level");
4528 SYSCTL_ADD_UINT(ctx, debug_list,
4529 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
4530 &pf->dbg_mask, 0, "Non-shared code debug message level");
4532 SYSCTL_ADD_PROC(ctx, debug_list,
4533 OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
4534 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
4536 SYSCTL_ADD_PROC(ctx, debug_list,
4537 OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
4538 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
4540 SYSCTL_ADD_PROC(ctx, debug_list,
4541 OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
4542 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
4544 SYSCTL_ADD_PROC(ctx, debug_list,
4545 OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
4546 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
4548 SYSCTL_ADD_PROC(ctx, debug_list,
4549 OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
4550 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
4552 SYSCTL_ADD_PROC(ctx, debug_list,
4553 OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
4554 pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
4556 SYSCTL_ADD_PROC(ctx, debug_list,
4557 OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
4558 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
4560 SYSCTL_ADD_PROC(ctx, debug_list,
4561 OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
4562 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
4564 SYSCTL_ADD_PROC(ctx, debug_list,
4565 OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
4566 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
4568 SYSCTL_ADD_PROC(ctx, debug_list,
4569 OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD,
4570 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
4573 SYSCTL_ADD_PROC(ctx, debug_list,
4574 OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
4575 pf, 0, ixl_sysctl_read_i2c_byte, "I", "Read byte from I2C bus");
4577 SYSCTL_ADD_PROC(ctx, debug_list,
4578 OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
4579 pf, 0, ixl_sysctl_write_i2c_byte, "I", "Write byte to I2C bus");
4583 SYSCTL_ADD_UINT(ctx, debug_list,
4584 OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
4585 0, "PF/VF Virtual Channel debug level");
4590 * Primarily for finding out how many queues can be assigned to VFs.
4594 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
4596 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4600 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
4603 return sysctl_handle_int(oidp, NULL, queues, req);
4607 ** Set flow control using sysctl: 0 - off, 1 - rx pause, 2 - tx pause, 3 - full
4614 ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4616 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4617 struct i40e_hw *hw = &pf->hw;
4618 device_t dev = pf->dev;
4619 int requested_fc, error = 0;
4620 enum i40e_status_code aq_error = 0;
4624 requested_fc = pf->fc;
4625 error = sysctl_handle_int(oidp, &requested_fc, 0, req);
4626 if ((error) || (req->newptr == NULL))
4628 if (requested_fc < 0 || requested_fc > 3) {
4630 "Invalid fc mode; valid modes are 0 through 3\n");
4634 /* Set fc ability for port */
4635 hw->fc.requested_mode = requested_fc;
4636 aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4639 "%s: Error setting new fc mode %d; fc_err %#x\n",
4640 __func__, aq_error, fc_aq_err);
4643 pf->fc = requested_fc;
4645 /* Get new link state */
4646 i40e_msec_delay(250);
4647 hw->phy.get_link_info = TRUE;
4648 i40e_get_link_status(hw, &pf->link_up);
4654 ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
4668 switch (link_speed) {
4669 case I40E_LINK_SPEED_100MB:
4672 case I40E_LINK_SPEED_1GB:
4675 case I40E_LINK_SPEED_10GB:
4678 case I40E_LINK_SPEED_40GB:
4681 case I40E_LINK_SPEED_20GB:
4684 case I40E_LINK_SPEED_25GB:
4687 case I40E_LINK_SPEED_UNKNOWN:
4693 return speeds[index];
4697 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
4699 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4700 struct i40e_hw *hw = &pf->hw;
4703 ixl_update_link_status(pf);
4705 error = sysctl_handle_string(oidp,
4706 ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
4712 * Converts an 8-bit speed bitmap between the sysctl flag format and
4713 * the Admin Queue flag format.
4716 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
4718 static u16 speedmap[6] = {
4719 (I40E_LINK_SPEED_100MB | (0x1 << 8)),
4720 (I40E_LINK_SPEED_1GB | (0x2 << 8)),
4721 (I40E_LINK_SPEED_10GB | (0x4 << 8)),
4722 (I40E_LINK_SPEED_20GB | (0x8 << 8)),
4723 (I40E_LINK_SPEED_25GB | (0x10 << 8)),
4724 (I40E_LINK_SPEED_40GB | (0x20 << 8))
4728 for (int i = 0; i < 6; i++) {
4730 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
4732 retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
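/*
 * Example of the mapping above: each speedmap entry packs the AQ flag in
 * the low byte and the sysctl flag in the high byte, so a sysctl value of
 * 0x4 (10G) converts to I40E_LINK_SPEED_10GB when to_aq is true, and back
 * to 0x4 when to_aq is false.
 */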
4739 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
4741 struct i40e_hw *hw = &pf->hw;
4742 device_t dev = pf->dev;
4743 struct i40e_aq_get_phy_abilities_resp abilities;
4744 struct i40e_aq_set_phy_config config;
4745 enum i40e_status_code aq_error = 0;
4747 /* Get current capability information */
4748 aq_error = i40e_aq_get_phy_capabilities(hw,
4749 FALSE, FALSE, &abilities, NULL);
4752 "%s: Error getting phy capabilities %d,"
4753 " aq error: %d\n", __func__, aq_error,
4754 hw->aq.asq_last_status);
4758 /* Prepare new config */
4759 bzero(&config, sizeof(config));
4761 config.link_speed = speeds;
4763 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
4764 config.phy_type = abilities.phy_type;
4765 config.phy_type_ext = abilities.phy_type_ext;
4766 config.abilities = abilities.abilities
4767 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4768 config.eee_capability = abilities.eee_capability;
4769 config.eeer = abilities.eeer_val;
4770 config.low_power_ctrl = abilities.d3_lpan;
4771 config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e);
4773 /* Do aq command & restart link */
4774 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4777 "%s: Error setting new phy config %d,"
4778 " aq error: %d\n", __func__, aq_error,
4779 hw->aq.asq_last_status);
4787 ** Supported link speeds:
4797 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
4799 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4800 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
4802 return sysctl_handle_int(oidp, NULL, supported, req);
4806 ** Control link advertise speed:
4808 ** 0x1 - advertise 100 Mb
4809 ** 0x2 - advertise 1G
4810 ** 0x4 - advertise 10G
4811 ** 0x8 - advertise 20G
4812 ** 0x10 - advertise 25G
4813 ** 0x20 - advertise 40G
4815 ** Set to 0 to disable link
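/*
 * Example (the flags above OR together): writing 0x24 (0x4 | 0x20)
 * advertises only 10G and 40G, e.g. on a hypothetical first unit:
 *   # sysctl dev.ixl.0.advertise_speed=0x24
 */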
4818 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
4820 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4821 device_t dev = pf->dev;
4822 u8 converted_speeds;
4823 int requested_ls = 0;
4826 /* Read in new mode */
4827 requested_ls = pf->advertised_speed;
4828 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4829 if ((error) || (req->newptr == NULL))
4832 /* Error out if bits outside of possible flag range are set */
4833 if ((requested_ls & ~((u8)0x3F)) != 0) {
4834 device_printf(dev, "Input advertised speed out of range; "
4835 "valid flags are: 0x%02x\n",
4836 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
4840 /* Check if adapter supports input value */
4841 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
4842 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
4843 device_printf(dev, "Invalid advertised speed; "
4844 "valid flags are: 0x%02x\n",
4845 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
4849 error = ixl_set_advertised_speeds(pf, requested_ls, false);
4853 pf->advertised_speed = requested_ls;
4854 ixl_update_link_status(pf);
4859 * Input: bitmap of enum i40e_aq_link_speed
4862 ixl_max_aq_speed_to_value(u8 link_speeds)
4864 if (link_speeds & I40E_LINK_SPEED_40GB)
4866 if (link_speeds & I40E_LINK_SPEED_25GB)
4868 if (link_speeds & I40E_LINK_SPEED_20GB)
4870 if (link_speeds & I40E_LINK_SPEED_10GB)
4872 if (link_speeds & I40E_LINK_SPEED_1GB)
4874 if (link_speeds & I40E_LINK_SPEED_100MB)
4875 return IF_Mbps(100);
4877 /* Minimum supported link speed */
4878 return IF_Mbps(100);
4882 ** Get the width and transaction speed of
4883 ** the bus this adapter is plugged into.
4886 ixl_get_bus_info(struct ixl_pf *pf)
4888 struct i40e_hw *hw = &pf->hw;
4889 device_t dev = pf->dev;
4891 u32 offset, num_ports;
4894 /* Some devices don't use PCIE */
4895 if (hw->mac.type == I40E_MAC_X722)
4898 /* Read PCI Express Capabilities Link Status Register */
4899 pci_find_cap(dev, PCIY_EXPRESS, &offset);
4900 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4902 /* Fill out hw struct with PCIE info */
4903 i40e_set_pci_config_data(hw, link);
4905 /* Use info to print out bandwidth messages */
4906 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4907 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4908 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4909 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4910 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4911 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4912 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
4913 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4917 * If the adapter is in a slot with its maximum supported speed and
4918 * width, no warning message needs to be printed.
4920 if (hw->bus.speed >= i40e_bus_speed_8000
4921 && hw->bus.width >= i40e_bus_width_pcie_x8)
4924 num_ports = bitcount32(hw->func_caps.valid_functions);
4925 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
4927 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
4928 device_printf(dev, "PCI-Express bandwidth available"
4929 " for this device may be insufficient for"
4930 " optimal performance.\n");
4931 device_printf(dev, "Please move the device to a different"
4932 " PCI-e link with more lanes and/or higher"
4933 " transfer rate.\n");
4938 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4940 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4941 struct i40e_hw *hw = &pf->hw;
4944 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4945 ixl_nvm_version_str(hw, sbuf);
4953 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
4955 if ((nvma->command == I40E_NVM_READ) &&
4956 ((nvma->config & 0xFF) == 0xF) &&
4957 (((nvma->config & 0xF00) >> 8) == 0xF) &&
4958 (nvma->offset == 0) &&
4959 (nvma->data_size == 1)) {
4960 // device_printf(dev, "- Get Driver Status Command\n");
4962 else if (nvma->command == I40E_NVM_READ) {
4966 switch (nvma->command) {
4968 device_printf(dev, "- command: I40E_NVM_READ\n");
4971 device_printf(dev, "- command: I40E_NVM_WRITE\n");
4974 device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
4978 device_printf(dev, "- config (ptr) : 0x%02x\n", nvma->config & 0xFF);
4979 device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
4980 device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
4981 device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
4986 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
4988 struct i40e_hw *hw = &pf->hw;
4989 struct i40e_nvm_access *nvma;
4990 device_t dev = pf->dev;
4991 enum i40e_status_code status = 0;
4994 DEBUGFUNC("ixl_handle_nvmupd_cmd");
4997 if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
4998 ifd->ifd_data == NULL) {
4999 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
5001 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
5002 __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access));
5003 device_printf(dev, "%s: data pointer: %p\n", __func__,
5008 nvma = (struct i40e_nvm_access *)ifd->ifd_data;
5010 if (pf->dbg_mask & IXL_DBG_NVMUPD)
5011 ixl_print_nvm_cmd(dev, nvma);
5013 if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
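/* Wait up to ~10 seconds (100 * 100 ms) for an in-progress EMP reset to clear */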
5015 while (count++ < 100) {
5016 i40e_msec_delay(100);
5017 if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING))
5022 if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) {
5024 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
5030 /* Let the nvmupdate report errors, show them only when debug is enabled */
5031 if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
5032 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
5033 i40e_stat_str(hw, status), perrno);
5036 * -EPERM is actually ERESTART, which the kernel interprets as a request
5037 * to run this ioctl again. So use -EACCES for -EPERM instead.
5039 if (perrno == -EPERM)
5045 /*********************************************************************
5047 * Media Ioctl callback
5049 * This routine is called whenever the user queries the status of
5050 * the interface using ifconfig.
5052 * When adding new media types here, make sure to add them to
5053 * ixl_add_ifmedia(), too.
5055 **********************************************************************/
5057 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
5059 struct ixl_vsi *vsi = ifp->if_softc;
5060 struct ixl_pf *pf = vsi->back;
5061 struct i40e_hw *hw = &pf->hw;
5063 INIT_DEBUGOUT("ixl_media_status: begin");
5065 /* Don't touch PF during reset */
5066 if (atomic_load_acq_int(&pf->state) & IXL_PF_STATE_EMPR_RESETTING)
5071 i40e_get_link_status(hw, &pf->link_up);
5072 ixl_update_link_status(pf);
5074 ifmr->ifm_status = IFM_AVALID;
5075 ifmr->ifm_active = IFM_ETHER;
5082 ifmr->ifm_status |= IFM_ACTIVE;
5084 /* Hardware always does full-duplex */
5085 ifmr->ifm_active |= IFM_FDX;
5087 switch (hw->phy.link_info.phy_type) {
5089 case I40E_PHY_TYPE_100BASE_TX:
5090 ifmr->ifm_active |= IFM_100_TX;
5093 case I40E_PHY_TYPE_1000BASE_T:
5094 ifmr->ifm_active |= IFM_1000_T;
5096 case I40E_PHY_TYPE_1000BASE_SX:
5097 ifmr->ifm_active |= IFM_1000_SX;
5099 case I40E_PHY_TYPE_1000BASE_LX:
5100 ifmr->ifm_active |= IFM_1000_LX;
5102 case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
5103 ifmr->ifm_active |= IFM_1000_T;
5106 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
5107 ifmr->ifm_active |= IFM_10G_TWINAX;
5109 case I40E_PHY_TYPE_10GBASE_SR:
5110 ifmr->ifm_active |= IFM_10G_SR;
5112 case I40E_PHY_TYPE_10GBASE_LR:
5113 ifmr->ifm_active |= IFM_10G_LR;
5115 case I40E_PHY_TYPE_10GBASE_T:
5116 ifmr->ifm_active |= IFM_10G_T;
5118 case I40E_PHY_TYPE_XAUI:
5119 case I40E_PHY_TYPE_XFI:
5120 ifmr->ifm_active |= IFM_10G_TWINAX;
5122 case I40E_PHY_TYPE_10GBASE_AOC:
5123 ifmr->ifm_active |= IFM_10G_AOC;
5126 case I40E_PHY_TYPE_25GBASE_KR:
5127 ifmr->ifm_active |= IFM_25G_KR;
5129 case I40E_PHY_TYPE_25GBASE_CR:
5130 ifmr->ifm_active |= IFM_25G_CR;
5132 case I40E_PHY_TYPE_25GBASE_SR:
5133 ifmr->ifm_active |= IFM_25G_SR;
5135 case I40E_PHY_TYPE_25GBASE_LR:
5136 ifmr->ifm_active |= IFM_25G_LR;
5138 case I40E_PHY_TYPE_25GBASE_AOC:
5139 ifmr->ifm_active |= IFM_25G_AOC;
5141 case I40E_PHY_TYPE_25GBASE_ACC:
5142 ifmr->ifm_active |= IFM_25G_ACC;
5145 case I40E_PHY_TYPE_40GBASE_CR4:
5146 case I40E_PHY_TYPE_40GBASE_CR4_CU:
5147 ifmr->ifm_active |= IFM_40G_CR4;
5149 case I40E_PHY_TYPE_40GBASE_SR4:
5150 ifmr->ifm_active |= IFM_40G_SR4;
5152 case I40E_PHY_TYPE_40GBASE_LR4:
5153 ifmr->ifm_active |= IFM_40G_LR4;
5155 case I40E_PHY_TYPE_XLAUI:
5156 ifmr->ifm_active |= IFM_OTHER;
5158 case I40E_PHY_TYPE_1000BASE_KX:
5159 ifmr->ifm_active |= IFM_1000_KX;
5161 case I40E_PHY_TYPE_SGMII:
5162 ifmr->ifm_active |= IFM_1000_SGMII;
5164 /* ERJ: What's the difference between these? */
5165 case I40E_PHY_TYPE_10GBASE_CR1_CU:
5166 case I40E_PHY_TYPE_10GBASE_CR1:
5167 ifmr->ifm_active |= IFM_10G_CR1;
5169 case I40E_PHY_TYPE_10GBASE_KX4:
5170 ifmr->ifm_active |= IFM_10G_KX4;
5172 case I40E_PHY_TYPE_10GBASE_KR:
5173 ifmr->ifm_active |= IFM_10G_KR;
5175 case I40E_PHY_TYPE_SFI:
5176 ifmr->ifm_active |= IFM_10G_SFI;
5178 /* Our single 20G media type */
5179 case I40E_PHY_TYPE_20GBASE_KR2:
5180 ifmr->ifm_active |= IFM_20G_KR2;
5182 case I40E_PHY_TYPE_40GBASE_KR4:
5183 ifmr->ifm_active |= IFM_40G_KR4;
5185 case I40E_PHY_TYPE_XLPPI:
5186 case I40E_PHY_TYPE_40GBASE_AOC:
5187 ifmr->ifm_active |= IFM_40G_XLPPI;
5189 /* Unknown to driver */
5191 ifmr->ifm_active |= IFM_UNKNOWN;
5194 /* Report flow control status as well */
5195 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
5196 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
5197 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
5198 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
5206 struct ixl_pf *pf = arg;
5209 ixl_init_locked(pf);
5214 * NOTE: Fortville does not support forcing media speeds. Instead,
5215 * use the advertise_speed sysctl to set the speeds Fortville
5216 * will advertise or be allowed to operate at.
5219 ixl_media_change(struct ifnet * ifp)
5221 struct ixl_vsi *vsi = ifp->if_softc;
5222 struct ifmedia *ifm = &vsi->media;
5224 INIT_DEBUGOUT("ixl_media_change: begin");
5226 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
5229 if_printf(ifp, "Use 'advertise_speed' sysctl to change advertised speeds\n");
5234 /*********************************************************************
5237 * ixl_ioctl is called when the user wants to configure the interface.
5240 * return 0 on success, positive on failure
5241 **********************************************************************/
5244 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
5246 struct ixl_vsi *vsi = ifp->if_softc;
5247 struct ixl_pf *pf = vsi->back;
5248 struct ifreq *ifr = (struct ifreq *)data;
5249 struct ifdrv *ifd = (struct ifdrv *)data;
5250 #if defined(INET) || defined(INET6)
5251 struct ifaddr *ifa = (struct ifaddr *)data;
5252 bool avoid_reset = FALSE;
5259 IOCTL_DEBUGOUT("ioctl: SIOCSIFADDR (Set Interface Address)");
5261 if (ifa->ifa_addr->sa_family == AF_INET)
5265 if (ifa->ifa_addr->sa_family == AF_INET6)
5268 #if defined(INET) || defined(INET6)
5270 ** Calling init results in link renegotiation,
5271 ** so we avoid doing it when possible.
5274 ifp->if_flags |= IFF_UP;
5275 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
5278 if (!(ifp->if_flags & IFF_NOARP))
5279 arp_ifinit(ifp, ifa);
5282 error = ether_ioctl(ifp, command, data);
5286 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
5287 if (ifr->ifr_mtu > IXL_MAX_FRAME -
5288 ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
5292 ifp->if_mtu = ifr->ifr_mtu;
5293 vsi->max_frame_size =
5294 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
5295 + ETHER_VLAN_ENCAP_LEN;
5296 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5297 ixl_init_locked(pf);
5302 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
5304 if (ifp->if_flags & IFF_UP) {
5305 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5306 if ((ifp->if_flags ^ pf->if_flags) &
5307 (IFF_PROMISC | IFF_ALLMULTI)) {
5308 ixl_set_promisc(vsi);
5316 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5317 ixl_stop_locked(pf);
5320 pf->if_flags = ifp->if_flags;
5325 IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
5328 /* NVM update command */
5329 if (ifd->ifd_cmd == I40E_NVM_ACCESS)
5330 error = ixl_handle_nvmupd_cmd(pf, ifd);
5335 IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
5336 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5338 ixl_disable_rings_intr(vsi);
5340 ixl_enable_intr(vsi);
5345 IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
5346 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5348 ixl_disable_rings_intr(vsi);
5350 ixl_enable_intr(vsi);
5357 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
5358 error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
5362 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5363 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
5365 ixl_cap_txcsum_tso(vsi, ifp, mask);
5367 if (mask & IFCAP_RXCSUM)
5368 ifp->if_capenable ^= IFCAP_RXCSUM;
5369 if (mask & IFCAP_RXCSUM_IPV6)
5370 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
5371 if (mask & IFCAP_LRO)
5372 ifp->if_capenable ^= IFCAP_LRO;
5373 if (mask & IFCAP_VLAN_HWTAGGING)
5374 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5375 if (mask & IFCAP_VLAN_HWFILTER)
5376 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
5377 if (mask & IFCAP_VLAN_HWTSO)
5378 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5379 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5381 ixl_init_locked(pf);
5384 VLAN_CAPABILITIES(ifp);
5388 #if __FreeBSD_version >= 1003000
5391 struct ifi2creq i2c;
5394 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
5398 error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
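/* 0xA0 and 0xA2 are the standard SFP module EEPROM and diagnostic addresses (SFF-8472) */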
5401 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
5405 if (i2c.len > sizeof(i2c.data)) {
5410 for (i = 0; i < i2c.len; i++)
5411 if (ixl_read_i2c_byte(pf, i2c.offset + i,
5412 i2c.dev_addr, &i2c.data[i]))
5415 error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
5420 IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
5421 error = ether_ioctl(ifp, command, data);
5429 ixl_find_i2c_interface(struct ixl_pf *pf)
5431 struct i40e_hw *hw = &pf->hw;
5432 bool i2c_en, port_matched;
5435 for (int i = 0; i < 4; i++) {
5436 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
5437 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
5438 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
5439 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
5441 if (i2c_en && port_matched)
5449 ixl_phy_type_string(u32 bit_pos, bool ext)
5451 static char * phy_types_str[32] = {
5481 "1000BASE-T Optical",
5485 static char * ext_phy_types_str[8] = {
5496 if (ext && bit_pos > 7) return "Invalid_Ext";
5497 if (bit_pos > 31) return "Invalid";
5499 return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
5503 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
5505 device_t dev = pf->dev;
5506 struct i40e_hw *hw = &pf->hw;
5507 struct i40e_aq_desc desc;
5508 enum i40e_status_code status;
5510 struct i40e_aqc_get_link_status *aq_link_status =
5511 (struct i40e_aqc_get_link_status *)&desc.params.raw;
5513 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
5514 link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
5515 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
5518 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
5519 __func__, i40e_stat_str(hw, status),
5520 i40e_aq_str(hw, hw->aq.asq_last_status));
5524 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
5529 ixl_phy_type_string_ls(u8 val)
5532 return ixl_phy_type_string(val - 0x1F, true);
5534 return ixl_phy_type_string(val, false);
5538 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
5540 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5541 device_t dev = pf->dev;
5545 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5547 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5551 struct i40e_aqc_get_link_status link_status;
5552 error = ixl_aq_get_link_status(pf, &link_status);
5558 sbuf_printf(buf, "\n"
5559 "PHY Type : 0x%02x<%s>\n"
5561 "Link info: 0x%02x\n"
5562 "AN info : 0x%02x\n"
5563 "Ext info : 0x%02x\n"
5564 "Loopback : 0x%02x\n"
5568 link_status.phy_type,
5569 ixl_phy_type_string_ls(link_status.phy_type),
5570 link_status.link_speed,
5571 link_status.link_info,
5572 link_status.an_info,
5573 link_status.ext_info,
5574 link_status.loopback,
5575 link_status.max_frame_size,
5577 link_status.power_desc);
5579 error = sbuf_finish(buf);
5581 device_printf(dev, "Error finishing sbuf: %d\n", error);
5588 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
5590 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5591 struct i40e_hw *hw = &pf->hw;
5592 device_t dev = pf->dev;
5593 enum i40e_status_code status;
5594 struct i40e_aq_get_phy_abilities_resp abilities;
5598 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5600 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5604 status = i40e_aq_get_phy_capabilities(hw,
5605 FALSE, FALSE, &abilities, NULL);
5608 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
5609 __func__, i40e_stat_str(hw, status),
5610 i40e_aq_str(hw, hw->aq.asq_last_status));
5615 sbuf_printf(buf, "\n"
5617 abilities.phy_type);
5619 if (abilities.phy_type != 0) {
5620 sbuf_printf(buf, "<");
5621 for (int i = 0; i < 32; i++)
5622 if ((1 << i) & abilities.phy_type)
5623 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
5624 sbuf_printf(buf, ">\n");
5627 sbuf_printf(buf, "PHY Ext : %02x",
5628 abilities.phy_type_ext);
5630 if (abilities.phy_type_ext != 0) {
5631 sbuf_printf(buf, "<");
5632 for (int i = 0; i < 4; i++)
5633 if ((1 << i) & abilities.phy_type_ext)
5634 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
5635 sbuf_printf(buf, ">");
5637 sbuf_printf(buf, "\n");
5645 "ID : %02x %02x %02x %02x\n"
5646 "ModType : %02x %02x %02x\n"
5650 abilities.link_speed,
5651 abilities.abilities, abilities.eee_capability,
5652 abilities.eeer_val, abilities.d3_lpan,
5653 abilities.phy_id[0], abilities.phy_id[1],
5654 abilities.phy_id[2], abilities.phy_id[3],
5655 abilities.module_type[0], abilities.module_type[1],
5656 abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
5657 abilities.fec_cfg_curr_mod_ext_info & 0x1F,
5658 abilities.ext_comp_code);
5660 error = sbuf_finish(buf);
5662 device_printf(dev, "Error finishing sbuf: %d\n", error);
5669 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5671 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5672 struct ixl_vsi *vsi = &pf->vsi;
5673 struct ixl_mac_filter *f;
5678 int ftl_counter = 0;
5682 SLIST_FOREACH(f, &vsi->ftl, next) {
5687 sysctl_handle_string(oidp, "(none)", 6, req);
5691 buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5692 buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
5694 sprintf(buf_i++, "\n");
5695 SLIST_FOREACH(f, &vsi->ftl, next) {
5697 MAC_FORMAT ", vlan %4d, flags %#06x",
5698 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5700 /* don't print '\n' for last entry */
5701 if (++ftl_counter != ftl_len) {
5702 sprintf(buf_i, "\n");
5707 error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5709 printf("sysctl error: %d\n", error);
5710 free(buf, M_DEVBUF);
5714 #define IXL_SW_RES_SIZE 0x14
5716 ixl_res_alloc_cmp(const void *a, const void *b)
5718 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5719 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5720 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5722 return ((int)one->resource_type - (int)two->resource_type);
5726 * Longest string length: 25
5729 ixl_switch_res_type_string(u8 type)
5731 static char * ixl_switch_res_type_strings[0x14] = {
5734 "Perfect Match MAC address",
5737 "Multicast hash entry",
5738 "Unicast hash entry",
5742 "VLAN Statistic Pool",
5745 "Inner VLAN Forward filter",
5755 return ixl_switch_res_type_strings[type];
5757 return "(Reserved)";
5761 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5763 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5764 struct i40e_hw *hw = &pf->hw;
5765 device_t dev = pf->dev;
5767 enum i40e_status_code status;
5771 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5773 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5775 device_printf(dev, "Could not allocate sbuf for output.\n");
5779 bzero(resp, sizeof(resp));
5780 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5786 "%s: get_switch_resource_alloc() error %s, aq error %s\n",
5787 __func__, i40e_stat_str(hw, status),
5788 i40e_aq_str(hw, hw->aq.asq_last_status));
5793 /* Sort entries by type for display */
5794 qsort(resp, num_entries,
5795 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5796 &ixl_res_alloc_cmp);
5798 sbuf_cat(buf, "\n");
5799 sbuf_printf(buf, "# of entries: %d\n", num_entries);
5801 " Type | Guaranteed | Total | Used | Un-allocated\n"
5802 " | (this) | (all) | (this) | (all) \n");
5803 for (int i = 0; i < num_entries; i++) {
5805 "%25s | %10d %5d %6d %12d",
5806 ixl_switch_res_type_string(resp[i].resource_type),
5810 resp[i].total_unalloced);
5811 if (i < num_entries - 1)
5812 sbuf_cat(buf, "\n");
5815 error = sbuf_finish(buf);
5817 device_printf(dev, "Error finishing sbuf: %d\n", error);
5824 ** Caller must init and delete sbuf; this function will clear and
5825 ** finish it for caller.
5827 ** XXX: Cannot use the SEID for this, since there is no longer a
5828 ** fixed mapping between SEID and element type.
5831 ixl_switch_element_string(struct sbuf *s,
5832 struct i40e_aqc_switch_config_element_resp *element)
5836 switch (element->element_type) {
5837 case I40E_AQ_SW_ELEM_TYPE_MAC:
5838 sbuf_printf(s, "MAC %3d", element->element_info);
5840 case I40E_AQ_SW_ELEM_TYPE_PF:
5841 sbuf_printf(s, "PF %3d", element->element_info);
5843 case I40E_AQ_SW_ELEM_TYPE_VF:
5844 sbuf_printf(s, "VF %3d", element->element_info);
5846 case I40E_AQ_SW_ELEM_TYPE_EMP:
5849 case I40E_AQ_SW_ELEM_TYPE_BMC:
5852 case I40E_AQ_SW_ELEM_TYPE_PV:
5855 case I40E_AQ_SW_ELEM_TYPE_VEB:
5858 case I40E_AQ_SW_ELEM_TYPE_PA:
5861 case I40E_AQ_SW_ELEM_TYPE_VSI:
5862 sbuf_printf(s, "VSI %3d", element->element_info);
5870 return sbuf_data(s);
5874 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5876 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5877 struct i40e_hw *hw = &pf->hw;
5878 device_t dev = pf->dev;
5881 enum i40e_status_code status;
5884 u8 aq_buf[I40E_AQ_LARGE_BUF];
5886 struct i40e_aqc_get_switch_config_resp *sw_config;
5887 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5889 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5891 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5895 status = i40e_aq_get_switch_config(hw, sw_config,
5896 sizeof(aq_buf), &next, NULL);
5899 "%s: aq_get_switch_config() error %s, aq error %s\n",
5900 __func__, i40e_stat_str(hw, status),
5901 i40e_aq_str(hw, hw->aq.asq_last_status));
5906 device_printf(dev, "%s: TODO: get more config with SEID %d\n",
5909 nmbuf = sbuf_new_auto();
5911 device_printf(dev, "Could not allocate sbuf for name output.\n");
5916 sbuf_cat(buf, "\n");
5917 /* Assuming <= 255 elements in switch */
5918 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
5919 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
5921 ** Revision -- all elements are revision 1 for now
5924 "SEID ( Name ) | Uplink | Downlink | Conn Type\n"
5925 " | | | (uplink)\n");
5926 for (int i = 0; i < sw_config->header.num_reported; i++) {
5927 // "%4d (%8s) | %8s %8s %#8x",
5928 sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5930 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5931 &sw_config->element[i]));
5932 sbuf_cat(buf, " | ");
5933 sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
5935 sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
5937 sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
5938 if (i < sw_config->header.num_reported - 1)
5939 sbuf_cat(buf, "\n");
5943 error = sbuf_finish(buf);
5945 device_printf(dev, "Error finishing sbuf: %d\n", error);
5953 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
5955 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5956 struct i40e_hw *hw = &pf->hw;
5957 device_t dev = pf->dev;
5960 enum i40e_status_code status;
5963 struct i40e_aqc_get_set_rss_key_data key_data;
5965 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5967 device_printf(dev, "Could not allocate sbuf for output.\n");
5971 bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
5973 sbuf_cat(buf, "\n");
5974 if (hw->mac.type == I40E_MAC_X722) {
5975 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
5977 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
5978 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
5980 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
5981 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
5982 bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
5986 ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
5988 error = sbuf_finish(buf);
5990 device_printf(dev, "Error finishing sbuf: %d\n", error);
5997 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
6002 if (length < 1 || buf == NULL) return;
6004 int byte_stride = 16;
6005 int lines = length / byte_stride;
6006 int rem = length % byte_stride;
6010 for (i = 0; i < lines; i++) {
6011 width = (rem > 0 && i == lines - 1)
6012 ? rem : byte_stride;
6014 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
6016 for (j = 0; j < width; j++)
6017 sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
6019 if (width < byte_stride) {
6020 for (k = 0; k < (byte_stride - width); k++)
6021 sbuf_printf(sb, " ");
6025 sbuf_printf(sb, "\n");
6029 for (j = 0; j < width; j++) {
6030 c = (char)buf[i * byte_stride + j];
6031 if (c < 32 || c > 126)
6032 sbuf_printf(sb, ".");
6034 sbuf_printf(sb, "%c", c);
6037 sbuf_printf(sb, "\n");
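/*
 * Sketch of the layout produced above (not verbatim driver output):
 * sixteen input bytes 'A'..'P' with label_offset 0 and text == true
 * render as:
 *    0 | 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f 50 ABCDEFGHIJKLMNOP
 */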
6043 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
6045 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6046 struct i40e_hw *hw = &pf->hw;
6047 device_t dev = pf->dev;
6050 enum i40e_status_code status;
6054 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6056 device_printf(dev, "Could not allocate sbuf for output.\n");
6060 bzero(hlut, sizeof(hlut));
6061 sbuf_cat(buf, "\n");
6062 if (hw->mac.type == I40E_MAC_X722) {
6063 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
6065 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
6066 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
6068 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
6069 reg = rd32(hw, I40E_PFQF_HLUT(i));
6070 bcopy(&reg, &hlut[i << 2], 4);
6073 ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
6075 error = sbuf_finish(buf);
6077 device_printf(dev, "Error finishing sbuf: %d\n", error);
6084 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
6086 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6087 struct i40e_hw *hw = &pf->hw;
6090 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
6091 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
6093 return sysctl_handle_long(oidp, NULL, hena, req);
6097 * Sysctl to disable firmware's link management
6099 * 1 - Disable link management on this port
6100 * 0 - Re-enable link management
6102 * On normal NVMs, firmware manages link by default.
6105 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
6107 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6108 struct i40e_hw *hw = &pf->hw;
6109 device_t dev = pf->dev;
6110 int requested_mode = -1;
6111 enum i40e_status_code status = 0;
6114 /* Read in new mode */
6115 error = sysctl_handle_int(oidp, &requested_mode, 0, req);
6116 if ((error) || (req->newptr == NULL))
6118 /* Check for sane value */
6119 if (requested_mode < 0 || requested_mode > 1) {
6120 device_printf(dev, "Valid modes are 0 or 1\n");
6125 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
6128 "%s: Error setting new phy debug mode %s,"
6129 " aq error: %s\n", __func__, i40e_stat_str(hw, status),
6130 i40e_aq_str(hw, hw->aq.asq_last_status));
6138 * Sysctl to read a byte from I2C bus.
6140 * Input: 32-bit value:
6141 * bits 0-7: device address (0xA0 or 0xA2)
6142 * bits 8-15: offset (0-255)
6143 * bits 16-31: unused
6144 * Output: 8-bit value read
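 *
 * Example: writing 0x02A0 to this sysctl reads offset 2 of the device at
 * I2C address 0xA0 and prints the byte read to the console.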
6147 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
6149 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6150 device_t dev = pf->dev;
6151 int input = -1, error = 0;
6153 device_printf(dev, "%s: start\n", __func__);
6155 u8 dev_addr, offset, output;
6157 /* Read in I2C read parameters */
6158 error = sysctl_handle_int(oidp, &input, 0, req);
6159 if ((error) || (req->newptr == NULL))
6161 /* Validate device address */
6162 dev_addr = input & 0xFF;
6163 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
6166 offset = (input >> 8) & 0xFF;
6168 error = ixl_read_i2c_byte(pf, offset, dev_addr, &output);
6172 device_printf(dev, "%02X\n", output);
6177 * Sysctl to write a byte to the I2C bus.
6179 * Input: 32-bit value:
6180 * bits 0-7: device address (0xA0 or 0xA2)
6181 * bits 8-15: offset (0-255)
6182 * bits 16-23: value to write
6183 * bits 24-31: unused
6184 * Output: 8-bit value written
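 *
 * Example: writing 0x5504A2 to this sysctl writes the value 0x55 to
 * offset 4 of the device at I2C address 0xA2.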
6187 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
6189 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6190 device_t dev = pf->dev;
6191 int input = -1, error = 0;
6193 u8 dev_addr, offset, value;
6195 /* Read in I2C write parameters */
6196 error = sysctl_handle_int(oidp, &input, 0, req);
6197 if ((error) || (req->newptr == NULL))
6199 /* Validate device address */
6200 dev_addr = input & 0xFF;
6201 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
6204 offset = (input >> 8) & 0xFF;
6205 value = (input >> 16) & 0xFF;
6207 error = ixl_write_i2c_byte(pf, offset, dev_addr, value);
6211 device_printf(dev, "%02X written\n", value);
6216 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
6217 u8 bit_pos, int *is_set)
6219 device_t dev = pf->dev;
6220 struct i40e_hw *hw = &pf->hw;
6221 enum i40e_status_code status;
6223 status = i40e_aq_get_phy_capabilities(hw,
6224 FALSE, FALSE, abilities, NULL);
6227 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
6228 __func__, i40e_stat_str(hw, status),
6229 i40e_aq_str(hw, hw->aq.asq_last_status));
6233 *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
6238 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
6239 u8 bit_pos, int set)
6241 device_t dev = pf->dev;
6242 struct i40e_hw *hw = &pf->hw;
6243 struct i40e_aq_set_phy_config config;
6244 enum i40e_status_code status;
6246 /* Set new PHY config */
6247 memset(&config, 0, sizeof(config));
6248 config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
6250 config.fec_config |= bit_pos;
6251 if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
6252 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
6253 config.phy_type = abilities->phy_type;
6254 config.phy_type_ext = abilities->phy_type_ext;
6255 config.link_speed = abilities->link_speed;
6256 config.eee_capability = abilities->eee_capability;
6257 config.eeer = abilities->eeer_val;
6258 config.low_power_ctrl = abilities->d3_lpan;
6259 status = i40e_aq_set_phy_config(hw, &config, NULL);
6263 "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
6264 __func__, i40e_stat_str(hw, status),
6265 i40e_aq_str(hw, hw->aq.asq_last_status));
6274 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
6276 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6277 int mode, error = 0;
6279 struct i40e_aq_get_phy_abilities_resp abilities;
6280 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
6283 /* Read in new mode */
6284 error = sysctl_handle_int(oidp, &mode, 0, req);
6285 if ((error) || (req->newptr == NULL))
6288 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
6292 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
6294 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6295 int mode, error = 0;
6297 struct i40e_aq_get_phy_abilities_resp abilities;
6298 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
6301 /* Read in new mode */
6302 error = sysctl_handle_int(oidp, &mode, 0, req);
6303 if ((error) || (req->newptr == NULL))
6306 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
6310 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
6312 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6313 int mode, error = 0;
6315 struct i40e_aq_get_phy_abilities_resp abilities;
6316 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
6319 /* Read in new mode */
6320 error = sysctl_handle_int(oidp, &mode, 0, req);
6321 if ((error) || (req->newptr == NULL))
6324 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
6328 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
6330 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6331 int mode, error = 0;
6333 struct i40e_aq_get_phy_abilities_resp abilities;
6334 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
6337 /* Read in new mode */
6338 error = sysctl_handle_int(oidp, &mode, 0, req);
6339 if ((error) || (req->newptr == NULL))
6342 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
6346 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
6348 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6349 int mode, error = 0;
6351 struct i40e_aq_get_phy_abilities_resp abilities;
6352 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
6355 /* Read in new mode */
6356 error = sysctl_handle_int(oidp, &mode, 0, req);
6357 if ((error) || (req->newptr == NULL))
6360 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
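/*
 * Example usage of the FEC handlers above, on a hypothetical 25G unit 0:
 *   # sysctl dev.ixl.0.fec.rs_requested=1      # request RS-FEC on link
 *   # sysctl dev.ixl.0.fec.auto_fec_enabled=1  # let FW pick FEC modes
 */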
6364 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
6366 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6367 struct i40e_hw *hw = &pf->hw;
6368 device_t dev = pf->dev;
6371 enum i40e_status_code status;
6373 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6375 device_printf(dev, "Could not allocate sbuf for output.\n");
6380 /* This amount is only necessary if reading the entire cluster into memory */
6381 #define IXL_FINAL_BUFF_SIZE (1280 * 1024)
6382 final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK);
6383 if (final_buff == NULL) {
6384 device_printf(dev, "Could not allocate memory for output.\n");
6387 int final_buff_len = 0;
6393 u16 curr_buff_size = 4096;
6394 u8 curr_next_table = 0;
6395 u32 curr_next_index = 0;
6401 sbuf_cat(buf, "\n");
6404 status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
6405 dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
6407 device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
6408 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
6412 /* copy info out of temp buffer */
6413 bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
6414 final_buff_len += ret_buff_size;
6416 if (ret_next_table != curr_next_table) {
6417 /* We're done with the current table; we can dump out read data. */
6418 sbuf_printf(buf, "%d:", curr_next_table);
6419 int bytes_printed = 0;
6420 while (bytes_printed < final_buff_len) {
6421 sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
6422 bytes_printed += 16;
6424 sbuf_cat(buf, "\n");
6426 /* The entire cluster has been read; we're finished */
6427 if (ret_next_table == 0xFF)
6430 /* Otherwise clear the output buffer and continue reading */
6431 bzero(final_buff, IXL_FINAL_BUFF_SIZE);
6435 if (ret_next_index == 0xFFFFFFFF)
6438 bzero(dump_buf, sizeof(dump_buf));
6439 curr_next_table = ret_next_table;
6440 curr_next_index = ret_next_index;
6444 free(final_buff, M_DEVBUF);
6446 error = sbuf_finish(buf);
6448 device_printf(dev, "Error finishing sbuf: %d\n", error);
6455 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
6457 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6458 struct i40e_hw *hw = &pf->hw;
6459 device_t dev = pf->dev;
6461 int state, new_state;
6462 enum i40e_status_code status;
6463 state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
6465 /* Read in new mode */
6466 error = sysctl_handle_int(oidp, &new_state, 0, req);
6467 if ((error) || (req->newptr == NULL))
6470 /* Already in requested state */
6471 if (new_state == state)
6474 if (new_state == 0) {
6475 if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) {
6476 device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n");
6480 if (pf->hw.aq.api_maj_ver < 1 ||
6481 (pf->hw.aq.api_maj_ver == 1 &&
6482 pf->hw.aq.api_min_ver < 7)) {
6483 device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
6487 i40e_aq_stop_lldp(&pf->hw, true, NULL);
6488 i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
6489 atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
6491 status = i40e_aq_start_lldp(&pf->hw, NULL);
6492 if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST)
6493 device_printf(dev, "FW LLDP agent is already running\n");
6494 atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
6501 * Get FW LLDP Agent status
6504 ixl_get_fw_lldp_status(struct ixl_pf *pf)
6506 enum i40e_status_code ret = I40E_SUCCESS;
6507 struct i40e_lldp_variables lldp_cfg;
6508 struct i40e_hw *hw = &pf->hw;
6511 ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
6515 /* Get the LLDP AdminStatus for the current port */
6516 adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
6519 /* Check if LLDP agent is disabled */
6521 device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n");
6522 atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
6524 atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
6530 ixl_attach_get_link_status(struct ixl_pf *pf)
6532 struct i40e_hw *hw = &pf->hw;
6533 device_t dev = pf->dev;
6536 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
6537 (hw->aq.fw_maj_ver < 4)) {
6538 i40e_msec_delay(75);
6539 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
6541 device_printf(dev, "link restart failed, aq_err=%d\n",
6542 pf->hw.aq.asq_last_status);
6547 /* Determine link state */
6548 hw->phy.get_link_info = TRUE;
6549 i40e_get_link_status(hw, &pf->link_up);