/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#include "ixl_pf_iov.h"

#include "ixl_iw_int.h"

#ifdef DEV_NETMAP
#include <net/netmap.h>
#include <sys/selinfo.h>
#include <dev/netmap/netmap_kern.h>
#endif /* DEV_NETMAP */
static int	ixl_setup_queue(struct ixl_queue *, struct ixl_pf *, int);
static u64	ixl_max_aq_speed_to_value(u8);
static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);

static int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

static int	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);

static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);

extern int ixl_enable_iwarp;
ixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)

	if (!(mask & pf->dbg_mask))

	/* Re-implement device_printf() */
	device_print_prettyname(pf->dev);
/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)

	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
		IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
		IXL_NVM_VERSION_LO_SHIFT,

	    oem_ver, oem_build, oem_patch);
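	/*
	 * A sample of what the format string above renders to, with
	 * purely illustrative values:
	 * "fw 4.40.35115 api 1.4 nvm 4.53 etid 80001cd8 oem 1.38.0"
	 */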
ixl_print_nvm_version(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	sbuf = sbuf_new_auto();
	ixl_nvm_version_str(hw, sbuf);

	device_printf(dev, "%s\n", sbuf_data(sbuf));
ixl_configure_tx_itr(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_queue *que = vsi->queues;

	vsi->tx_itr_setting = pf->tx_itr;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;

		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
		    vsi->tx_itr_setting);
		txr->itr = vsi->tx_itr_setting;
		txr->latency = IXL_AVE_LATENCY;
ixl_configure_rx_itr(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_queue *que = vsi->queues;

	vsi->rx_itr_setting = pf->rx_itr;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
		    vsi->rx_itr_setting);
		rxr->itr = vsi->rx_itr_setting;
		rxr->latency = IXL_AVE_LATENCY;
/*
 * Write PF ITR values to queue ITR registers.
 */
ixl_configure_itr(struct ixl_pf *pf)

	ixl_configure_tx_itr(pf);
	ixl_configure_rx_itr(pf);
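	/*
	 * Note: each queue has independent TX and RX ITR registers,
	 * selected by the IXL_TX_ITR/IXL_RX_ITR index passed to
	 * I40E_PFINT_ITRN() above.  As with the ITR0 write later in
	 * this file, the hardware interprets the programmed value in
	 * 2-usec units.
	 */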
/*********************************************************************
 *
 *  This routine is used in two ways: it is used by the stack as an
 *  init entry point in the network interface structure, and it is
 *  also used by the driver as a hw/sw initialization routine to get
 *  to a consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
ixl_init_locked(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ifnet *ifp = vsi->ifp;
	device_t dev = pf->dev;
	struct i40e_filter_control_settings filter;
	u8 tmpaddr[ETHER_ADDR_LEN];

	INIT_DEBUGOUT("ixl_init_locked: begin");
	IXL_PF_LOCK_ASSERT(pf);
	/*
	 * If the AQ is dead here, it probably means something outside
	 * of the driver did something to the adapter, like a PF reset.
	 * Rebuild the driver's state here if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
	    I40E_ETH_LENGTH_OF_ADDRESS);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		bcopy(tmpaddr, hw->mac.addr,
		    I40E_ETH_LENGTH_OF_ADDRESS);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,

			device_printf(dev, "LLA address "
			    "change failed!!\n");
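		/*
		 * To reach this point, the interface lladdr differed
		 * from the current hardware MAC filter, meaning a
		 * locally administered address (LAA) was set: the old
		 * address filter was deleted above and the new address
		 * was programmed with I40E_AQC_WRITE_TYPE_LAA_ONLY;
		 * its filter is then re-added below.
		 */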
	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = FALSE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");
	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Setup vlan's if needed */
	ixl_setup_vlan_filters(vsi);

	/* Set up MSI/X routing and the ITR settings */

		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);

		ixl_configure_legacy(pf);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	ixl_reconfigure_filters(vsi);

	/* And now turn on interrupts */
	ixl_enable_intr(vsi);

	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

	/* Start the local timer */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);

			    "initialize iwarp failed, code %d\n", ret);
/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/
ixl_get_hw_capabilities(struct ixl_pf *pf)

	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);

	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");

	/* This populates the hw struct */
	error = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);

	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&

		/* retry once with a larger buffer */

	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
		device_printf(dev, "capability discovery failed: %d\n",
		    pf->hw.aq.asq_last_status);

	/* Capture this PF's starting queue pair */
	pf->qbase = hw->func_caps.base_queue;

	device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
	    hw->pf_id, hw->func_caps.num_vfs,
	    hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf,
	    hw->func_caps.fd_filters_guaranteed,
	    hw->func_caps.fd_filters_best_effort,
	    hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp,
	    hw->func_caps.base_queue);

	/* Print a subset of the capability information. */
	device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n",
	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :

		struct i40e_osdep *osdep = (struct i40e_osdep *)hw->back;
		osdep->i2c_intfc_num = ixl_find_i2c_interface(pf);
		if (osdep->i2c_intfc_num != -1)
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)

	device_t dev = vsi->dev;

	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;

		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;

			    "TSO4 requires txcsum, enabling both...\n");

	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);

			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;

		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;

			    "TSO6 requires txcsum6, enabling both...\n");

	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);

			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
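	/*
	 * Summary of the invariant enforced above, for both the IPv4
	 * and IPv6 pairs: TSO may only be enabled while the matching
	 * transmit checksum offload is enabled.  The IXL_FLAGS_KEEP_TSO*
	 * flags remember that TSO was implicitly turned off when txcsum
	 * was disabled, so that re-enabling txcsum also restores TSO.
	 */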
/* For the set_advertise sysctl */

ixl_get_initial_advertised_speeds(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;
	struct i40e_aq_get_phy_abilities_resp abilities;

	/* Set initial sysctl values */
	status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities,

		/* Non-fatal error */
		device_printf(dev, "%s: i40e_aq_get_phy_capabilities() error %d\n",

	pf->advertised_speed =
	    ixl_convert_sysctl_aq_link_speed(abilities.link_speed, false);
ixl_teardown_hw_structs(struct ixl_pf *pf)

	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);

			    "init: LAN HMC shutdown failure; status %d\n", status);

	// XXX: This gets called when we know the adminq is inactive;
	// so we already know it's set up when we get here.

	/* Shutdown admin queue */
	status = i40e_shutdown_adminq(hw);

		    "init: Admin Queue shutdown failure; status %d\n", status);
ixl_reset(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary

	error = i40e_pf_reset(hw);

		device_printf(dev, "init: PF reset failure\n");

	error = i40e_init_adminq(hw);

		device_printf(dev, "init: Admin queue init failure;"
		    " status code %d\n", error);

	i40e_clear_pxe_mode(hw);

	error = ixl_get_hw_capabilities(pf);

		device_printf(dev, "init: Error retrieving HW capabilities;"
		    " status code %d\n", error);

	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);

		device_printf(dev, "init: LAN HMC init failed; status code %d\n",

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);

		device_printf(dev, "init: LAN HMC config failed; status code %d\n",

	// XXX: possible fix for panic, but our failure recovery is still broken
	error = ixl_switch_config(pf);

		device_printf(dev, "init: ixl_switch_config() failed: %d\n",

	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,

		device_printf(dev, "init: i40e_aq_set_phy_int_mask() failed: err %d,"
		    " aq_err %d\n", error, hw->aq.asq_last_status);

	error = i40e_set_fc(hw, &set_fc_err_mask, true);

		device_printf(dev, "init: setting link flow control failed; retcode %d,"
		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);

	// XXX: (Rebuild VSIs?)

	/* Firmware delay workaround */
	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {

		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);

			device_printf(dev, "init: link restart failed, aq_err %d\n",
			    hw->aq.asq_last_status);
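	/*
	 * The rebuild sequence above mirrors attach-time bringup:
	 * PF reset, admin queue init, PXE mode clear, capability
	 * discovery, LAN HMC init/configuration, switch configuration,
	 * PHY interrupt mask, and flow control setup, ending with the
	 * link-restart workaround for firmware older than 4.33.
	 */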
/*
** MSIX Interrupt Handlers and Tasklets
*/

ixl_handle_que(void *context, int pending)

	struct ixl_queue *que = context;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct tx_ring *txr = &que->txr;
	struct ifnet *ifp = vsi->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);

		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);

			taskqueue_enqueue(que->tq, &que->task);

	/* Re-enable this interrupt */
	ixl_enable_queue(hw, que->me);
/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/

	struct ixl_pf *pf = arg;
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_queue *que = vsi->queues;
	struct ifnet *ifp = vsi->ifp;
	struct tx_ring *txr = &que->txr;

	bool more_tx, more_rx;

	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)

	icr0 = rd32(hw, I40E_PFINT_ICR0);

	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
		taskqueue_enqueue(pf->tq, &pf->vflr_task);

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		taskqueue_enqueue(pf->tq, &pf->adminq);

	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {

		more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

		more_tx = ixl_txeof(que);
		if (!drbr_empty(vsi->ifp, txr->br))

	ixl_enable_intr0(hw);
/*********************************************************************
 *
 *  MSIX VSI Interrupt Service routine
 *
 **********************************************************************/

ixl_msix_que(void *arg)

	struct ixl_queue *que = arg;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct tx_ring *txr = &que->txr;
	bool more_tx, more_rx;

	/* Protect against spurious interrupts */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	more_tx = ixl_txeof(que);

	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))

	ixl_set_queue_rx_itr(que);
	ixl_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);

	ixl_enable_queue(hw, que->me);
/*********************************************************************
 *
 *  MSIX Admin Queue Interrupt Service routine
 *
 **********************************************************************/

ixl_msix_adminq(void *arg)

	struct ixl_pf *pf = arg;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u32 reg, mask, rstat_reg;
	bool do_task = FALSE;

	reg = rd32(hw, I40E_PFINT_ICR0);
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
		mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK;

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ixl_handle_mdd_event(pf);
		mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK;

	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
		device_printf(dev, "Reset Requested!\n");
		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		device_printf(dev, "Reset type: ");

		/* These others might be handled similarly to an EMPR reset */
		case I40E_RESET_CORER:

		case I40E_RESET_GLOBR:

		case I40E_RESET_EMPR:

			atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);

		/* overload admin queue task to check reset progress */

	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) {
		device_printf(dev, "ECC Error detected!\n");

	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		reg = rd32(hw, I40E_PFHMC_ERRORINFO);
		if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
			device_printf(dev, "HMC Error detected!\n");
			device_printf(dev, "INFO 0x%08x\n", reg);
			reg = rd32(hw, I40E_PFHMC_ERRORDATA);
			device_printf(dev, "DATA 0x%08x\n", reg);
			wr32(hw, I40E_PFHMC_ERRORINFO, 0);

	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) {
		device_printf(dev, "PCI Exception detected!\n");

	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		taskqueue_enqueue(pf->tq, &pf->vflr_task);

		taskqueue_enqueue(pf->tq, &pf->adminq);

	ixl_enable_intr0(hw);
ixl_set_promisc(struct ixl_vsi *vsi)

	struct ifnet *ifp = vsi->ifp;
	struct i40e_hw *hw = vsi->hw;

	bool uni = FALSE, multi = FALSE;

	if (ifp->if_flags & IFF_ALLMULTI)

	else { /* Need to count the multicast addresses */
		struct ifmultiaddr *ifma;

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)

			if (mcnt == MAX_MULTICAST_ADDR)

		if_maddr_runlock(ifp);

	if (mcnt >= MAX_MULTICAST_ADDR)

	if (ifp->if_flags & IFF_PROMISC)

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL, TRUE);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
/*********************************************************************
 *
 *  Routines for multicast and vlan filter management.
 *
 *********************************************************************/
ixl_add_multi(struct ixl_vsi *vsi)

	struct ifmultiaddr *ifma;
	struct ifnet *ifp = vsi->ifp;
	struct i40e_hw *hw = vsi->hw;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	/*
	** First just get a count, to decide if we
	** should simply use multicast promiscuous.
	*/
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)

	if_maddr_runlock(ifp);

	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete existing MC filters */
		ixl_del_hw_filters(vsi, mcnt);
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)

		ixl_add_mc_filter(vsi,
		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));

	if_maddr_runlock(ifp);

	flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
	ixl_add_hw_filters(vsi, flags, mcnt);

	IOCTL_DEBUGOUT("ixl_add_multi: end");
ixl_del_multi(struct ixl_vsi *vsi)

	struct ifnet *ifp = vsi->ifp;
	struct ifmultiaddr *ifma;
	struct ixl_mac_filter *f;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	/* Search for removed multicast addresses */

	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {

			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)

				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {

			if (match == FALSE) {
				f->flags |= IXL_FILTER_DEL;

	if_maddr_runlock(ifp);

	ixl_del_hw_filters(vsi, mcnt);
/*********************************************************************
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 *  Only runs when the driver is configured UP and RUNNING.
 *
 **********************************************************************/
ixl_local_timer(void *arg)

	struct ixl_pf *pf = arg;
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_queue *que = vsi->queues;
	device_t dev = pf->dev;

	s32 timer, new_timer;

	IXL_PF_LOCK_ASSERT(pf);

	/* Fire off the adminq task */
	taskqueue_enqueue(pf->tq, &pf->adminq);

	ixl_update_stats_counters(pf);

	/* Check status of the queues */
	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
	    I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
	    I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);

	for (int i = 0; i < vsi->num_queues; i++, que++) {

		timer = atomic_load_acq_32(&txr->watchdog_timer);

		new_timer = timer - hz;
		if (new_timer <= 0) {
			atomic_store_rel_32(&txr->watchdog_timer, -1);
			device_printf(dev, "WARNING: queue %d "
			    "appears to be hung!\n", que->me);

			/*
			 * If this fails, that means something in the TX path
			 * has updated the watchdog, so it means the TX path
			 * is still working and the watchdog doesn't need to
			 * count down.
			 */
			atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer);
			/* Any queues with outstanding work get a sw irq */
			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);

	/* Reset when a queue shows hung */

	callout_reset(&pf->timer, hz, ixl_local_timer, pf);

	device_printf(dev, "WARNING: Resetting!\n");
	pf->watchdog_events++;
	ixl_init_locked(pf);
ixl_link_up_msg(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = pf->vsi.ifp;

	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, FEC: %s, Autoneg: %s, Flow Control: %s\n",

	    ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
	    (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) ?
		"Clause 74 BASE-R FEC" : (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) ?
		"Clause 108 RS-FEC" : "None",
	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
	    hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
		ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
		ixl_fc_string[1] : ixl_fc_string[0]);
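	/*
	 * Per the ternary chain above, ixl_fc_string is indexed as:
	 * [3] both TX and RX pause negotiated (full flow control),
	 * [2] TX pause only, [1] RX pause only, [0] no flow control.
	 */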
/*
** Note: this routine updates the OS's view of the link state; the
** real check of the hardware only happens with a link interrupt.
*/
ixl_update_link_status(struct ixl_pf *pf)

	struct ixl_vsi *vsi = &pf->vsi;
	struct ifnet *ifp = vsi->ifp;
	device_t dev = pf->dev;

		if (vsi->link_active == FALSE) {
			vsi->link_active = TRUE;
			ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->link_speed);
			if_link_state_change(ifp, LINK_STATE_UP);
			ixl_link_up_msg(pf);

	} else { /* Link down */
		if (vsi->link_active == TRUE) {

			device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			vsi->link_active = FALSE;
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by stopping the
 *  local timer, disabling the rings and their interrupts, and
 *  marking the interface as no longer running.
 *
 **********************************************************************/
ixl_stop_locked(struct ixl_pf *pf)

	struct ixl_vsi *vsi = &pf->vsi;
	struct ifnet *ifp = vsi->ifp;

	INIT_DEBUGOUT("ixl_stop: begin\n");

	IXL_PF_LOCK_ASSERT(pf);

	/* Stop iWARP device */
	if (ixl_enable_iwarp && pf->iw_enabled)

	/* Stop the local timer */
	callout_stop(&pf->timer);

	ixl_disable_rings_intr(vsi);
	ixl_disable_rings(vsi);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);

ixl_stop(struct ixl_pf *pf)

	ixl_stop_locked(pf);
/*********************************************************************
 *
 *  Setup the legacy/MSI interrupt resource and handler
 *
 **********************************************************************/
ixl_setup_legacy(struct ixl_pf *pf)

	device_t dev = pf->dev;

	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (pf->res == NULL) {
		device_printf(dev, "bus_alloc_resource_any() for"
		    " legacy/msi interrupt failed\n");

	/* Set the handler function */
	error = bus_setup_intr(dev, pf->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixl_intr, pf, &pf->tag);

		device_printf(dev, "bus_setup_intr() for legacy/msi"
		    " interrupt handler failed, error %d\n", error);

	error = bus_describe_intr(dev, pf->res, pf->tag, "irq");

		device_printf(dev, "bus_describe_intr() for legacy/msi"
		    " interrupt name failed, error %d\n", error);
ixl_setup_adminq_tq(struct ixl_pf *pf)

	device_t dev = pf->dev;

	/* Tasklet for Admin Queue interrupts */
	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);

	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);

	/* Create and start Admin Queue taskqueue */
	pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT,
	    taskqueue_thread_enqueue, &pf->tq);

		device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n");

	error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq",
	    device_get_nameunit(dev));

		device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n",

		taskqueue_free(pf->tq);
ixl_setup_queue_tqs(struct ixl_vsi *vsi)

	struct ixl_queue *que = vsi->queues;
	device_t dev = vsi->dev;

	/* Create queue tasks and start queue taskqueues */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
		TASK_INIT(&que->task, 0, ixl_handle_que, que);
		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);

		CPU_SETOF(cpu_id, &cpu_mask);
		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
		    &cpu_mask, "%s (bucket %d)",
		    device_get_nameunit(dev), cpu_id);

		taskqueue_start_threads(&que->tq, 1, PI_NET,
		    "%s (que %d)", device_get_nameunit(dev), que->me);
ixl_free_adminq_tq(struct ixl_pf *pf)

		taskqueue_free(pf->tq);

ixl_free_queue_tqs(struct ixl_vsi *vsi)

	struct ixl_queue *que = vsi->queues;

	for (int i = 0; i < vsi->num_queues; i++, que++) {

			taskqueue_free(que->tq);
ixl_setup_adminq_msix(struct ixl_pf *pf)

	device_t dev = pf->dev;

	/* Admin IRQ rid is 1, vector is 0 */

	/* Get interrupt resource from bus */
	pf->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);

		device_printf(dev, "bus_alloc_resource_any() for Admin Queue"
		    " interrupt failed [rid=%d]\n", rid);

	/* Then associate interrupt with handler */
	error = bus_setup_intr(dev, pf->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixl_msix_adminq, pf, &pf->tag);

		device_printf(dev, "bus_setup_intr() for Admin Queue"
		    " interrupt handler failed, error %d\n", error);

	error = bus_describe_intr(dev, pf->res, pf->tag, "aq");

		device_printf(dev, "bus_describe_intr() for Admin Queue"
		    " interrupt name failed, error %d\n", error);
/*
 * Allocate interrupt resources from the bus and associate an interrupt
 * handler with each of the VSI's queues.
 */
ixl_setup_queue_msix(struct ixl_vsi *vsi)

	device_t dev = vsi->dev;
	struct ixl_queue *que = vsi->queues;
	struct tx_ring *txr;
	int error, rid, vector = 1;

	/* Queue interrupt vector numbers start at 1 (adminq intr is 0) */
	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {

		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);

			device_printf(dev, "bus_alloc_resource_any() for"
			    " Queue %d interrupt failed [rid=%d]\n",

		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixl_msix_que, que, &que->tag);

			device_printf(dev, "bus_setup_intr() for Queue %d"
			    " interrupt handler failed, error %d\n",

			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);

		error = bus_describe_intr(dev, que->res, que->tag, "q%d", i);

			device_printf(dev, "bus_describe_intr() for Queue %d"
			    " interrupt name failed, error %d\n",

		/* Bind the vector to a CPU */

		cpu_id = rss_getcpu(i % rss_getnumbuckets());

		error = bus_bind_intr(dev, que->res, cpu_id);

			device_printf(dev, "bus_bind_intr() for Queue %d"
			    " to CPU %d failed, error %d\n",
			    que->me, cpu_id, error);
/*
 * When used in a virtualized environment, the PCI BUSMASTER capability
 * may not be set, so explicitly set it here to allow the host to
 * successfully initialize us.
 */
ixl_set_busmaster(device_t dev)

	pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);

/*
 * Rewrite the ENABLE bit in the MSIX control register
 * to cause the host to successfully initialize us.
 */
ixl_set_msix_enable(device_t dev)

	pci_find_cap(dev, PCIY_MSIX, &rid);
	rid += PCIR_MSIX_CTRL;
	msix_ctrl = pci_read_config(dev, rid, 2);
	msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(dev, rid, msix_ctrl, 2);
/*
 * Allocate MSI/X vectors from the OS.
 * Returns 0 for legacy, 1 for MSI, >1 for MSIX.
 */
ixl_init_msix(struct ixl_pf *pf)

	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	int auto_max_queues;
	int rid, want, vectors, queues, available;

	int iw_want, iw_vectors;

	/* Override by tunable */
	if (!pf->enable_msix)

	/* Ensure proper operation in virtualized environment */
	ixl_set_busmaster(dev);

	/* First try MSI/X */
	rid = PCIR_BAR(IXL_MSIX_BAR);
	pf->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!pf->msix_mem) {
		/* May not be enabled */
		device_printf(pf->dev,
		    "Unable to map MSIX table\n");

	available = pci_msix_count(dev);
	if (available < 2) {
		/* system has msix disabled (0), or only one vector (1) */
		bus_release_resource(dev, SYS_RES_MEMORY,

		pf->msix_mem = NULL;
	/* Clamp max number of queues based on:
	 * - # of MSI-X vectors available
	 * - # of cpus available
	 * - # of queues that can be assigned to the LAN VSI
	 */
	auto_max_queues = min(mp_ncpus, available - 1);
	if (hw->mac.type == I40E_MAC_X722)
		auto_max_queues = min(auto_max_queues, 128);

		auto_max_queues = min(auto_max_queues, 64);

	/* Override with tunable value if tunable is less than autoconfig count */
	if ((pf->max_queues != 0) && (pf->max_queues <= auto_max_queues))
		queues = pf->max_queues;
	/* Use autoconfig amount if that's lower */
	else if ((pf->max_queues != 0) && (pf->max_queues > auto_max_queues)) {
		device_printf(dev, "ixl_max_queues (%d) is too large, using "
		    "autoconfig amount (%d)...\n",
		    pf->max_queues, auto_max_queues);
		queues = auto_max_queues;

	/* Limit maximum auto-configured queues to 8 if no user value is set */

		queues = min(auto_max_queues, 8);
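	/*
	 * Worked example (purely illustrative numbers): on a 16-core
	 * system with 32 MSI-X vectors available and no ixl_max_queues
	 * tunable set, auto_max_queues = min(16, 31) = 16, and the
	 * default clamp above then limits the count to 8 queues.
	 */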
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for the admin queue.
	*/

	if (want <= available)	/* Have enough */

		device_printf(pf->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors available but %d wanted!\n",

		pf->msix_mem = NULL;
		goto no_msix;	/* Will go to Legacy setup */
	if (ixl_enable_iwarp) {
		/* iWARP wants additional vector for CQP */
		iw_want = mp_ncpus + 1;
		available -= vectors;
		if (available > 0) {
			iw_vectors = (available >= iw_want) ?
			    iw_want : available;
			vectors += iw_vectors;
	ixl_set_msix_enable(dev);
	if (pci_alloc_msix(dev, &vectors) == 0) {
		device_printf(pf->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);

		if (ixl_enable_iwarp)
			pf->iw_msix = iw_vectors;

		pf->vsi.num_queues = queues;

		/*
		 * If we're doing RSS, the number of queues needs to
		 * match the number of RSS buckets that are configured.
		 *
		 * + If there's more queues than RSS buckets, we'll end
		 *   up with queues that get no traffic.
		 *
		 * + If there's more RSS buckets than queues, we'll end
		 *   up having multiple RSS buckets map to the same queue,
		 *   so there'll be some contention.
		 */
		if (queues != rss_getnumbuckets()) {

			    "%s: queues (%d) != RSS buckets (%d)"
			    "; performance will be impacted.\n",
			    __func__, queues, rss_getnumbuckets());
	vectors = pci_msi_count(dev);
	pf->vsi.num_queues = 1;

	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
		device_printf(pf->dev, "Using an MSI interrupt\n");

		device_printf(pf->dev, "Using a Legacy interrupt\n");
/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 */
ixl_configure_intr0_msix(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
/*
 * Configure queue interrupt cause registers in hardware.
 */
ixl_configure_queue_intr_msix(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;

	for (int i = 0; i < vsi->num_queues; i++, vector++) {
		wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
		/* First queue type is RX / 0 */
		wr32(hw, I40E_PFINT_LNKLSTN(i), i);

		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
		    (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
		    (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		    (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_RQCTL(i), reg);

		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		    (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		    (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		    (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_TQCTL(i), reg);
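		/*
		 * The two writes above build a per-vector interrupt
		 * linked list: LNKLSTN(i) points at RX queue i, the
		 * RQCTL NEXTQ fields chain RX queue i to TX queue i,
		 * and the TQCTL NEXTQ value of IXL_QUEUE_EOL terminates
		 * the list, so each MSI-X vector covers one RX/TX pair.
		 */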
/*
 * Configure for MSI single vector operation
 */
ixl_configure_legacy(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_queue *que = vsi->queues;
	struct rx_ring *rxr = &que->rxr;
	struct tx_ring *txr = &que->txr;

	vsi->tx_itr_setting = pf->tx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
	    vsi->tx_itr_setting);
	txr->itr = vsi->tx_itr_setting;

	vsi->rx_itr_setting = pf->rx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
	    vsi->rx_itr_setting);
	rxr->itr = vsi->rx_itr_setting;

	/* Setup "other" causes */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
	    | I40E_PFINT_ICR0_ENA_GRST_MASK
	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/* No ITR for non-queue interrupts */
	wr32(hw, I40E_PFINT_STAT_CTL0,
	    IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the q int */
	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
	wr32(hw, I40E_QINT_TQCTL(0), reg);
ixl_allocate_pci_resources(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,

	if (!(pf->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	pf->osdep.mem_bus_space_tag =
	    rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
	    rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;

	pf->hw.back = &pf->osdep;
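	/*
	 * Note that hw_addr is pointed at the bus-space handle itself
	 * rather than at a directly mapped register window; the osdep
	 * register wrappers (rd32()/wr32() used throughout this file)
	 * perform the actual access through the bus-space tag and
	 * handle saved above.
	 */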
/*
 * Teardown and release the admin queue/misc vector
 */
ixl_teardown_adminq_msix(struct ixl_pf *pf)

	device_t dev = pf->dev;

	if (pf->admvec) /* we are doing MSIX */
		rid = pf->admvec + 1;
	else
		rid = (pf->msix != 0) ? 1 : 0;

	if (pf->tag != NULL) {
		bus_teardown_intr(dev, pf->res, pf->tag);

			device_printf(dev, "bus_teardown_intr() for"
			    " interrupt 0 failed\n");

	if (pf->res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);

			device_printf(dev, "bus_release_resource() for"
			    " interrupt 0 failed [rid=%d]\n", rid);
ixl_teardown_queue_msix(struct ixl_vsi *vsi)

	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
	struct ixl_queue *que = vsi->queues;
	device_t dev = vsi->dev;

	/* We may get here before stations are setup */
	if ((pf->msix < 2) || (que == NULL))

	/* Release all MSIX queue resources */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			error = bus_teardown_intr(dev, que->res, que->tag);

				device_printf(dev, "bus_teardown_intr() for"
				    " Queue %d interrupt failed\n",

		if (que->res != NULL) {
			error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);

				device_printf(dev, "bus_release_resource() for"
				    " Queue %d interrupt failed [rid=%d]\n",
ixl_free_pci_resources(struct ixl_pf *pf)

	device_t dev = pf->dev;

	ixl_teardown_queue_msix(&pf->vsi);
	ixl_teardown_adminq_msix(pf);

	pci_release_msi(dev);

	memrid = PCIR_BAR(IXL_MSIX_BAR);

	if (pf->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, pf->msix_mem);

	if (pf->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), pf->pci_mem);
ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)

	/* Add supported media types */
	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_UNKNOWN, 0, NULL);
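	/*
	 * 25GBASE_LR is reported as IFM_UNKNOWN above, presumably
	 * because no matching IFM_25G_* media type was available when
	 * this mapping was written; the other 25G PHY types map to
	 * their IFM_25G_* equivalents.
	 */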
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)

	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;

	struct i40e_hw *hw = vsi->hw;
	struct ixl_queue *que = vsi->queues;
	struct i40e_aq_get_phy_abilities_resp abilities;
	enum i40e_status_code aq_error = 0;

	INIT_DEBUGOUT("ixl_setup_interface: begin");

	ifp = vsi->ifp = if_alloc(IFT_ETHER);

		device_printf(dev, "can not allocate ifnet structure\n");

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = ixl_init;
	ifp->if_softc = vsi;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixl_ioctl;

#if __FreeBSD_version >= 1100036
	if_setgetcounterfn(ifp, ixl_get_counter);

	ifp->if_transmit = ixl_mq_start;

	ifp->if_qflush = ixl_qflush;

	ifp->if_snd.ifq_maxlen = que->num_desc - 2;

	vsi->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;
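	/*
	 * For the default 1500-byte MTU this works out to
	 * 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag)
	 * = 1522 bytes.
	 */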
	/* Set TSO limits */
	ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
	ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
	ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
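	/*
	 * if_hw_tsomax above is IP_MAXPACKET (65535) minus the 14-byte
	 * Ethernet header and 4-byte CRC, i.e. 65517 bytes of TSO
	 * payload per send.
	 */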
	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_TSO;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_LRO;

	/* VLAN capabilities */
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING

	    | IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	** Don't turn this on by default: if vlans are
	** created on another pseudo device (e.g. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the ixl driver you can
	** enable this and get full hardware tag filtering.
	*/
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,

	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, TRUE, &abilities, NULL);
	/* May need delay to detect fiber correctly */
	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
		i40e_msec_delay(200);
		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
		    TRUE, &abilities, NULL);

	if (aq_error == I40E_ERR_UNKNOWN_PHY)
		device_printf(dev, "Unknown PHY type detected!\n");

		    "Error getting supported media types, err %d,"
		    " AQ error %d\n", aq_error, hw->aq.asq_last_status);

	pf->supported_speeds = abilities.link_speed;
	ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds);

	ixl_add_ifmedia(vsi, hw->phy.phy_types);

	/* Use autoselect media by default */
	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);

	ether_ifattach(ifp, hw->mac.addr);
/*
** Run when the Admin Queue gets a link state change interrupt.
*/

ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)

	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct i40e_aqc_get_link_status *status =
	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;

	/* Request link status from adapter */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);

	/* Print out message if an unqualified module is found */
	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
	    (!(status->link_info & I40E_AQ_LINK_UP)))
		device_printf(dev, "Link failed because "
		    "an unqualified module was detected!\n");

	/* Update OS link info */
	ixl_update_link_status(pf);
/*********************************************************************
 *
 *  Get Firmware Switch configuration
 *	- this will need to be more robust when more complex
 *	  switch configurations are enabled.
 *
 **********************************************************************/
ixl_switch_config(struct ixl_pf *pf)

	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = vsi->dev;
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u8 aq_buf[I40E_AQ_LARGE_BUF];

	memset(&aq_buf, 0, sizeof(aq_buf));
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	ret = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);

		device_printf(dev, "aq_get_switch_config() failed, error %d,"
		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);

	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {

		    "Switch config: header reported: %d in structure, %d total\n",
		    sw_config->header.num_reported, sw_config->header.num_total);
		for (int i = 0; i < sw_config->header.num_reported; i++) {

			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
			    sw_config->element[i].element_type,
			    sw_config->element[i].seid,
			    sw_config->element[i].uplink_seid,
			    sw_config->element[i].downlink_seid);

	/* Simplified due to a single VSI */
	vsi->uplink_seid = sw_config->element[0].uplink_seid;
	vsi->downlink_seid = sw_config->element[0].downlink_seid;
	vsi->seid = sw_config->element[0].seid;
/*********************************************************************
 *
 *  Initialize the VSI: this handles contexts, which means things
 *  like the number of descriptors and buffer size; the rings are
 *  also initialized through this function.
 *
 **********************************************************************/
ixl_initialize_vsi(struct ixl_vsi *vsi)

	struct ixl_pf *pf = vsi->back;
	struct ixl_queue *que = vsi->queues;
	device_t dev = vsi->dev;
	struct i40e_hw *hw = vsi->hw;
	struct i40e_vsi_context ctxt;

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = vsi->seid;
	if (pf->veb_seid != 0)
		ctxt.uplink_seid = pf->veb_seid;
	ctxt.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);

		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
		    " aq_error %d\n", err, hw->aq.asq_last_status);

	ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
	    "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
	    ctxt.uplink_seid, ctxt.vsi_number,
	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);

	/*
	** Set the queue and traffic class bits
	**  - when multiple traffic classes are supported
	**    this will need to be more robust.
	*/
	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
	/* In contig mode, que_mapping[0] is first queue index used by this VSI */
	ctxt.info.queue_mapping[0] = 0;

	/*
	 * This VSI will only use traffic class 0; start traffic class 0's
	 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
	 * the driver may not use all of them).
	 */
	tc_queues = bsrl(pf->qtag.num_allocated);
	ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
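	/*
	 * For example (illustrative values only): with 8 allocated
	 * queue pairs, bsrl() returns log2(8) = 3, so the TC0 mapping
	 * advertises 2^3 = 8 queues starting at offset 0.
	 */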
	/* Set VLAN receive stripping mode */
	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
	else
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	/* Set TCP Enable for iWARP capable VSI */
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ctxt.info.valid_sections |=
		    htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;

	/* Save VSI number and info for use later */
	vsi->vsi_num = ctxt.vsi_number;
	bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));

	/* Reset VSI statistics */
	ixl_vsi_reset_stats(vsi);
	vsi->hw_filters_add = 0;
	vsi->hw_filters_del = 0;

	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);

	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);

		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
		    " aq_error %d\n", err, hw->aq.asq_last_status);
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		struct rx_ring *rxr = &que->rxr;
		struct i40e_hmc_obj_txq tctx;
		struct i40e_hmc_obj_rxq rctx;

		/* Setup the HMC TX Context */
		size = que->num_desc * sizeof(struct i40e_tx_desc);
		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
		tctx.new_context = 1;
		tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
		tctx.qlen = que->num_desc;

		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
		/* Enable HEAD writeback */
		tctx.head_wb_ena = 1;
		tctx.head_wb_addr = txr->dma.pa +
		    (que->num_desc * sizeof(struct i40e_tx_desc));
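		/*
		 * With head writeback enabled, the hardware reports TX
		 * completion by DMA-writing the ring's head index to
		 * head_wb_addr, which is placed in the same DMA buffer
		 * immediately after the last descriptor; the extra
		 * sizeof(u32) in the ring allocation in ixl_setup_queue()
		 * reserves room for it.
		 */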
2157 tctx.rdylist_act = 0;
2158 err = i40e_clear_lan_tx_queue_context(hw, i);
2160 device_printf(dev, "Unable to clear TX context\n");
2163 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2165 device_printf(dev, "Unable to set TX context\n");
2168 /* Associate the ring with this PF */
2169 txctl = I40E_QTX_CTL_PF_QUEUE;
2170 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2171 I40E_QTX_CTL_PF_INDX_MASK);
2172 wr32(hw, I40E_QTX_CTL(i), txctl);
2175 /* Do ring (re)init */
2176 ixl_init_tx_ring(que);
2178 /* Next setup the HMC RX Context */
2179 if (vsi->max_frame_size <= MCLBYTES)
2180 rxr->mbuf_sz = MCLBYTES;
2182 rxr->mbuf_sz = MJUMPAGESIZE;
2184 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2186 /* Set up an RX context for the HMC */
2187 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2188 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2189 /* ignore header split for now */
2190 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2191 rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2192 vsi->max_frame_size : max_rxmax;
		rctx.dtype = 0;
		rctx.dsize = 1;		/* do 32byte descriptors */
		rctx.hsplit_0 = 0;	/* no HDR split initially */
		rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
		rctx.qlen = que->num_desc;
		rctx.tphrdesc_ena = 1;
		rctx.tphwdesc_ena = 1;
		rctx.tphdata_ena = 0;
		rctx.tphhead_ena = 0;
		rctx.lrxqthresh = 2;

		err = i40e_clear_lan_rx_queue_context(hw, i);
		if (err) {
			device_printf(dev,
			    "Unable to clear RX context %d\n", i);
			break;
		}
		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
		if (err) {
			device_printf(dev, "Unable to set RX context %d\n", i);
			break;
		}
		err = ixl_init_rx_ring(que);
		if (err) {
			device_printf(dev, "Fail in init_rx_ring %d\n", i);
			break;
		}
#ifdef DEV_NETMAP
		/* preserve queue */
		if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(vsi->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
			wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
		} else
#endif /* DEV_NETMAP */
		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
	}

	return (err);
}

/*********************************************************************
 *
 *  Free all VSI structs.
 *
 **********************************************************************/
void
ixl_free_vsi(struct ixl_vsi *vsi)
{
	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
	struct ixl_queue	*que = vsi->queues;

	/* Free station queues */
	if (!vsi->queues)
		goto free_filters;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		struct rx_ring *rxr = &que->rxr;

		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
			continue;
		ixl_free_que_tx(que);
		if (txr->base)
			i40e_free_dma_mem(&pf->hw, &txr->dma);
		IXL_TX_LOCK_DESTROY(txr);

		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
			continue;
		ixl_free_que_rx(que);
		if (rxr->base)
			i40e_free_dma_mem(&pf->hw, &rxr->dma);
		IXL_RX_LOCK_DESTROY(rxr);
	}
	free(vsi->queues, M_DEVBUF);

free_filters:
	/* Free VSI filter list */
	ixl_free_mac_filters(vsi);
}

void
ixl_free_mac_filters(struct ixl_vsi *vsi)
{
	struct ixl_mac_filter *f;

	while (!SLIST_EMPTY(&vsi->ftl)) {
		f = SLIST_FIRST(&vsi->ftl);
		SLIST_REMOVE_HEAD(&vsi->ftl, next);
		free(f, M_DEVBUF);
	}
}

/*
 * Fill out fields in queue struct and setup tx/rx memory and structs
 */
static int
ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct tx_ring *txr = &que->txr;
	struct rx_ring *rxr = &que->rxr;
	int error = 0;
	int rsize, tsize;

	que->num_desc = pf->ringsz;
	que->me = index;
	que->vsi = vsi;

	txr->que = que;
	txr->tail = I40E_QTX_TAIL(que->me);

	/* Initialize the TX lock */
	snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
	    device_get_nameunit(dev), que->me);
	mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
	/* Create the TX descriptor ring */
	tsize = roundup2((que->num_desc *
	    sizeof(struct i40e_tx_desc)) +
	    sizeof(u32), DBA_ALIGN);
	if (i40e_allocate_dma_mem(hw,
	    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
		device_printf(dev,
		    "Unable to allocate TX Descriptor memory\n");
		error = ENOMEM;
		goto fail;
	}
	txr->base = (struct i40e_tx_desc *)txr->dma.va;
	bzero((void *)txr->base, tsize);
	/* Now allocate transmit soft structs for the ring */
	if (ixl_allocate_tx_data(que)) {
		device_printf(dev,
		    "Critical Failure setting up TX structures\n");
		error = ENOMEM;
		goto fail;
	}
	/* Allocate a buf ring */
	txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF,
	    M_NOWAIT, &txr->mtx);
	if (txr->br == NULL) {
		device_printf(dev,
		    "Critical Failure setting up TX buf ring\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the RX descriptor ring */
	rsize = roundup2(que->num_desc *
	    sizeof(union i40e_rx_desc), DBA_ALIGN);
	rxr->que = que;
	rxr->tail = I40E_QRX_TAIL(que->me);

	/* Initialize the RX side lock */
	snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
	    device_get_nameunit(dev), que->me);
	mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);

	if (i40e_allocate_dma_mem(hw,
	    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
		device_printf(dev,
		    "Unable to allocate RX Descriptor memory\n");
		error = ENOMEM;
		goto fail;
	}
	rxr->base = (union i40e_rx_desc *)rxr->dma.va;
	bzero((void *)rxr->base, rsize);
	/* Allocate receive soft structs for the ring */
	if (ixl_allocate_rx_data(que)) {
		device_printf(dev,
		    "Critical Failure setting up receive structs\n");
		error = ENOMEM;
		goto fail;
	}

	return (0);

fail:
	/* Unwind whatever was set up above */
	if (rxr->base)
		i40e_free_dma_mem(&pf->hw, &rxr->dma);
	if (mtx_initialized(&rxr->mtx))
		mtx_destroy(&rxr->mtx);
	if (txr->br)
		buf_ring_free(txr->br, M_DEVBUF);
	if (txr->base)
		i40e_free_dma_mem(&pf->hw, &txr->dma);
	if (mtx_initialized(&txr->mtx))
		mtx_destroy(&txr->mtx);

	return (error);
}

/*********************************************************************
 *
 *  Allocate memory for the VSI (virtual station interface) and its
 *  associated queues, rings and the descriptors associated with each,
 *  called only once at attach.
 *
 **********************************************************************/
int
ixl_setup_stations(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	struct ixl_vsi *vsi;
	struct ixl_queue *que;
	int error = 0;

	vsi = &pf->vsi;
	vsi->back = (void *)pf;

	/* Get memory for the station queues */
	if (!(vsi->queues =
	    (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		return (ENOMEM);
	}

	/* Then setup each queue */
	for (int i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		error = ixl_setup_queue(que, pf, i);
		if (error)
			break;
	}

	return (error);
}

/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
void
ixl_set_queue_rx_itr(struct ixl_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = vsi->hw;
	struct rx_ring	*rxr = &que->rxr;
	u16		rx_itr;
	u16		rx_latency = 0;
	int		rx_bytes;

	/* Idle, do nothing */
	if (rxr->bytes == 0)
		return;

	if (pf->dynamic_rx_itr) {
		rx_bytes = rxr->bytes/rxr->itr;
		rx_itr = rxr->itr;

		/* Adjust latency range */
		switch (rxr->latency) {
		case IXL_LOW_LATENCY:
			if (rx_bytes > 10) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (rx_bytes > 20) {
				rx_latency = IXL_BULK_LATENCY;
				rx_itr = IXL_ITR_8K;
			} else if (rx_bytes <= 10) {
				rx_latency = IXL_LOW_LATENCY;
				rx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (rx_bytes <= 20) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		}

		rxr->latency = rx_latency;

		if (rx_itr != rxr->itr) {
			/* Do an exponential smoothing */
			rx_itr = (10 * rx_itr * rxr->itr) /
			    ((9 * rx_itr) + rxr->itr);
			rxr->itr = min(rx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
			    que->me), rxr->itr);
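			/*
			 * The smoothing above is a harmonic mean weighted 9:1
			 * toward the current ITR: new = 10*t*c / (9*t + c),
			 * where t is the target and c the current value.
			 * E.g. c = 100, t = 1000 yields ~110, while c = 1000,
			 * t = 100 yields ~526, so the interval shrinks (more
			 * interrupts) faster than it grows.
			 */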
		}
	} else { /* We may have toggled to non-dynamic */
		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->rx_itr_setting = pf->rx_itr;
		/* Update the hardware if needed */
		if (rxr->itr != vsi->rx_itr_setting) {
			rxr->itr = vsi->rx_itr_setting;
			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
			    que->me), rxr->itr);
		}
	}
	rxr->bytes = 0;
	rxr->packets = 0;
}

/*
** Provide an update to the queue TX
** interrupt moderation value.
*/
void
ixl_set_queue_tx_itr(struct ixl_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	u16		tx_itr;
	u16		tx_latency = 0;
	int		tx_bytes;

	/* Idle, do nothing */
	if (txr->bytes == 0)
		return;

	if (pf->dynamic_tx_itr) {
		tx_bytes = txr->bytes/txr->itr;
		tx_itr = txr->itr;

		/* Adjust latency range */
		switch (txr->latency) {
		case IXL_LOW_LATENCY:
			if (tx_bytes > 10) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (tx_bytes > 20) {
				tx_latency = IXL_BULK_LATENCY;
				tx_itr = IXL_ITR_8K;
			} else if (tx_bytes <= 10) {
				tx_latency = IXL_LOW_LATENCY;
				tx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (tx_bytes <= 20) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		}

		txr->latency = tx_latency;

		if (tx_itr != txr->itr) {
			/* Do an exponential smoothing, as on the RX path */
			tx_itr = (10 * tx_itr * txr->itr) /
			    ((9 * tx_itr) + txr->itr);
			txr->itr = min(tx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
			    que->me), txr->itr);
		}
	} else { /* We may have toggled to non-dynamic */
		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->tx_itr_setting = pf->tx_itr;
		/* Update the hardware if needed */
		if (txr->itr != vsi->tx_itr_setting) {
			txr->itr = vsi->tx_itr_setting;
			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
			    que->me), txr->itr);
		}
	}
	txr->bytes = 0;
	txr->packets = 0;
}

void
ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
	struct sysctl_ctx_list *ctx, const char *sysctl_name)
{
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;
	struct sysctl_oid_list *vsi_list;

	tree = device_get_sysctl_tree(pf->dev);
	child = SYSCTL_CHILDREN(tree);
	vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
	    CTLFLAG_RD, NULL, "VSI Number");
	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);

	ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
}

/*
 * ixl_sysctl_qtx_tail_handler
 * Retrieves I40E_QTX_TAIL value from hardware
 * for a sysctl.
 */
static int
ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_queue *que;
	int error;
	u32 val;

	que = ((struct ixl_queue *)oidp->oid_arg1);
	if (!que)
		return (0);

	val = rd32(que->vsi->hw, que->txr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}

/*
 * ixl_sysctl_qrx_tail_handler
 * Retrieves I40E_QRX_TAIL value from hardware
 * for a sysctl.
 */
static int
ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_queue *que;
	int error;
	u32 val;

	que = ((struct ixl_queue *)oidp->oid_arg1);
	if (!que)
		return (0);

	val = rd32(que->vsi->hw, que->rxr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}
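/*
 * Note the pattern in both handlers above: the tail register is read fresh
 * from hardware on every sysctl query, and since the OIDs are created with
 * CTLFLAG_RD an attempted write (req->newptr != NULL) never reaches the
 * device.
 */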

/*
 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_tx_itr;

	requested_tx_itr = pf->tx_itr;
	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_tx_itr) {
		device_printf(dev,
		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid TX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->tx_itr = requested_tx_itr;
	ixl_configure_tx_itr(pf);

	return (error);
}

/*
 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_rx_itr;

	requested_rx_itr = pf->rx_itr;
	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_rx_itr) {
		device_printf(dev,
		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid RX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->rx_itr = requested_rx_itr;
	ixl_configure_rx_itr(pf);

	return (error);
}

void
ixl_add_hw_stats(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_queue *queues = vsi->queues;
	struct i40e_hw_port_stats *pf_stats = &pf->stats;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	struct sysctl_oid_list *vsi_list;

	struct sysctl_oid *queue_node;
	struct sysctl_oid_list *queue_list;

	struct tx_ring *txr;
	struct rx_ring *rxr;
	char queue_namebuf[QUEUE_NAME_LEN];

	/* Driver statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &pf->watchdog_events,
	    "Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
	    CTLFLAG_RD, &pf->admin_irq,
	    "Admin Queue IRQ Handled");

	ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
	vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);

	/* Queue statistics */
	for (int q = 0; q < vsi->num_queues; q++) {
		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
		queue_list = SYSCTL_CHILDREN(queue_node);

		txr = &(queues[q].txr);
		rxr = &(queues[q].rxr);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
		    CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
		    "m_defrag() failed");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
		    CTLFLAG_RD, &(queues[q].irqs),
		    "irqs on this queue");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
		    CTLFLAG_RD, &(queues[q].tso),
		    "TSO");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
		    CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
		    "Driver tx dma failure in xmit");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
		    CTLFLAG_RD, &(queues[q].mss_too_small),
		    "TSO sends with an MSS less than 64");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
		    CTLFLAG_RD, &(txr->no_desc),
		    "Queue No Descriptor Available");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
		    CTLFLAG_RD, &(txr->total_packets),
		    "Queue Packets Transmitted");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
		    CTLFLAG_RD, &(txr->tx_bytes),
		    "Queue Bytes Transmitted");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
		    CTLFLAG_RD, &(rxr->rx_packets),
		    "Queue Packets Received");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
		    CTLFLAG_RD, &(rxr->rx_bytes),
		    "Queue Bytes Received");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err",
		    CTLFLAG_RD, &(rxr->desc_errs),
		    "Queue Rx Descriptor Errors");
		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
		    CTLFLAG_RD, &(rxr->itr), 0,
		    "Queue Rx ITR Interval");
		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
		    CTLFLAG_RD, &(txr->itr), 0,
		    "Queue Tx ITR Interval");
#ifdef IXL_DEBUG
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done",
		    CTLFLAG_RD, &(rxr->not_done),
		    "Queue Rx Descriptors not Done");
		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh",
		    CTLFLAG_RD, &(rxr->next_refresh), 0,
		    "Index of next Rx descriptor to refresh");
		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check",
		    CTLFLAG_RD, &(rxr->next_check), 0,
		    "Index of next Rx descriptor to check");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
		    CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
		    sizeof(struct ixl_queue),
		    ixl_sysctl_qtx_tail_handler, "IU",
		    "Queue Transmit Descriptor Tail");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
		    CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
		    sizeof(struct ixl_queue),
		    ixl_sysctl_qrx_tail_handler, "IU",
		    "Queue Receive Descriptor Tail");
#endif
	}

	/* MAC stats */
	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
}

void
ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
	struct sysctl_oid_list *child,
	struct i40e_eth_stats *eth_stats)
{
	struct ixl_sysctl_info ctls[] =
	{
		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
			"Unicast Packets Received"},
		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
			"Multicast Packets Received"},
		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
			"Broadcast Packets Received"},
		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
		{&eth_stats->tx_multicast, "mcast_pkts_txd",
			"Multicast Packets Transmitted"},
		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
			"Broadcast Packets Transmitted"},
		// end
		{0,0,0}
	};

	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
		    CTLFLAG_RD, entry->stat,
		    entry->description);
		entry++;
	}
}

void
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
	struct sysctl_oid_list *child,
	struct i40e_hw_port_stats *stats)
{
	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
	    CTLFLAG_RD, NULL, "Mac Statistics");
	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);

	struct i40e_eth_stats *eth_stats = &stats->eth;
	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);

	struct ixl_sysctl_info ctls[] =
	{
		{&stats->crc_errors, "crc_errors", "CRC Errors"},
		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
		/* Packet Reception Stats */
		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
		/* Packet Transmission Stats */
		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
		/* Flow control */
		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
		// end
		{0,0,0}
	};

	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
		    CTLFLAG_RD, entry->stat,
		    entry->description);
		entry++;
	}
}

static void
ixl_set_rss_key(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t	dev = pf->dev;
	enum i40e_status_code status;
#ifdef RSS
	u32		rss_seed[IXL_RSS_KEY_SIZE_REG];
#else
	u32		rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687,
	    0x183cfd8c, 0xce880440, 0x580cbc3c,
	    0x35897377, 0x328b25e1, 0x4fa98922,
	    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1,
	    0x0, 0x0, 0x0};
#endif

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_seed);
#endif
	/* Fill out hash function seed */
	if (hw->mac.type == I40E_MAC_X722) {
		struct i40e_aqc_get_set_rss_key_data key_data;
		bcopy(rss_seed, key_data.standard_rss_key, 40);
		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
		if (status)
			device_printf(dev, "i40e_aq_set_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
	}
}
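/*
 * Note on the two key paths above: rss_seed is IXL_RSS_KEY_SIZE_REG 32-bit
 * words; the X722 path hands the first 40 bytes to firmware as the standard
 * RSS key via the admin queue, while older MACs write each word directly
 * into the PFQF_HKEY register array.
 */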

/*
 * Configure enabled PCTYPES for RSS.
 */
static void
ixl_set_rss_pctypes(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u64		set_hena = 0, hena;

#ifdef RSS
	u32		rss_hash_config;

	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	if (hw->mac.type == I40E_MAC_X722)
		set_hena = IXL_DEFAULT_RSS_HENA_X722;
	else
		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
#endif
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= set_hena;
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
}

static void
ixl_set_rss_hlut(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	struct ixl_vsi	*vsi = &pf->vsi;
	int		i, que_id;
	int		lut_entry_width;
	u32		lut = 0;
	enum i40e_status_code status;

	if (hw->mac.type == I40E_MAC_X722)
		lut_entry_width = 7;
	else
		lut_entry_width = pf->hw.func_caps.rss_table_entry_width;

	/* Populate the LUT with max no. of queues in round robin fashion */
	u8 hlut_buf[512];
	for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues).
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % vsi->num_queues;
#else
		que_id = i % vsi->num_queues;
#endif
		lut = (que_id & ((0x1 << lut_entry_width) - 1));
		hlut_buf[i] = lut;
	}

	if (hw->mac.type == I40E_MAC_X722) {
		status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
		if (status)
			device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
			wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
		ixl_flush(hw);
	}
}
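/*
 * In the register path above, four 8-bit LUT entries are packed into each
 * 32-bit PFQF_HLUT register, hence only rss_table_size >> 2 register writes.
 */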

/*
** Setup the PF's RSS parameters.
*/
void
ixl_config_rss(struct ixl_pf *pf)
{
	ixl_set_rss_key(pf);
	ixl_set_rss_pctypes(pf);
	ixl_set_rss_hlut(pf);
}

/*
** This routine is run via a VLAN config EVENT; it lets us use the
** HW filter table since we can get the VLAN id. This just creates
** the entry in the soft version of the VFTA; init will repopulate
** the real table.
*/
void
ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct i40e_hw	*hw = vsi->hw;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;

	if (ifp->if_softc != arg)		/* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXL_PF_LOCK(pf);
	++vsi->num_vlans;
	ixl_add_filter(vsi, hw->mac.addr, vtag);
	IXL_PF_UNLOCK(pf);
}

/*
** This routine is run via a VLAN unconfig EVENT;
** remove our entry in the soft vfta.
*/
void
ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct i40e_hw	*hw = vsi->hw;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXL_PF_LOCK(pf);
	--vsi->num_vlans;
	ixl_del_filter(vsi, hw->mac.addr, vtag);
	IXL_PF_UNLOCK(pf);
}

/*
** This routine updates VLAN filters. It is called by init
** after a soft reset; it scans the filter table and then
** updates the hw.
*/
void
ixl_setup_vlan_filters(struct ixl_vsi *vsi)
{
	struct ixl_mac_filter	*f;
	int			cnt = 0, flags;

	if (vsi->num_vlans == 0)
		return;
	/*
	** Scan the filter list for vlan entries,
	** mark them for addition and then call
	** for the AQ update.
	*/
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if (f->flags & IXL_FILTER_VLAN) {
			f->flags |=
			    (IXL_FILTER_ADD |
			    IXL_FILTER_USED);
			cnt++;
		}
	}
	if (cnt == 0) {
		printf("setup vlan: no filters found!\n");
		return;
	}
	flags = IXL_FILTER_VLAN;
	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
	ixl_add_hw_filters(vsi, flags, cnt);
}

/*
** Initialize filter list and add filters that the hardware
** needs to know about.
**
** Requires VSI's filter list & seid to be set before calling.
*/
void
ixl_init_filters(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;

	/* Add broadcast address */
	ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);

	/*
	 * Prevent Tx flow control frames from being sent out by
	 * non-firmware transmitters.
	 * This affects every VSI in the PF.
	 */
	if (pf->enable_tx_fc_filter)
		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
}

/*
** This routine adds multicast filters
*/
void
ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
{
	struct ixl_mac_filter	*f;

	/* Does one already exist */
	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
	if (f != NULL)
		return;

	f = ixl_get_filter(vsi);
	if (f == NULL) {
		printf("WARNING: no filter available!!\n");
		return;
	}
	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
	f->vlan = IXL_VLAN_ANY;
	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
	    | IXL_FILTER_MC);
}

void
ixl_reconfigure_filters(struct ixl_vsi *vsi)
{
	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
}

/*
** This routine adds macvlan filters
*/
void
ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter	*f, *tmp;
	struct ixl_pf		*pf;
	device_t		dev;

	DEBUGOUT("ixl_add_filter: begin");

	pf = vsi->back;
	dev = pf->dev;

	/* Does one already exist */
	f = ixl_find_filter(vsi, macaddr, vlan);
	if (f != NULL)
		return;
	/*
	** Is this the first vlan being registered, if so we
	** need to remove the ANY filter that indicates we are
	** not in a vlan, and replace that with a 0 filter.
	*/
	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
		if (tmp != NULL) {
			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
			ixl_add_filter(vsi, macaddr, 0);
		}
	}

	f = ixl_get_filter(vsi);
	if (f == NULL) {
		device_printf(dev, "WARNING: no filter available!!\n");
		return;
	}
	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
	f->vlan = vlan;
	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
	if (f->vlan != IXL_VLAN_ANY)
		f->flags |= IXL_FILTER_VLAN;
	else
		vsi->num_macs++;

	ixl_add_hw_filters(vsi, f->flags, 1);
}

void
ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f;

	f = ixl_find_filter(vsi, macaddr, vlan);
	if (f == NULL)
		return;

	f->flags |= IXL_FILTER_DEL;
	ixl_del_hw_filters(vsi, 1);
	vsi->num_macs--;

	/* Check if this is the last vlan removal */
	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
		/* Switch back to a non-vlan filter */
		ixl_del_filter(vsi, macaddr, 0);
		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
	}
}
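/*
 * Example of the filter lifecycle above: a port with MAC M starts with one
 * (M, IXL_VLAN_ANY) filter. Registering VLAN 5 replaces it with (M, 0) and
 * adds (M, 5); unregistering the last VLAN removes (M, 0) and restores
 * (M, IXL_VLAN_ANY).
 */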

/*
** Find the filter with both matching mac addr and vlan id
*/
struct ixl_mac_filter *
ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter	*f;
	bool			match = FALSE;

	SLIST_FOREACH(f, &vsi->ftl, next) {
		if (!cmp_etheraddr(f->macaddr, macaddr))
			continue;
		if (f->vlan == vlan) {
			match = TRUE;
			break;
		}
	}

	if (!match)
		f = NULL;
	return (f);
}

/*
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
*/
void
ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
{
	struct i40e_aqc_add_macvlan_element_data *a, *b;
	struct ixl_mac_filter	*f;
	struct ixl_pf		*pf;
	struct i40e_hw		*hw;
	device_t		dev;
	int			err, j = 0;

	pf = vsi->back;
	hw = &pf->hw;
	dev = pf->dev;

	IXL_PF_LOCK_ASSERT(pf);

	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "add_hw_filters failed to get memory\n");
		return;
	}

	/*
	** Scan the filter list, each time we find one
	** we add it to the admin queue array and turn off
	** the add bit.
	*/
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if (f->flags == flags) {
			b = &a[j]; // a pox on fvl long names :)
			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
			if (f->vlan == IXL_VLAN_ANY) {
				b->vlan_tag = 0;
				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			} else {
				b->vlan_tag = f->vlan;
				b->flags = 0;
			}
			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			f->flags &= ~IXL_FILTER_ADD;
			j++;
		}
		if (j == cnt)
			break;
	}
	if (j > 0) {
		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
		if (err)
			device_printf(dev, "aq_add_macvlan err %d, "
			    "aq_error %d\n", err, hw->aq.asq_last_status);
		else
			vsi->hw_filters_add += j;
	}
	free(a, M_DEVBUF);
}

/*
** This routine takes removals in the vsi filter
** table and creates an Admin Queue call to delete
** the filters in the hardware.
*/
void
ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
{
	struct i40e_aqc_remove_macvlan_element_data *d, *e;
	struct ixl_pf		*pf;
	struct i40e_hw		*hw;
	device_t		dev;
	struct ixl_mac_filter	*f, *f_temp;
	int			err, j = 0;

	DEBUGOUT("ixl_del_hw_filters: begin\n");

	pf = vsi->back;
	hw = &pf->hw;
	dev = pf->dev;

	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		printf("del hw filter failed to get memory\n");
		return;
	}

	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
		if (f->flags & IXL_FILTER_DEL) {
			e = &d[j]; // a pox on fvl long names :)
			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			/* delete entry from vsi list */
			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
			free(f, M_DEVBUF);
			j++;
		}
		if (j == cnt)
			break;
	}
	if (j > 0) {
		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
			int sc = 0;
			for (int i = 0; i < j; i++)
				sc += (!d[i].error_code);
			vsi->hw_filters_del += sc;
			device_printf(dev,
			    "Failed to remove %d/%d filters, aq error %d\n",
			    j - sc, j, hw->aq.asq_last_status);
		} else
			vsi->hw_filters_del += j;
	}
	free(d, M_DEVBUF);

	DEBUGOUT("ixl_del_hw_filters: end\n");
}

int
ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw	*hw = &pf->hw;
	int		error = 0;
	u32		reg;
	u16		pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);

	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
	    I40E_QTX_ENA_QENA_STAT_MASK;
	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
	/* Verify the enable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
			break;
		i40e_msec_delay(10);
	}
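	/* Worst case the poll above waits 10 * 10 ms = 100 ms before giving up. */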
	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
		device_printf(pf->dev, "TX queue %d still disabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}
3448 struct i40e_hw *hw = &pf->hw;
3453 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3455 ixl_dbg(pf, IXL_DBG_EN_DIS,
3456 "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
3459 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3460 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3461 I40E_QRX_ENA_QENA_STAT_MASK;
3462 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
3463 /* Verify the enable took */
3464 for (int j = 0; j < 10; j++) {
3465 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3466 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3468 i40e_msec_delay(10);
3470 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3471 device_printf(pf->dev, "RX queue %d still disabled!\n",

int
ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	int error = 0;

	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
	/* Called function already prints error message */
	if (error)
		return (error);
	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);

	return (error);
}

/* For PF VSI only */
int
ixl_enable_rings(struct ixl_vsi *vsi)
{
	struct ixl_pf	*pf = vsi->back;
	int		error = 0;

	for (int i = 0; i < vsi->num_queues; i++) {
		error = ixl_enable_ring(pf, &pf->qtag, i);
		if (error)
			return (error);
	}

	return (error);
}

int
ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw	*hw = &pf->hw;
	int		error = 0;
	u32		reg;
	u16		pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
	i40e_usec_delay(500);

	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
	/* Verify the disable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
			break;
		i40e_msec_delay(10);
	}
	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
		device_printf(pf->dev, "TX queue %d still enabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

int
ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw	*hw = &pf->hw;
	int		error = 0;
	u32		reg;
	u16		pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
	/* Verify the disable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;
		i40e_msec_delay(10);
	}
	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
		device_printf(pf->dev, "RX queue %d still enabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

int
ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	int error = 0;

	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
	/* Called function already prints error message */
	if (error)
		return (error);
	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);

	return (error);
}

/* For PF VSI only */
int
ixl_disable_rings(struct ixl_vsi *vsi)
{
	struct ixl_pf	*pf = vsi->back;
	int		error = 0;

	for (int i = 0; i < vsi->num_queues; i++) {
		error = ixl_disable_ring(pf, &pf->qtag, i);
		if (error)
			return (error);
	}

	return (error);
}

/**
 * ixl_handle_mdd_event
 *
 * Called from interrupt handler to identify possibly malicious VFs
 * (it also detects events from the PF)
 **/
void
ixl_handle_mdd_event(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	u32 reg;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
		    I40E_GL_MDET_TX_EVENT_SHIFT;
		u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
		    I40E_GL_MDET_TX_QUEUE_SHIFT;
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d\n",
		    event, queue, pf_num);
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		u8 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
		    I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
		    I40E_GL_MDET_RX_EVENT_SHIFT;
		u16 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
		    I40E_GL_MDET_RX_QUEUE_SHIFT;
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on RX queue %d, pf number %d\n",
		    event, queue, pf_num);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			device_printf(dev,
			    "MDD TX event is for this function!");
			pf_mdd_detected = true;
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			device_printf(dev,
			    "MDD RX event is for this function!");
			pf_mdd_detected = true;
		}
	}

	/* re-enable mdd interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	ixl_flush(hw);
}

void
ixl_enable_intr(struct ixl_vsi *vsi)
{
	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;

	if (pf->msix > 1)
		for (int i = 0; i < vsi->num_queues; i++, que++)
			ixl_enable_queue(hw, que->me);
	else
		ixl_enable_intr0(hw);
}

void
ixl_disable_rings_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;

	for (int i = 0; i < vsi->num_queues; i++, que++)
		ixl_disable_queue(hw, que->me);
}

void
ixl_enable_intr0(struct i40e_hw *hw)
{
	u32		reg;

	/* Use IXL_ITR_NONE so ITR isn't updated here */
	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
}

void
ixl_disable_intr0(struct i40e_hw *hw)
{
	u32		reg;

	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
	ixl_flush(hw);
}

void
ixl_enable_queue(struct i40e_hw *hw, int id)
{
	u32		reg;

	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
}

void
ixl_disable_queue(struct i40e_hw *hw, int id)
{
	u32		reg;

	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
}

void
ixl_update_stats_counters(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ixl_vf	*vf;

	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;

	/* Update hw stats */
3745 /* Update hw stats */
3746 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3747 pf->stat_offsets_loaded,
3748 &osd->crc_errors, &nsd->crc_errors);
3749 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3750 pf->stat_offsets_loaded,
3751 &osd->illegal_bytes, &nsd->illegal_bytes);
3752 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3753 I40E_GLPRT_GORCL(hw->port),
3754 pf->stat_offsets_loaded,
3755 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3756 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3757 I40E_GLPRT_GOTCL(hw->port),
3758 pf->stat_offsets_loaded,
3759 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3760 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3761 pf->stat_offsets_loaded,
3762 &osd->eth.rx_discards,
3763 &nsd->eth.rx_discards);
3764 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3765 I40E_GLPRT_UPRCL(hw->port),
3766 pf->stat_offsets_loaded,
3767 &osd->eth.rx_unicast,
3768 &nsd->eth.rx_unicast);
3769 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3770 I40E_GLPRT_UPTCL(hw->port),
3771 pf->stat_offsets_loaded,
3772 &osd->eth.tx_unicast,
3773 &nsd->eth.tx_unicast);
3774 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3775 I40E_GLPRT_MPRCL(hw->port),
3776 pf->stat_offsets_loaded,
3777 &osd->eth.rx_multicast,
3778 &nsd->eth.rx_multicast);
3779 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3780 I40E_GLPRT_MPTCL(hw->port),
3781 pf->stat_offsets_loaded,
3782 &osd->eth.tx_multicast,
3783 &nsd->eth.tx_multicast);
3784 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3785 I40E_GLPRT_BPRCL(hw->port),
3786 pf->stat_offsets_loaded,
3787 &osd->eth.rx_broadcast,
3788 &nsd->eth.rx_broadcast);
3789 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3790 I40E_GLPRT_BPTCL(hw->port),
3791 pf->stat_offsets_loaded,
3792 &osd->eth.tx_broadcast,
3793 &nsd->eth.tx_broadcast);
3795 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3796 pf->stat_offsets_loaded,
3797 &osd->tx_dropped_link_down,
3798 &nsd->tx_dropped_link_down);
3799 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3800 pf->stat_offsets_loaded,
3801 &osd->mac_local_faults,
3802 &nsd->mac_local_faults);
3803 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3804 pf->stat_offsets_loaded,
3805 &osd->mac_remote_faults,
3806 &nsd->mac_remote_faults);
3807 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3808 pf->stat_offsets_loaded,
3809 &osd->rx_length_errors,
3810 &nsd->rx_length_errors);
3812 /* Flow control (LFC) stats */
3813 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3814 pf->stat_offsets_loaded,
3815 &osd->link_xon_rx, &nsd->link_xon_rx);
3816 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3817 pf->stat_offsets_loaded,
3818 &osd->link_xon_tx, &nsd->link_xon_tx);
3819 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3820 pf->stat_offsets_loaded,
3821 &osd->link_xoff_rx, &nsd->link_xoff_rx);
3822 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3823 pf->stat_offsets_loaded,
3824 &osd->link_xoff_tx, &nsd->link_xoff_tx);
3826 /* Packet size stats rx */
3827 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3828 I40E_GLPRT_PRC64L(hw->port),
3829 pf->stat_offsets_loaded,
3830 &osd->rx_size_64, &nsd->rx_size_64);
3831 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3832 I40E_GLPRT_PRC127L(hw->port),
3833 pf->stat_offsets_loaded,
3834 &osd->rx_size_127, &nsd->rx_size_127);
3835 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3836 I40E_GLPRT_PRC255L(hw->port),
3837 pf->stat_offsets_loaded,
3838 &osd->rx_size_255, &nsd->rx_size_255);
3839 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3840 I40E_GLPRT_PRC511L(hw->port),
3841 pf->stat_offsets_loaded,
3842 &osd->rx_size_511, &nsd->rx_size_511);
3843 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3844 I40E_GLPRT_PRC1023L(hw->port),
3845 pf->stat_offsets_loaded,
3846 &osd->rx_size_1023, &nsd->rx_size_1023);
3847 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3848 I40E_GLPRT_PRC1522L(hw->port),
3849 pf->stat_offsets_loaded,
3850 &osd->rx_size_1522, &nsd->rx_size_1522);
3851 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3852 I40E_GLPRT_PRC9522L(hw->port),
3853 pf->stat_offsets_loaded,
3854 &osd->rx_size_big, &nsd->rx_size_big);
3856 /* Packet size stats tx */
3857 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3858 I40E_GLPRT_PTC64L(hw->port),
3859 pf->stat_offsets_loaded,
3860 &osd->tx_size_64, &nsd->tx_size_64);
3861 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3862 I40E_GLPRT_PTC127L(hw->port),
3863 pf->stat_offsets_loaded,
3864 &osd->tx_size_127, &nsd->tx_size_127);
3865 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3866 I40E_GLPRT_PTC255L(hw->port),
3867 pf->stat_offsets_loaded,
3868 &osd->tx_size_255, &nsd->tx_size_255);
3869 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3870 I40E_GLPRT_PTC511L(hw->port),
3871 pf->stat_offsets_loaded,
3872 &osd->tx_size_511, &nsd->tx_size_511);
3873 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3874 I40E_GLPRT_PTC1023L(hw->port),
3875 pf->stat_offsets_loaded,
3876 &osd->tx_size_1023, &nsd->tx_size_1023);
3877 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3878 I40E_GLPRT_PTC1522L(hw->port),
3879 pf->stat_offsets_loaded,
3880 &osd->tx_size_1522, &nsd->tx_size_1522);
3881 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3882 I40E_GLPRT_PTC9522L(hw->port),
3883 pf->stat_offsets_loaded,
3884 &osd->tx_size_big, &nsd->tx_size_big);
3886 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
3887 pf->stat_offsets_loaded,
3888 &osd->rx_undersize, &nsd->rx_undersize);
3889 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
3890 pf->stat_offsets_loaded,
3891 &osd->rx_fragments, &nsd->rx_fragments);
3892 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
3893 pf->stat_offsets_loaded,
3894 &osd->rx_oversize, &nsd->rx_oversize);
3895 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
3896 pf->stat_offsets_loaded,
3897 &osd->rx_jabber, &nsd->rx_jabber);
	pf->stat_offsets_loaded = true;

	/* Update vsi stats */
	ixl_update_vsi_stats(vsi);

	for (int i = 0; i < pf->num_vfs; i++) {
		vf = &pf->vfs[i];
		if (vf->vf_flags & VF_FLAG_ENABLED)
			ixl_update_eth_stats(&pf->vfs[i].vsi);
	}
}

int
ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	bool is_up = false;
	int error = 0;

	is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);

	/* Teardown */
	if (is_up)
		ixl_stop(pf);
	error = i40e_shutdown_lan_hmc(hw);
	if (error)
		device_printf(dev,
		    "Shutdown LAN HMC failed with code %d\n", error);
	ixl_disable_intr0(hw);
	ixl_teardown_adminq_msix(pf);
	error = i40e_shutdown_adminq(hw);
	if (error)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", error);

	/* Setup */
	error = i40e_init_adminq(hw);
	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
		    error);
	}
	error = ixl_setup_adminq_msix(pf);
	if (error) {
		device_printf(dev, "ixl_setup_adminq_msix error: %d\n",
		    error);
	}
	ixl_configure_intr0_msix(pf);
	ixl_enable_intr0(hw);
	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (error) {
		device_printf(dev, "init_lan_hmc failed: %d\n", error);
	}
	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
	}
	if (is_up)
		ixl_init(pf);

	return (0);
}

void
ixl_handle_empr_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int count = 0;
	u32 reg;

	/* Typically finishes within 3-4 seconds */
	while (count++ < 100) {
		reg = rd32(hw, I40E_GLGEN_RSTAT)
		    & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
		if (reg)
			i40e_msec_delay(100);
		else
			break;
	}
	ixl_dbg(pf, IXL_DBG_INFO,
	    "EMPR reset wait count: %d\n", count);
3984 device_printf(dev, "Rebuilding driver state...\n");
3985 ixl_rebuild_hw_structs_after_reset(pf);
3986 device_printf(dev, "Rebuilding driver state done.\n");
3988 atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);

/*
** Tasklet handler for MSIX Adminq interrupts
**  - do outside interrupt since it might sleep
*/
void
ixl_do_adminq(void *context, int pending)
{
	struct ixl_pf			*pf = context;
	struct i40e_hw			*hw = &pf->hw;
	struct i40e_arq_event_info	event;
	i40e_status			ret;
	device_t			dev = pf->dev;
	u16				opcode, result = 0;
	u32				loop = 0;

	if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
		/* Flag cleared at end of this function */
		ixl_handle_empr_reset(pf);
		return;
	}

	/* Admin Queue handling */
	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.buf_len,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!event.msg_buf) {
		device_printf(dev, "%s: Unable to allocate memory for Admin"
		    " Queue event!\n", __func__);
		return;
	}

	IXL_PF_LOCK(pf);
	/* clean and process any events */
	do {
		ret = i40e_clean_arq_element(hw, &event, &result);
		if (ret)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
		ixl_dbg(pf, IXL_DBG_AQ,
		    "Admin Queue event: %#06x\n", opcode);
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			ixl_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
#ifdef PCI_IOV
			ixl_handle_vf_msg(pf, &event);
#endif
			break;
		case i40e_aqc_opc_event_lan_overflow:
		default:
			break;
		}
	} while (result && (loop++ < IXL_ADM_LIMIT));
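	/*
	 * result stays nonzero while the firmware reports more pending ARQ
	 * entries; IXL_ADM_LIMIT bounds the loop so a single task invocation
	 * cannot monopolize the taskqueue thread.
	 */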
	free(event.msg_buf, M_DEVBUF);

	/*
	 * If there are still messages to process, reschedule ourselves.
	 * Otherwise, re-enable our interrupt.
	 */
	if (result)
		taskqueue_enqueue(pf->tq, &pf->adminq);
	else
		ixl_enable_intr0(hw);

	IXL_PF_UNLOCK(pf);
}

/**
 * Update VSI-specific ethernet statistics counters.
 **/
void
ixl_update_eth_stats(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *es;
	struct i40e_eth_stats *oes;
	struct i40e_hw_port_stats *nsd;
	u16 stat_idx = vsi->info.stat_counter_idx;

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;
	nsd = &pf->stats;

	/* Gather up the stats that the hw collects */
	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
	    vsi->stat_offsets_loaded,
	    &oes->tx_errors, &es->tx_errors);
	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
	    vsi->stat_offsets_loaded,
	    &oes->rx_discards, &es->rx_discards);

	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
	    I40E_GLV_GORCL(stat_idx),
	    vsi->stat_offsets_loaded,
	    &oes->rx_bytes, &es->rx_bytes);
	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
	    I40E_GLV_UPRCL(stat_idx),
	    vsi->stat_offsets_loaded,
	    &oes->rx_unicast, &es->rx_unicast);
	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
	    I40E_GLV_MPRCL(stat_idx),
	    vsi->stat_offsets_loaded,
	    &oes->rx_multicast, &es->rx_multicast);
	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
	    I40E_GLV_BPRCL(stat_idx),
	    vsi->stat_offsets_loaded,
	    &oes->rx_broadcast, &es->rx_broadcast);

	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
	    I40E_GLV_GOTCL(stat_idx),
	    vsi->stat_offsets_loaded,
	    &oes->tx_bytes, &es->tx_bytes);
	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
	    I40E_GLV_UPTCL(stat_idx),
	    vsi->stat_offsets_loaded,
	    &oes->tx_unicast, &es->tx_unicast);
	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
	    I40E_GLV_MPTCL(stat_idx),
	    vsi->stat_offsets_loaded,
	    &oes->tx_multicast, &es->tx_multicast);
	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
	    I40E_GLV_BPTCL(stat_idx),
	    vsi->stat_offsets_loaded,
	    &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

static void
ixl_update_vsi_stats(struct ixl_vsi *vsi)
{
	struct ixl_pf		*pf;
	struct i40e_eth_stats	*es;
	u64			tx_discards;

	struct i40e_hw_port_stats *nsd;

	pf = vsi->back;
	es = &vsi->eth_stats;
	nsd = &pf->stats;

	ixl_update_eth_stats(vsi);

	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
	for (int i = 0; i < vsi->num_queues; i++)
		tx_discards += vsi->queues[i].txr.br->br_drops;

	/* Update ifnet stats */
	IXL_SET_IPACKETS(vsi, es->rx_unicast +
	    es->rx_multicast +
	    es->rx_broadcast);
	IXL_SET_OPACKETS(vsi, es->tx_unicast +
	    es->tx_multicast +
	    es->tx_broadcast);
	IXL_SET_IBYTES(vsi, es->rx_bytes);
	IXL_SET_OBYTES(vsi, es->tx_bytes);
	IXL_SET_IMCASTS(vsi, es->rx_multicast);
	IXL_SET_OMCASTS(vsi, es->tx_multicast);

	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
	    nsd->rx_jabber);
	IXL_SET_OERRORS(vsi, es->tx_errors);
	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
	IXL_SET_OQDROPS(vsi, tx_discards);
	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
	IXL_SET_COLLISIONS(vsi, 0);
}

/**
 * Reset all of the stats for the given pf
 **/
void
ixl_pf_reset_stats(struct ixl_pf *pf)
{
	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
	pf->stat_offsets_loaded = false;
}

/**
 * Resets all stats of the given vsi
 **/
void
ixl_vsi_reset_stats(struct ixl_vsi *vsi)
{
	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
	vsi->stat_offsets_loaded = false;
}

/**
 * Read and update a 48 bit stat from the hw
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts. We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.
 **/
static void
ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
	bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
	new_data = rd64(hw, loreg);
#else
	/*
	 * Use two rd32's instead of one rd64; FreeBSD versions before
	 * 10 don't support 64-bit bus reads/writes.
	 */
	new_data = rd32(hw, loreg);
	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
#endif

	if (!offset_loaded)
		*offset = new_data;
	if (new_data >= *offset)
		*stat = new_data - *offset;
	else
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
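/*
 * Rollover example for the 48-bit math above: with offset = 0xFFFFFFFFFFF0
 * and a raw read of 0x10 the counter has wrapped, so
 * (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20 is the true delta, and the final
 * mask keeps the result within 48 bits.
 */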

/**
 * Read and update a 32 bit stat from the hw
 **/
static void
ixl_stat_update32(struct i40e_hw *hw, u32 reg,
	bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (new_data >= *offset)
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}

void
ixl_add_device_sysctls(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid_list *ctx_list =
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	struct sysctl_oid *debug_node;
	struct sysctl_oid_list *debug_list;

	struct sysctl_oid *fec_node;
	struct sysctl_oid_list *fec_list;

	/* Set up sysctls */
	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_current_speed, "A", "Current Port Speed");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_unallocated_queues, "I",
	    "Queues not allocated to a PF or VF");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_sysctl_pf_tx_itr, "I",
	    "Immediately set TX ITR value for all queues");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_sysctl_pf_rx_itr, "I",
	    "Immediately set RX ITR value for all queues");

	SYSCTL_ADD_INT(ctx, ctx_list,
	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
	    &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");

	SYSCTL_ADD_INT(ctx, ctx_list,
	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
	    &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");

	/* Add FEC sysctls for 25G adapters */
	/*
	 * XXX: These settings can be changed, but that isn't supported,
	 * so these are read-only for now.
	 */
	if (hw->device_id == I40E_DEV_ID_25G_B
	    || hw->device_id == I40E_DEV_ID_25G_SFP28) {
		fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
		    OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
		fec_list = SYSCTL_CHILDREN(fec_node);

		SYSCTL_ADD_PROC(ctx, fec_list,
		    OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RD,
		    pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");

		SYSCTL_ADD_PROC(ctx, fec_list,
		    OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RD,
		    pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");

		SYSCTL_ADD_PROC(ctx, fec_list,
		    OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RD,
		    pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");

		SYSCTL_ADD_PROC(ctx, fec_list,
		    OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RD,
		    pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");

		SYSCTL_ADD_PROC(ctx, fec_list,
		    OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RD,
		    pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
	}

	/*
	 * Add sysctls meant to print debug information, but don't list them
	 * in "sysctl -a" output.
	 */
	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
	debug_list = SYSCTL_CHILDREN(debug_node);

	SYSCTL_ADD_UINT(ctx, debug_list,
	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
	    &pf->hw.debug_mask, 0, "Shared code debug message level");

	SYSCTL_ADD_UINT(ctx, debug_list,
	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
	    &pf->dbg_mask, 0, "Non-shared code debug message level");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
	    pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_sysctl_read_i2c_byte, "I", "Read byte from I2C bus");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_sysctl_write_i2c_byte, "I", "Write byte to I2C bus");

#ifdef PCI_IOV
	SYSCTL_ADD_UINT(ctx, debug_list,
	    OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
	    0, "PF/VF Virtual Channel debug level");
#endif
}

/*
 * Primarily for finding out how many queues can be assigned to VFs,
 * at runtime.
 */
static int
ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int queues;

	IXL_PF_LOCK(pf);
	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
	IXL_PF_UNLOCK(pf);

	return sysctl_handle_int(oidp, NULL, queues, req);
}
4412 ** Set flow control using sysctl:
4419 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4421 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4422 struct i40e_hw *hw = &pf->hw;
4423 device_t dev = pf->dev;
4424 int requested_fc, error = 0;
4425 enum i40e_status_code aq_error = 0;
4429 requested_fc = pf->fc;
4430 error = sysctl_handle_int(oidp, &requested_fc, 0, req);
4431 if ((error) || (req->newptr == NULL))
4433 if (requested_fc < 0 || requested_fc > 3) {
4435 "Invalid fc mode; valid modes are 0 through 3\n");
4439 /* Set fc ability for port */
4440 hw->fc.requested_mode = requested_fc;
4441 aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4444 "%s: Error setting new fc mode %d; fc_err %#x\n",
4445 __func__, aq_error, fc_aq_err);
4448 pf->fc = requested_fc;
4450 /* Get new link state */
4451 i40e_msec_delay(250);
4452 hw->phy.get_link_info = TRUE;
4453 i40e_get_link_status(hw, &pf->link_up);
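/*
 * Illustrative userland sketch, kept under #if 0 so it is never built
 * into the driver.  The "dev.ixl.0.fc" OID path and the meaning of
 * mode 3 as full rx/tx pause are assumptions; the handler above only
 * guarantees that valid modes are 0 through 3.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

/* Set flow control mode 3 on unit 0 and read the result back. */
static int
set_full_flow_control(void)
{
	int fc = 3, cur;
	size_t len = sizeof(cur);

	if (sysctlbyname("dev.ixl.0.fc", NULL, NULL, &fc, sizeof(fc)) != 0)
		return (-1);
	return (sysctlbyname("dev.ixl.0.fc", &cur, &len, NULL, 0));
}
#endif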
4459 ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
4473 switch (link_speed) {
4474 case I40E_LINK_SPEED_100MB:
4477 case I40E_LINK_SPEED_1GB:
4480 case I40E_LINK_SPEED_10GB:
4483 case I40E_LINK_SPEED_40GB:
4486 case I40E_LINK_SPEED_20GB:
4489 case I40E_LINK_SPEED_25GB:
4492 case I40E_LINK_SPEED_UNKNOWN:
4498 return speeds[index];
4502 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4504 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4505 struct i40e_hw *hw = &pf->hw;
4508 ixl_update_link_status(pf);
4510 error = sysctl_handle_string(oidp,
4511 ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
4517 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
4519 static u16 speedmap[6] = {
4520 (I40E_LINK_SPEED_100MB | (0x1 << 8)),
4521 (I40E_LINK_SPEED_1GB | (0x2 << 8)),
4522 (I40E_LINK_SPEED_10GB | (0x4 << 8)),
4523 (I40E_LINK_SPEED_20GB | (0x8 << 8)),
4524 (I40E_LINK_SPEED_25GB | (0x10 << 8)),
4525 (I40E_LINK_SPEED_40GB | (0x20 << 8))
4529 for (int i = 0; i < 6; i++) {
4531 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
4533 retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
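/*
 * Worked example: each speedmap entry pairs an AQ link-speed flag in
 * its low byte with the corresponding sysctl flag in its high byte.
 * With to_aq == true, a sysctl mask of 0x6 (1G | 10G in the advertise
 * encoding below) yields I40E_LINK_SPEED_1GB | I40E_LINK_SPEED_10GB;
 * with to_aq == false, the same AQ mask converts back to 0x6.
 */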
4540 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4542 struct i40e_hw *hw = &pf->hw;
4543 device_t dev = pf->dev;
4544 struct i40e_aq_get_phy_abilities_resp abilities;
4545 struct i40e_aq_set_phy_config config;
4546 enum i40e_status_code aq_error = 0;
4548 /* Get current capability information */
4549 aq_error = i40e_aq_get_phy_capabilities(hw,
4550 FALSE, FALSE, &abilities, NULL);
4553 "%s: Error getting phy capabilities %d,"
4554 " aq error: %d\n", __func__, aq_error,
4555 hw->aq.asq_last_status);
4559 /* Prepare new config */
4560 bzero(&config, sizeof(config));
4561 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
4562 config.phy_type = abilities.phy_type;
4563 config.phy_type_ext = abilities.phy_type_ext;
4564 config.abilities = abilities.abilities
4565 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4566 config.eee_capability = abilities.eee_capability;
4567 config.eeer = abilities.eeer_val;
4568 config.low_power_ctrl = abilities.d3_lpan;
4570 /* Do aq command & restart link */
4571 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4574 "%s: Error setting new phy config %d,"
4575 " aq error: %d\n", __func__, aq_error,
4576 hw->aq.asq_last_status);
4584 ** Control link advertise speed:
4586 ** 0x1 - advertise 100 Mb
4587 ** 0x2 - advertise 1G
4588 ** 0x4 - advertise 10G
4589 ** 0x8 - advertise 20G
4590 ** 0x10 - advertise 25G
4591 ** 0x20 - advertise 40G
4593 ** Set to 0 to disable link
4596 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4598 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4599 struct i40e_hw *hw = &pf->hw;
4600 device_t dev = pf->dev;
4601 u8 converted_speeds;
4602 int requested_ls = 0;
4605 /* Read in new mode */
4606 requested_ls = pf->advertised_speed;
4607 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4608 if ((error) || (req->newptr == NULL))
4610 /* Check if changing speeds is supported */
4611 switch (hw->device_id) {
4612 case I40E_DEV_ID_25G_B:
4613 case I40E_DEV_ID_25G_SFP28:
4614 device_printf(dev, "Changing advertised speeds not supported"
4615 " on this device.\n");
4618 if (requested_ls < 0 || requested_ls > 0xff) {
4621 /* Check for valid value */
4622 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
4623 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
4624 device_printf(dev, "Invalid advertised speed; "
4625 "valid flags are: 0x%02x\n",
4626 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
4630 error = ixl_set_advertised_speeds(pf, requested_ls);
4634 pf->advertised_speed = requested_ls;
4635 ixl_update_link_status(pf);
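/*
 * Illustrative userland sketch, kept under #if 0.  The sysctl name
 * "advertise_speed" matches the hint printed by ixl_media_change()
 * below; the "dev.ixl.0" prefix assumes unit 0.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

/* Advertise only 1G and 10G (0x2 | 0x4 per the mask documented above). */
static int
advertise_1g_10g(void)
{
	int speeds = 0x2 | 0x4;

	return (sysctlbyname("dev.ixl.0.advertise_speed", NULL, NULL,
	    &speeds, sizeof(speeds)));
}
#endif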
4640 * Input: bitmap of enum i40e_aq_link_speed
4643 ixl_max_aq_speed_to_value(u8 link_speeds)
4645 if (link_speeds & I40E_LINK_SPEED_40GB)
4647 if (link_speeds & I40E_LINK_SPEED_25GB)
4649 if (link_speeds & I40E_LINK_SPEED_20GB)
4651 if (link_speeds & I40E_LINK_SPEED_10GB)
4653 if (link_speeds & I40E_LINK_SPEED_1GB)
4655 if (link_speeds & I40E_LINK_SPEED_100MB)
4656 return IF_Mbps(100);
4658 /* Minimum supported link speed */
4659 return IF_Mbps(100);
4663 ** Get the width and transaction speed of
4664 ** the bus this adapter is plugged into.
4667 ixl_get_bus_info(struct ixl_pf *pf)
4669 struct i40e_hw *hw = &pf->hw;
4670 device_t dev = pf->dev;
4672 u32 offset, num_ports;
4675 /* Some devices don't use PCIE */
4676 if (hw->mac.type == I40E_MAC_X722)
4679 /* Read PCI Express Capabilities Link Status Register */
4680 pci_find_cap(dev, PCIY_EXPRESS, &offset);
4681 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4683 /* Fill out hw struct with PCIE info */
4684 i40e_set_pci_config_data(hw, link);
4686 /* Use info to print out bandwidth messages */
4687 device_printf(dev, "PCI Express Bus: Speed %s %s\n",
4688 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4689 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4690 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4691 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4692 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4693 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
4694 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4698 * If adapter is in slot with maximum supported speed,
4699 * no warning message needs to be printed out.
4701 if (hw->bus.speed >= i40e_bus_speed_8000
4702 && hw->bus.width >= i40e_bus_width_pcie_x8)
4705 num_ports = bitcount32(hw->func_caps.valid_functions);
4706 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
4708 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
4709 device_printf(dev, "PCI-Express bandwidth available"
4710 " for this device may be insufficient for"
4711 " optimal performance.\n");
4712 device_printf(dev, "Please move the device to a different"
4713 " PCI-e link with more lanes and/or higher"
4714 " transfer rate.\n");
4719 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4721 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4722 struct i40e_hw *hw = &pf->hw;
4725 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4726 ixl_nvm_version_str(hw, sbuf);
4734 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
4736 if ((nvma->command == I40E_NVM_READ) &&
4737 ((nvma->config & 0xFF) == 0xF) &&
4738 (((nvma->config & 0xF00) >> 8) == 0xF) &&
4739 (nvma->offset == 0) &&
4740 (nvma->data_size == 1)) {
4741 // device_printf(dev, "- Get Driver Status Command\n");
4743 else if (nvma->command == I40E_NVM_READ) {
4747 switch (nvma->command) {
4749 device_printf(dev, "- command: I40E_NVM_READ\n");
4752 device_printf(dev, "- command: I40E_NVM_WRITE\n");
4755 device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
4759 device_printf(dev, "- config (ptr) : 0x%02x\n", nvma->config & 0xFF);
4760 device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
4761 device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
4762 device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
4767 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
4769 struct i40e_hw *hw = &pf->hw;
4770 struct i40e_nvm_access *nvma;
4771 device_t dev = pf->dev;
4772 enum i40e_status_code status = 0;
4775 DEBUGFUNC("ixl_handle_nvmupd_cmd");
4778 if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
4779 ifd->ifd_data == NULL) {
4780 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
4782 device_printf(dev, "%s: ifdrv length: %lu, sizeof(struct i40e_nvm_access): %lu\n",
4783 __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access));
4784 device_printf(dev, "%s: data pointer: %p\n", __func__,
4789 nvma = (struct i40e_nvm_access *)ifd->ifd_data;
4791 if (pf->dbg_mask & IXL_DBG_NVMUPD)
4792 ixl_print_nvm_cmd(dev, nvma);
4794 if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
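/* Wait up to 100 * 100ms = 10 seconds for the EMPR reset to finish */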
4796 while (count++ < 100) {
4797 i40e_msec_delay(100);
4798 if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING))
4803 if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) {
4805 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
4812 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
4813 i40e_stat_str(hw, status), perrno);
4816 * -EPERM is actually ERESTART, which the kernel interprets as a request
4817 * to re-run this ioctl. So return -EACCES in place of -EPERM.
4819 if (perrno == -EPERM)
4825 /*********************************************************************
4827 * Media Ioctl callback
4829 * This routine is called whenever the user queries the status of
4830 * the interface using ifconfig.
4832 **********************************************************************/
4834 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
4836 struct ixl_vsi *vsi = ifp->if_softc;
4837 struct ixl_pf *pf = vsi->back;
4838 struct i40e_hw *hw = &pf->hw;
4840 INIT_DEBUGOUT("ixl_media_status: begin");
4843 hw->phy.get_link_info = TRUE;
4844 i40e_get_link_status(hw, &pf->link_up);
4845 ixl_update_link_status(pf);
4847 ifmr->ifm_status = IFM_AVALID;
4848 ifmr->ifm_active = IFM_ETHER;
4855 ifmr->ifm_status |= IFM_ACTIVE;
4857 /* Hardware always does full-duplex */
4858 ifmr->ifm_active |= IFM_FDX;
4860 switch (hw->phy.link_info.phy_type) {
4862 case I40E_PHY_TYPE_100BASE_TX:
4863 ifmr->ifm_active |= IFM_100_TX;
4866 case I40E_PHY_TYPE_1000BASE_T:
4867 ifmr->ifm_active |= IFM_1000_T;
4869 case I40E_PHY_TYPE_1000BASE_SX:
4870 ifmr->ifm_active |= IFM_1000_SX;
4872 case I40E_PHY_TYPE_1000BASE_LX:
4873 ifmr->ifm_active |= IFM_1000_LX;
4875 case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
4876 ifmr->ifm_active |= IFM_OTHER;
4879 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
4880 ifmr->ifm_active |= IFM_10G_TWINAX;
4882 case I40E_PHY_TYPE_10GBASE_SR:
4883 ifmr->ifm_active |= IFM_10G_SR;
4885 case I40E_PHY_TYPE_10GBASE_LR:
4886 ifmr->ifm_active |= IFM_10G_LR;
4888 case I40E_PHY_TYPE_10GBASE_T:
4889 ifmr->ifm_active |= IFM_10G_T;
4891 case I40E_PHY_TYPE_XAUI:
4892 case I40E_PHY_TYPE_XFI:
4893 case I40E_PHY_TYPE_10GBASE_AOC:
4894 ifmr->ifm_active |= IFM_OTHER;
4897 case I40E_PHY_TYPE_25GBASE_KR:
4898 ifmr->ifm_active |= IFM_25G_KR;
4900 case I40E_PHY_TYPE_25GBASE_CR:
4901 ifmr->ifm_active |= IFM_25G_CR;
4903 case I40E_PHY_TYPE_25GBASE_SR:
4904 ifmr->ifm_active |= IFM_25G_SR;
4906 case I40E_PHY_TYPE_25GBASE_LR:
4907 ifmr->ifm_active |= IFM_UNKNOWN;
4910 case I40E_PHY_TYPE_40GBASE_CR4:
4911 case I40E_PHY_TYPE_40GBASE_CR4_CU:
4912 ifmr->ifm_active |= IFM_40G_CR4;
4914 case I40E_PHY_TYPE_40GBASE_SR4:
4915 ifmr->ifm_active |= IFM_40G_SR4;
4917 case I40E_PHY_TYPE_40GBASE_LR4:
4918 ifmr->ifm_active |= IFM_40G_LR4;
4920 case I40E_PHY_TYPE_XLAUI:
4921 ifmr->ifm_active |= IFM_OTHER;
4923 case I40E_PHY_TYPE_1000BASE_KX:
4924 ifmr->ifm_active |= IFM_1000_KX;
4926 case I40E_PHY_TYPE_SGMII:
4927 ifmr->ifm_active |= IFM_1000_SGMII;
4929 /* ERJ: What's the difference between these? */
4930 case I40E_PHY_TYPE_10GBASE_CR1_CU:
4931 case I40E_PHY_TYPE_10GBASE_CR1:
4932 ifmr->ifm_active |= IFM_10G_CR1;
4934 case I40E_PHY_TYPE_10GBASE_KX4:
4935 ifmr->ifm_active |= IFM_10G_KX4;
4937 case I40E_PHY_TYPE_10GBASE_KR:
4938 ifmr->ifm_active |= IFM_10G_KR;
4940 case I40E_PHY_TYPE_SFI:
4941 ifmr->ifm_active |= IFM_10G_SFI;
4943 /* Our single 20G media type */
4944 case I40E_PHY_TYPE_20GBASE_KR2:
4945 ifmr->ifm_active |= IFM_20G_KR2;
4947 case I40E_PHY_TYPE_40GBASE_KR4:
4948 ifmr->ifm_active |= IFM_40G_KR4;
4950 case I40E_PHY_TYPE_XLPPI:
4951 case I40E_PHY_TYPE_40GBASE_AOC:
4952 ifmr->ifm_active |= IFM_40G_XLPPI;
4954 /* Unknown to driver */
4956 ifmr->ifm_active |= IFM_UNKNOWN;
4959 /* Report flow control status as well */
4960 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
4961 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
4962 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
4963 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
4971 struct ixl_pf *pf = arg;
4974 ixl_init_locked(pf);
4979 * NOTE: Fortville does not support forcing media speeds. Instead,
4980 * use the 'advertise_speed' sysctl to set the speeds Fortville
4981 * will advertise or be allowed to operate at.
4984 ixl_media_change(struct ifnet * ifp)
4986 struct ixl_vsi *vsi = ifp->if_softc;
4987 struct ifmedia *ifm = &vsi->media;
4989 INIT_DEBUGOUT("ixl_media_change: begin");
4991 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4994 if_printf(ifp, "Use 'advertise_speed' sysctl to change advertised speeds\n");
4999 /*********************************************************************
5002 * ixl_ioctl is called when the user wants to configure the
5005 * return 0 on success, positive on failure
5006 **********************************************************************/
5009 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
5011 struct ixl_vsi *vsi = ifp->if_softc;
5012 struct ixl_pf *pf = vsi->back;
5013 struct ifreq *ifr = (struct ifreq *)data;
5014 struct ifdrv *ifd = (struct ifdrv *)data;
5015 #if defined(INET) || defined(INET6)
5016 struct ifaddr *ifa = (struct ifaddr *)data;
5017 bool avoid_reset = FALSE;
5024 IOCTL_DEBUGOUT("ioctl: SIOCSIFADDR (Set Interface Address)");
5026 if (ifa->ifa_addr->sa_family == AF_INET)
5030 if (ifa->ifa_addr->sa_family == AF_INET6)
5033 #if defined(INET) || defined(INET6)
5035 ** Calling init results in link renegotiation,
5036 ** so we avoid doing it when possible.
5039 ifp->if_flags |= IFF_UP;
5040 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
5043 if (!(ifp->if_flags & IFF_NOARP))
5044 arp_ifinit(ifp, ifa);
5047 error = ether_ioctl(ifp, command, data);
5051 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
5052 if (ifr->ifr_mtu > IXL_MAX_FRAME -
5053 ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
5057 ifp->if_mtu = ifr->ifr_mtu;
5058 vsi->max_frame_size =
5059 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
5060 + ETHER_VLAN_ENCAP_LEN;
5061 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5062 ixl_init_locked(pf);
5067 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
5069 if (ifp->if_flags & IFF_UP) {
5070 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5071 if ((ifp->if_flags ^ pf->if_flags) &
5072 (IFF_PROMISC | IFF_ALLMULTI)) {
5073 ixl_set_promisc(vsi);
5081 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5082 ixl_stop_locked(pf);
5085 pf->if_flags = ifp->if_flags;
5090 IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
5093 /* NVM update command */
5094 if (ifd->ifd_cmd == I40E_NVM_ACCESS)
5095 error = ixl_handle_nvmupd_cmd(pf, ifd);
5100 IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
5101 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5103 ixl_disable_rings_intr(vsi);
5105 ixl_enable_intr(vsi);
5110 IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
5111 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5113 ixl_disable_rings_intr(vsi);
5115 ixl_enable_intr(vsi);
5122 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
5123 error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
5127 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5128 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
5130 ixl_cap_txcsum_tso(vsi, ifp, mask);
5132 if (mask & IFCAP_RXCSUM)
5133 ifp->if_capenable ^= IFCAP_RXCSUM;
5134 if (mask & IFCAP_RXCSUM_IPV6)
5135 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
5136 if (mask & IFCAP_LRO)
5137 ifp->if_capenable ^= IFCAP_LRO;
5138 if (mask & IFCAP_VLAN_HWTAGGING)
5139 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5140 if (mask & IFCAP_VLAN_HWFILTER)
5141 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
5142 if (mask & IFCAP_VLAN_HWTSO)
5143 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5144 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5146 ixl_init_locked(pf);
5149 VLAN_CAPABILITIES(ifp);
5153 #if __FreeBSD_version >= 1003000
5156 struct ifi2creq i2c;
5159 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
5163 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
5166 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
5170 if (i2c.len > sizeof(i2c.data)) {
5175 for (i = 0; i < i2c.len; i++)
5176 if (ixl_read_i2c_byte(pf, i2c.offset + i,
5177 i2c.dev_addr, &i2c.data[i]))
5180 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
5185 IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
5186 error = ether_ioctl(ifp, command, data);
5194 ixl_find_i2c_interface(struct ixl_pf *pf)
5196 struct i40e_hw *hw = &pf->hw;
5197 bool i2c_en, port_matched;
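/*
 * Scan the four MDIO/I2C select registers for one with I2C enabled
 * whose PHY port number matches this PF's port; the matching index
 * identifies the I2C interface to use for this port.
 */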
5200 for (int i = 0; i < 4; i++) {
5201 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
5202 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
5203 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
5204 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
5206 if (i2c_en && port_matched)
5214 ixl_phy_type_string(u32 bit_pos, bool ext)
5216 static char * phy_types_str[32] = {
5246 "1000BASE-T Optical",
5250 static char * ext_phy_types_str[4] = {
5257 if (ext && bit_pos > 3) return "Invalid_Ext";
5258 if (bit_pos > 31) return "Invalid";
5260 return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
5264 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
5266 device_t dev = pf->dev;
5267 struct i40e_hw *hw = &pf->hw;
5268 struct i40e_aq_desc desc;
5269 enum i40e_status_code status;
5271 struct i40e_aqc_get_link_status *aq_link_status =
5272 (struct i40e_aqc_get_link_status *)&desc.params.raw;
5274 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
5275 link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
5276 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
5279 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
5280 __func__, i40e_stat_str(hw, status),
5281 i40e_aq_str(hw, hw->aq.asq_last_status));
5285 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
5290 ixl_phy_type_string_ls(u8 val)
5293 return ixl_phy_type_string(val - 0x1F, true);
5295 return ixl_phy_type_string(val, false);
5299 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
5301 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5302 device_t dev = pf->dev;
5306 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5308 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5312 struct i40e_aqc_get_link_status link_status;
5313 error = ixl_aq_get_link_status(pf, &link_status);
5319 /* TODO: Add 25G types */
5320 sbuf_printf(buf, "\n"
5321 "PHY Type : 0x%02x<%s>\n"
5323 "Link info: 0x%02x\n"
5324 "AN info : 0x%02x\n"
5325 "Ext info : 0x%02x\n"
5326 "Loopback : 0x%02x\n"
5330 link_status.phy_type,
5331 ixl_phy_type_string_ls(link_status.phy_type),
5332 link_status.link_speed,
5333 link_status.link_info,
5334 link_status.an_info,
5335 link_status.ext_info,
5336 link_status.loopback,
5337 link_status.max_frame_size,
5339 link_status.power_desc);
5341 error = sbuf_finish(buf);
5343 device_printf(dev, "Error finishing sbuf: %d\n", error);
5350 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
5352 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5353 struct i40e_hw *hw = &pf->hw;
5354 device_t dev = pf->dev;
5355 enum i40e_status_code status;
5356 struct i40e_aq_get_phy_abilities_resp abilities;
5360 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5362 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5366 status = i40e_aq_get_phy_capabilities(hw,
5367 FALSE, FALSE, &abilities, NULL);
5370 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
5371 __func__, i40e_stat_str(hw, status),
5372 i40e_aq_str(hw, hw->aq.asq_last_status));
5377 sbuf_printf(buf, "\n"
5379 abilities.phy_type);
5381 if (abilities.phy_type != 0) {
5382 sbuf_printf(buf, "<");
5383 for (int i = 0; i < 32; i++)
5384 if ((1 << i) & abilities.phy_type)
5385 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
5386 sbuf_printf(buf, ">\n");
5389 sbuf_printf(buf, "PHY Ext : %02x",
5390 abilities.phy_type_ext);
5392 if (abilities.phy_type_ext != 0) {
5393 sbuf_printf(buf, "<");
5394 for (int i = 0; i < 4; i++)
5395 if ((1 << i) & abilities.phy_type_ext)
5396 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
5397 sbuf_printf(buf, ">");
5399 sbuf_printf(buf, "\n");
5407 "ID : %02x %02x %02x %02x\n"
5408 "ModType : %02x %02x %02x\n"
5412 abilities.link_speed,
5413 abilities.abilities, abilities.eee_capability,
5414 abilities.eeer_val, abilities.d3_lpan,
5415 abilities.phy_id[0], abilities.phy_id[1],
5416 abilities.phy_id[2], abilities.phy_id[3],
5417 abilities.module_type[0], abilities.module_type[1],
5418 abilities.module_type[2], abilities.phy_type_ext >> 5,
5419 abilities.phy_type_ext & 0x1F,
5420 abilities.ext_comp_code);
5422 error = sbuf_finish(buf);
5424 device_printf(dev, "Error finishing sbuf: %d\n", error);
5431 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5433 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5434 struct ixl_vsi *vsi = &pf->vsi;
5435 struct ixl_mac_filter *f;
5440 int ftl_counter = 0;
5444 SLIST_FOREACH(f, &vsi->ftl, next) {
5449 sysctl_handle_string(oidp, "(none)", 6, req);
5453 buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5454 buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
5456 sprintf(buf_i++, "\n");
5457 SLIST_FOREACH(f, &vsi->ftl, next) {
5459 MAC_FORMAT ", vlan %4d, flags %#06x",
5460 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5462 /* don't print '\n' for last entry */
5463 if (++ftl_counter != ftl_len) {
5464 sprintf(buf_i, "\n");
5469 error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5471 printf("sysctl error: %d\n", error);
5472 free(buf, M_DEVBUF);
5476 #define IXL_SW_RES_SIZE 0x14
5478 ixl_res_alloc_cmp(const void *a, const void *b)
5480 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5481 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5482 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5484 return ((int)one->resource_type - (int)two->resource_type);
5488 * Longest string length: 25
5491 ixl_switch_res_type_string(u8 type)
5493 static char * ixl_switch_res_type_strings[0x14] = {
5496 "Perfect Match MAC address",
5499 "Multicast hash entry",
5500 "Unicast hash entry",
5504 "VLAN Statistic Pool",
5507 "Inner VLAN Forward filter",
5517 return ixl_switch_res_type_strings[type];
5519 return "(Reserved)";
5523 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5525 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5526 struct i40e_hw *hw = &pf->hw;
5527 device_t dev = pf->dev;
5529 enum i40e_status_code status;
5533 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5535 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5537 device_printf(dev, "Could not allocate sbuf for output.\n");
5541 bzero(resp, sizeof(resp));
5542 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5548 "%s: get_switch_resource_alloc() error %s, aq error %s\n",
5549 __func__, i40e_stat_str(hw, status),
5550 i40e_aq_str(hw, hw->aq.asq_last_status));
5555 /* Sort entries by type for display */
5556 qsort(resp, num_entries,
5557 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5558 &ixl_res_alloc_cmp);
5560 sbuf_cat(buf, "\n");
5561 sbuf_printf(buf, "# of entries: %d\n", num_entries);
5563 " Type | Guaranteed | Total | Used | Un-allocated\n"
5564 " | (this) | (all) | (this) | (all) \n");
5565 for (int i = 0; i < num_entries; i++) {
5567 "%25s | %10d %5d %6d %12d",
5568 ixl_switch_res_type_string(resp[i].resource_type),
5572 resp[i].total_unalloced);
5573 if (i < num_entries - 1)
5574 sbuf_cat(buf, "\n");
5577 error = sbuf_finish(buf);
5579 device_printf(dev, "Error finishing sbuf: %d\n", error);
5586 ** Caller must init and delete sbuf; this function will clear and
5587 ** finish it for caller.
5589 ** XXX: Cannot use the SEID for this, since there is no longer a
5590 ** fixed mapping between SEID and element type.
5593 ixl_switch_element_string(struct sbuf *s,
5594 struct i40e_aqc_switch_config_element_resp *element)
5598 switch (element->element_type) {
5599 case I40E_AQ_SW_ELEM_TYPE_MAC:
5600 sbuf_printf(s, "MAC %3d", element->element_info);
5602 case I40E_AQ_SW_ELEM_TYPE_PF:
5603 sbuf_printf(s, "PF %3d", element->element_info);
5605 case I40E_AQ_SW_ELEM_TYPE_VF:
5606 sbuf_printf(s, "VF %3d", element->element_info);
5608 case I40E_AQ_SW_ELEM_TYPE_EMP:
5611 case I40E_AQ_SW_ELEM_TYPE_BMC:
5614 case I40E_AQ_SW_ELEM_TYPE_PV:
5617 case I40E_AQ_SW_ELEM_TYPE_VEB:
5620 case I40E_AQ_SW_ELEM_TYPE_PA:
5623 case I40E_AQ_SW_ELEM_TYPE_VSI:
5624 sbuf_printf(s, "VSI %3d", element->element_info);
5632 return sbuf_data(s);
5636 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5638 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5639 struct i40e_hw *hw = &pf->hw;
5640 device_t dev = pf->dev;
5643 enum i40e_status_code status;
5646 u8 aq_buf[I40E_AQ_LARGE_BUF];
5648 struct i40e_aqc_get_switch_config_resp *sw_config;
5649 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5651 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5653 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5657 status = i40e_aq_get_switch_config(hw, sw_config,
5658 sizeof(aq_buf), &next, NULL);
5661 "%s: aq_get_switch_config() error %s, aq error %s\n",
5662 __func__, i40e_stat_str(hw, status),
5663 i40e_aq_str(hw, hw->aq.asq_last_status));
5668 device_printf(dev, "%s: TODO: get more config with SEID %d\n",
5671 nmbuf = sbuf_new_auto();
5673 device_printf(dev, "Could not allocate sbuf for name output.\n");
5678 sbuf_cat(buf, "\n");
5679 /* Assuming <= 255 elements in switch */
5680 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
5681 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
5683 ** Revision -- all elements are revision 1 for now
5686 "SEID ( Name ) | Uplink | Downlink | Conn Type\n"
5687 " | | | (uplink)\n");
5688 for (int i = 0; i < sw_config->header.num_reported; i++) {
5689 // "%4d (%8s) | %8s %8s %#8x",
5690 sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5692 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5693 &sw_config->element[i]));
5694 sbuf_cat(buf, " | ");
5695 sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
5697 sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
5699 sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
5700 if (i < sw_config->header.num_reported - 1)
5701 sbuf_cat(buf, "\n");
5705 error = sbuf_finish(buf);
5707 device_printf(dev, "Error finishing sbuf: %d\n", error);
5715 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
5717 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5718 struct i40e_hw *hw = &pf->hw;
5719 device_t dev = pf->dev;
5722 enum i40e_status_code status;
5725 struct i40e_aqc_get_set_rss_key_data key_data;
5727 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5729 device_printf(dev, "Could not allocate sbuf for output.\n");
5733 sbuf_cat(buf, "\n");
5734 if (hw->mac.type == I40E_MAC_X722) {
5735 bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
5736 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
5738 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
5739 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
5740 sbuf_printf(buf, "%40D", (u_char *)key_data.standard_rss_key, "");
5742 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
5743 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
5744 sbuf_printf(buf, "%4D", (u_char *)&reg, "");
5748 error = sbuf_finish(buf);
5750 device_printf(dev, "Error finishing sbuf: %d\n", error);
5757 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
5759 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5760 struct i40e_hw *hw = &pf->hw;
5761 device_t dev = pf->dev;
5764 enum i40e_status_code status;
5768 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5770 device_printf(dev, "Could not allocate sbuf for output.\n");
5774 sbuf_cat(buf, "\n");
5775 if (hw->mac.type == I40E_MAC_X722) {
5776 bzero(hlut, sizeof(hlut));
5777 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
5779 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
5780 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
5781 sbuf_printf(buf, "%512D", (u_char *)hlut, "");
5783 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
5784 reg = rd32(hw, I40E_PFQF_HLUT(i));
5785 sbuf_printf(buf, "%4D", (u_char *)&reg, "");
5789 error = sbuf_finish(buf);
5791 device_printf(dev, "Error finishing sbuf: %d\n", error);
5798 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
5800 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5801 struct i40e_hw *hw = &pf->hw;
5804 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
5805 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
5807 return sysctl_handle_long(oidp, NULL, hena, req);
5811 * Sysctl to disable firmware's link management
5813 * 1 - Disable link management on this port
5814 * 0 - Re-enable link management
5816 * On normal NVMs, firmware manages link by default.
5819 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
5821 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5822 struct i40e_hw *hw = &pf->hw;
5823 device_t dev = pf->dev;
5824 int requested_mode = -1;
5825 enum i40e_status_code status = 0;
5828 /* Read in new mode */
5829 error = sysctl_handle_int(oidp, &requested_mode, 0, req);
5830 if ((error) || (req->newptr == NULL))
5832 /* Check for sane value */
5833 if (requested_mode < 0 || requested_mode > 1) {
5834 device_printf(dev, "Valid modes are 0 or 1\n");
5839 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
5842 "%s: Error setting new phy debug mode %s,"
5843 " aq error: %s\n", __func__, i40e_stat_str(hw, status),
5844 i40e_aq_str(hw, hw->aq.asq_last_status));
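/*
 * Illustrative userland sketch, kept under #if 0.  The node is
 * registered above as "disable_fw_link_management" under the hidden
 * "debug" tree; the "dev.ixl.0" prefix assumes unit 0.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

/* Write 1 to disable FW link management on this port, 0 to re-enable. */
static int
set_fw_link_mgmt(int disable)
{
	return (sysctlbyname("dev.ixl.0.debug.disable_fw_link_management",
	    NULL, NULL, &disable, sizeof(disable)));
}
#endif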
5852 * Sysctl to read a byte from I2C bus.
5854 * Input: 32-bit value:
5855 * bits 0-7: device address (0xA0 or 0xA2)
5856 * bits 8-15: offset (0-255)
5857 * bits 16-31: unused
5858 * Output: 8-bit value read
5861 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
5863 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5864 device_t dev = pf->dev;
5865 int input = -1, error = 0;
5867 device_printf(dev, "%s: start\n", __func__);
5869 u8 dev_addr, offset, output;
5871 /* Read in I2C read parameters */
5872 error = sysctl_handle_int(oidp, &input, 0, req);
5873 if ((error) || (req->newptr == NULL))
5875 /* Validate device address */
5876 dev_addr = input & 0xFF;
5877 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
5880 offset = (input >> 8) & 0xFF;
5882 error = ixl_read_i2c_byte(pf, offset, dev_addr, &output);
5886 device_printf(dev, "%02X\n", output);
5891 * Sysctl to write a byte to the I2C bus.
5893 * Input: 32-bit value:
5894 * bits 0-7: device address (0xA0 or 0xA2)
5895 * bits 8-15: offset (0-255)
5896 * bits 16-23: value to write
5897 * bits 24-31: unused
5898 * Output: 8-bit value written
5901 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
5903 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5904 device_t dev = pf->dev;
5905 int input = -1, error = 0;
5907 u8 dev_addr, offset, value;
5909 /* Read in I2C write parameters */
5910 error = sysctl_handle_int(oidp, &input, 0, req);
5911 if ((error) || (req->newptr == NULL))
5913 /* Validate device address */
5914 dev_addr = input & 0xFF;
5915 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
5918 offset = (input >> 8) & 0xFF;
5919 value = (input >> 16) & 0xFF;
5921 error = ixl_write_i2c_byte(pf, offset, dev_addr, value);
5925 device_printf(dev, "%02X written\n", value);
5930 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
5931 u8 bit_pos, int *is_set)
5933 device_t dev = pf->dev;
5934 struct i40e_hw *hw = &pf->hw;
5935 enum i40e_status_code status;
5937 status = i40e_aq_get_phy_capabilities(hw,
5938 FALSE, FALSE, abilities, NULL);
5941 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
5942 __func__, i40e_stat_str(hw, status),
5943 i40e_aq_str(hw, hw->aq.asq_last_status));
5947 *is_set = !!(abilities->phy_type_ext & bit_pos);
5952 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
5953 u8 bit_pos, int set)
5955 device_t dev = pf->dev;
5956 struct i40e_hw *hw = &pf->hw;
5957 struct i40e_aq_set_phy_config config;
5958 enum i40e_status_code status;
5960 /* Set new PHY config */
5961 memset(&config, 0, sizeof(config));
5962 config.fec_config = abilities->phy_type_ext & ~(bit_pos);
5964 config.fec_config |= bit_pos;
5965 if (config.fec_config != abilities->phy_type_ext) {
5966 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
5967 config.phy_type = abilities->phy_type;
5968 config.phy_type_ext = abilities->phy_type_ext;
5969 config.link_speed = abilities->link_speed;
5970 config.eee_capability = abilities->eee_capability;
5971 config.eeer = abilities->eeer_val;
5972 config.low_power_ctrl = abilities->d3_lpan;
5973 status = i40e_aq_set_phy_config(hw, &config, NULL);
5977 "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
5978 __func__, i40e_stat_str(hw, status),
5979 i40e_aq_str(hw, hw->aq.asq_last_status));
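/*
 * The FEC sysctl handlers below share one pattern: fetch the current
 * abilities with ixl_get_fec_config(), report the bit they own on a
 * read, and on a write push the change back through
 * ixl_set_fec_config(), which only issues the AQ command when the
 * requested bit actually differs from the current configuration.
 */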
5988 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
5990 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5991 int mode, error = 0;
5993 struct i40e_aq_get_phy_abilities_resp abilities;
5994 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, &mode);
5997 /* Read in new mode */
5998 error = sysctl_handle_int(oidp, &mode, 0, req);
5999 if ((error) || (req->newptr == NULL))
6002 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
6006 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
6008 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6009 int mode, error = 0;
6011 struct i40e_aq_get_phy_abilities_resp abilities;
6012 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, &mode);
6015 /* Read in new mode */
6016 error = sysctl_handle_int(oidp, &mode, 0, req);
6017 if ((error) || (req->newptr == NULL))
6020 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
6024 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
6026 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6027 int mode, error = 0;
6029 struct i40e_aq_get_phy_abilities_resp abilities;
6030 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, &mode);
6033 /* Read in new mode */
6034 error = sysctl_handle_int(oidp, &mode, 0, req);
6035 if ((error) || (req->newptr == NULL))
6038 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
6042 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
6044 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6045 int mode, error = 0;
6047 struct i40e_aq_get_phy_abilities_resp abilities;
6048 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, &mode);
6051 /* Read in new mode */
6052 error = sysctl_handle_int(oidp, &mode, 0, req);
6053 if ((error) || (req->newptr == NULL))
6056 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
6060 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
6062 struct ixl_pf *pf = (struct ixl_pf *)arg1;
6063 int mode, error = 0;
6065 struct i40e_aq_get_phy_abilities_resp abilities;
6066 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, &mode);
6069 /* Read in new mode */
6070 error = sysctl_handle_int(oidp, &mode, 0, req);
6071 if ((error) || (req->newptr == NULL))
6074 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));