1 /******************************************************************************
3 Copyright (c) 2013-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
39 #include "ixl_pf_iov.h"
43 #include <net/netmap.h>
44 #include <sys/selinfo.h>
45 #include <dev/netmap/netmap_kern.h>
46 #endif /* DEV_NETMAP */
48 static int ixl_setup_queue(struct ixl_queue *, struct ixl_pf *, int);
51 static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
52 static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
53 static int ixl_current_speed(SYSCTL_HANDLER_ARGS);
54 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
55 static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
56 static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
57 static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
60 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
61 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
62 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
63 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
64 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
65 static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
66 static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
/*
 * Debug printf gated on the PF's runtime debug mask: prints only when
 * `mask` intersects pf->dbg_mask.  NOTE(review): extraction fragment —
 * the va_list setup/teardown lines are missing from this view; passing
 * a raw va_list as a printf vararg would be wrong, so presumably the
 * missing lines use a v*printf-style sink — confirm against full source.
 */
69 ixl_dbg(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)
73 if (!(mask & pf->dbg_mask))
77 device_printf(pf->dev, fmt, args);
/*
 * Format FW/API/NVM/EEtrackID/OEM version info into the caller-supplied
 * sbuf.  OEM version dword layout (from the shifts below): bits 31..24
 * major, 23..8 build, 7..0 patch.  (Fragment: sbuf_printf call line and
 * braces are missing from this view.)
 */
82 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
85 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
87 u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
88 u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
89 u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
92 "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
93 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
94 hw->aq.api_maj_ver, hw->aq.api_min_ver,
95 (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
96 IXL_NVM_VERSION_HI_SHIFT,
97 (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
98 IXL_NVM_VERSION_LO_SHIFT,
100 oem_ver, oem_build, oem_patch);
/*
 * Print the NVM/firmware version string to the console via the device.
 * Builds an auto-sized sbuf, fills it with ixl_nvm_version_str(), and
 * prints it.  NOTE(review): the sbuf_finish()/sbuf_delete() lines are
 * missing from this fragment — sbuf_data() is only valid after
 * sbuf_finish(), and the sbuf must be deleted to avoid a leak; confirm
 * they exist in the full source.
 */
104 ixl_print_nvm_version(struct ixl_pf *pf)
106 struct i40e_hw *hw = &pf->hw;
107 device_t dev = pf->dev;
110 sbuf = sbuf_new_auto();
111 ixl_nvm_version_str(hw, sbuf);
113 device_printf(dev, "%s\n", sbuf_data(sbuf));
/*
 * Program the PF-wide TX interrupt throttling rate (ITR) into every
 * queue's I40E_PFINT_ITRN register and mirror the value (plus a default
 * latency class) into each software tx_ring.
 */
118 ixl_configure_tx_itr(struct ixl_pf *pf)
120 struct i40e_hw *hw = &pf->hw;
121 struct ixl_vsi *vsi = &pf->vsi;
122 struct ixl_queue *que = vsi->queues;
124 vsi->tx_itr_setting = pf->tx_itr;
126 for (int i = 0; i < vsi->num_queues; i++, que++) {
127 struct tx_ring *txr = &que->txr;
129 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
130 vsi->tx_itr_setting);
131 txr->itr = vsi->tx_itr_setting;
132 txr->latency = IXL_AVE_LATENCY;
/*
 * RX twin of ixl_configure_tx_itr(): program the PF-wide RX ITR into
 * every queue's I40E_PFINT_ITRN register and mirror it into each rx_ring.
 */
137 ixl_configure_rx_itr(struct ixl_pf *pf)
139 struct i40e_hw *hw = &pf->hw;
140 struct ixl_vsi *vsi = &pf->vsi;
141 struct ixl_queue *que = vsi->queues;
143 vsi->rx_itr_setting = pf->rx_itr;
145 for (int i = 0; i < vsi->num_queues; i++, que++) {
146 struct rx_ring *rxr = &que->rxr;
148 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
149 vsi->rx_itr_setting);
150 rxr->itr = vsi->rx_itr_setting;
151 rxr->latency = IXL_AVE_LATENCY;
/* Convenience wrapper: push both TX and RX PF ITR values to hardware. */
156 * Write PF ITR values to queue ITR registers.
159 ixl_configure_itr(struct ixl_pf *pf)
161 ixl_configure_tx_itr(pf);
162 ixl_configure_rx_itr(pf);
/*
 * Bring the interface up with the PF lock held: refresh the LAA MAC
 * filter, program checksum/TSO offload assists from if_capenable, set
 * HW filter control, init the VSI (rings/HMC), set up interrupts and
 * ITR, enable rings and interrupts, kick off a link-status query, and
 * start the local timer before marking the ifnet RUNNING.
 */
166 /*********************************************************************
169 * This routine is used in two ways. It is used by the stack as
170 * init entry point in network interface structure. It is also used
171 * by the driver as a hw/sw initialization routine to get to a
174 * return 0 on success, positive on failure
175 **********************************************************************/
177 ixl_init_locked(struct ixl_pf *pf)
179 struct i40e_hw *hw = &pf->hw;
180 struct ixl_vsi *vsi = &pf->vsi;
181 struct ifnet *ifp = vsi->ifp;
182 device_t dev = pf->dev;
183 struct i40e_filter_control_settings filter;
184 u8 tmpaddr[ETHER_ADDR_LEN];
187 mtx_assert(&pf->pf_mtx, MA_OWNED);
188 INIT_DEBUGOUT("ixl_init_locked: begin");
192 /* Get the latest mac address... User might use a LAA */
193 bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
194 I40E_ETH_LENGTH_OF_ADDRESS);
195 if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
196 (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
197 ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
198 bcopy(tmpaddr, hw->mac.addr,
199 I40E_ETH_LENGTH_OF_ADDRESS);
200 ret = i40e_aq_mac_address_write(hw,
201 I40E_AQC_WRITE_TYPE_LAA_ONLY,
/*
 * NOTE(review): the two adjacent string literals below concatenate to
 * "LLA addresschange failed!!" — missing space between "address" and
 * "change"; should be "LLA address " or " change".
 */
204 device_printf(dev, "LLA address"
205 "change failed!!\n");
210 ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
212 /* Set the various hardware offload abilities */
213 ifp->if_hwassist = 0;
214 if (ifp->if_capenable & IFCAP_TSO)
215 ifp->if_hwassist |= CSUM_TSO;
216 if (ifp->if_capenable & IFCAP_TXCSUM)
217 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
218 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
219 ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
221 /* Set up the device filtering */
222 bzero(&filter, sizeof(filter));
223 filter.enable_ethtype = TRUE;
224 filter.enable_macvlan = TRUE;
225 filter.enable_fdir = FALSE;
226 filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
227 if (i40e_set_filter_control(hw, &filter))
228 device_printf(dev, "i40e_set_filter_control() failed\n");
230 /* Prepare the VSI: rings, hmc contexts, etc... */
231 if (ixl_initialize_vsi(vsi)) {
232 device_printf(dev, "initialize vsi failed!!\n");
239 /* Add protocol filters to list */
240 ixl_init_filters(vsi);
242 /* Setup vlan's if needed */
243 ixl_setup_vlan_filters(vsi);
245 /* Set up MSI/X routing and the ITR settings */
246 if (pf->enable_msix) {
247 ixl_configure_queue_intr_msix(pf);
248 ixl_configure_itr(pf);
/* Legacy/MSI fallback path when MSI-X is not enabled. */
250 ixl_configure_legacy(pf);
252 ixl_enable_rings(vsi);
254 i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
256 ixl_reconfigure_filters(vsi);
258 /* And now turn on interrupts */
259 ixl_enable_intr(vsi);
/* Force a fresh link query so the OS link state is current. */
262 hw->phy.get_link_info = TRUE;
263 i40e_get_link_status(hw, &pf->link_up);
264 ixl_update_link_status(pf);
266 /* Set initial advertised speed sysctl value */
267 ixl_get_initial_advertised_speeds(pf);
269 /* Start the local timer */
270 callout_reset(&pf->timer, hz, ixl_local_timer, pf);
272 /* Now inform the stack we're ready */
273 ifp->if_drv_flags |= IFF_DRV_RUNNING;
/*
 * Query device capabilities via the admin queue and cache them in the
 * hw struct.  Allocates a 40-element response buffer; on an AQ ENOMEM
 * the (elided) retry path re-issues with the size the FW asked for in
 * `needed`.  Also records this PF's base queue index.
 */
277 /*********************************************************************
279 * Get the hardware capabilities
281 **********************************************************************/
284 ixl_get_hw_capabilities(struct ixl_pf *pf)
286 struct i40e_aqc_list_capabilities_element_resp *buf;
287 struct i40e_hw *hw = &pf->hw;
288 device_t dev = pf->dev;
293 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
295 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
296 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
297 device_printf(dev, "Unable to allocate cap memory\n");
301 /* This populates the hw struct */
302 error = i40e_aq_discover_capabilities(hw, buf, len,
303 &needed, i40e_aqc_opc_list_func_capabilities, NULL);
305 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
307 /* retry once with a larger buffer */
311 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
312 device_printf(dev, "capability discovery failed: %d\n",
313 pf->hw.aq.asq_last_status);
317 /* Capture this PF's starting queue pair */
318 pf->qbase = hw->func_caps.base_queue;
321 device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
322 "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
323 hw->pf_id, hw->func_caps.num_vfs,
324 hw->func_caps.num_msix_vectors,
325 hw->func_caps.num_msix_vectors_vf,
326 hw->func_caps.fd_filters_guaranteed,
327 hw->func_caps.fd_filters_best_effort,
328 hw->func_caps.num_tx_qp,
329 hw->func_caps.num_rx_qp,
330 hw->func_caps.base_queue);
332 /* Print a subset of the capability information. */
333 device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n",
334 hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
335 hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
336 (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
337 (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
/*
 * Enforce the TSO-requires-TXCSUM dependency when toggling capabilities
 * from ioctl: for each of IPv4 (TXCSUM/TSO4) and IPv6 (TXCSUM_IPV6/TSO6)
 * there are four current-state cases (neither, csum-only, both).  The
 * IXL_FLAGS_KEEP_TSO{4,6} vsi flags remember that TSO was force-disabled
 * alongside csum so it can be transparently restored when csum returns.
 */
344 ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
346 device_t dev = vsi->dev;
348 /* Enable/disable TXCSUM/TSO4 */
349 if (!(ifp->if_capenable & IFCAP_TXCSUM)
350 && !(ifp->if_capenable & IFCAP_TSO4)) {
351 if (mask & IFCAP_TXCSUM) {
352 ifp->if_capenable |= IFCAP_TXCSUM;
353 /* enable TXCSUM, restore TSO if previously enabled */
354 if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
355 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
356 ifp->if_capenable |= IFCAP_TSO4;
359 else if (mask & IFCAP_TSO4) {
360 ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
361 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
363 "TSO4 requires txcsum, enabling both...\n");
365 } else if((ifp->if_capenable & IFCAP_TXCSUM)
366 && !(ifp->if_capenable & IFCAP_TSO4)) {
367 if (mask & IFCAP_TXCSUM)
368 ifp->if_capenable &= ~IFCAP_TXCSUM;
369 else if (mask & IFCAP_TSO4)
370 ifp->if_capenable |= IFCAP_TSO4;
371 } else if((ifp->if_capenable & IFCAP_TXCSUM)
372 && (ifp->if_capenable & IFCAP_TSO4)) {
373 if (mask & IFCAP_TXCSUM) {
/* Disabling csum while TSO4 is on: drop both, remember TSO. */
374 vsi->flags |= IXL_FLAGS_KEEP_TSO4;
375 ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
377 "TSO4 requires txcsum, disabling both...\n");
378 } else if (mask & IFCAP_TSO4)
379 ifp->if_capenable &= ~IFCAP_TSO4;
382 /* Enable/disable TXCSUM_IPV6/TSO6 */
383 if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
384 && !(ifp->if_capenable & IFCAP_TSO6)) {
385 if (mask & IFCAP_TXCSUM_IPV6) {
386 ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
387 if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
388 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
389 ifp->if_capenable |= IFCAP_TSO6;
391 } else if (mask & IFCAP_TSO6) {
392 ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
393 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
395 "TSO6 requires txcsum6, enabling both...\n");
397 } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
398 && !(ifp->if_capenable & IFCAP_TSO6)) {
399 if (mask & IFCAP_TXCSUM_IPV6)
400 ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
401 else if (mask & IFCAP_TSO6)
402 ifp->if_capenable |= IFCAP_TSO6;
403 } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
404 && (ifp->if_capenable & IFCAP_TSO6)) {
405 if (mask & IFCAP_TXCSUM_IPV6) {
406 vsi->flags |= IXL_FLAGS_KEEP_TSO6;
407 ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
409 "TSO6 requires txcsum6, disabling both...\n");
410 } else if (mask & IFCAP_TSO6)
411 ifp->if_capenable &= ~IFCAP_TSO6;
/*
 * Seed pf->advertised_speed (the set_advertise sysctl) from the PHY's
 * reported link-speed abilities.  Bit encoding of advertised_speed:
 * 0x10=40G, 0x8=20G, 0x4=10G, 0x2=1G, 0x1=100M.  A get-capabilities
 * failure is non-fatal — the sysctl just starts empty.
 */
415 /* For the set_advertise sysctl */
417 ixl_get_initial_advertised_speeds(struct ixl_pf *pf)
419 struct i40e_hw *hw = &pf->hw;
420 device_t dev = pf->dev;
421 enum i40e_status_code status;
422 struct i40e_aq_get_phy_abilities_resp abilities;
424 /* Set initial sysctl values */
/* NOTE(review): mixed `FALSE`/`false` spellings below — cosmetic only. */
425 status = i40e_aq_get_phy_capabilities(hw, FALSE, false, &abilities,
428 /* Non-fatal error */
429 device_printf(dev, "%s: i40e_aq_get_phy_capabilities() error %d\n",
434 if (abilities.link_speed & I40E_LINK_SPEED_40GB)
435 pf->advertised_speed |= 0x10;
436 if (abilities.link_speed & I40E_LINK_SPEED_20GB)
437 pf->advertised_speed |= 0x8;
438 if (abilities.link_speed & I40E_LINK_SPEED_10GB)
439 pf->advertised_speed |= 0x4;
440 if (abilities.link_speed & I40E_LINK_SPEED_1GB)
441 pf->advertised_speed |= 0x2;
442 if (abilities.link_speed & I40E_LINK_SPEED_100MB)
443 pf->advertised_speed |= 0x1;
/*
 * Tear down HW support structures in reverse init order: shut down the
 * LAN HMC (only if it was set up — hmc_obj non-NULL), then the admin
 * queue.  Failures are logged but teardown proceeds.
 */
447 ixl_teardown_hw_structs(struct ixl_pf *pf)
449 enum i40e_status_code status = 0;
450 struct i40e_hw *hw = &pf->hw;
451 device_t dev = pf->dev;
453 /* Shutdown LAN HMC */
454 if (hw->hmc.hmc_obj) {
455 status = i40e_shutdown_lan_hmc(hw);
458 "init: LAN HMC shutdown failure; status %d\n", status);
463 // XXX: This gets called when we know the adminq is inactive;
464 // so we already know it's setup when we get here.
466 /* Shutdown admin queue */
467 status = i40e_shutdown_adminq(hw);
470 "init: Admin Queue shutdown failure; status %d\n", status);
/*
 * Full PF reset/re-init sequence: PF reset, admin queue init, clear PXE
 * mode, rediscover capabilities, init+configure the LAN HMC, rebuild the
 * switch config, set the PHY interrupt mask, and restore flow control.
 * Each step logs and (in the elided lines) bails on failure.  Includes a
 * firmware workaround: on FW < 4.33 the link must be restarted manually.
 */
477 ixl_reset(struct ixl_pf *pf)
479 struct i40e_hw *hw = &pf->hw;
480 device_t dev = pf->dev;
484 // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
486 error = i40e_pf_reset(hw);
488 device_printf(dev, "init: PF reset failure");
493 error = i40e_init_adminq(hw);
495 device_printf(dev, "init: Admin queue init failure;"
496 " status code %d", error);
501 i40e_clear_pxe_mode(hw);
503 error = ixl_get_hw_capabilities(pf);
505 device_printf(dev, "init: Error retrieving HW capabilities;"
506 " status code %d\n", error);
510 error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
511 hw->func_caps.num_rx_qp, 0, 0);
513 device_printf(dev, "init: LAN HMC init failed; status code %d\n",
519 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
521 device_printf(dev, "init: LAN HMC config failed; status code %d\n",
527 // XXX: possible fix for panic, but our failure recovery is still broken
528 error = ixl_switch_config(pf);
530 device_printf(dev, "init: ixl_switch_config() failed: %d\n",
535 error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
538 device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
539 " aq_err %d\n", error, hw->aq.asq_last_status);
544 error = i40e_set_fc(hw, &set_fc_err_mask, true);
546 device_printf(dev, "init: setting link flow control failed; retcode %d,"
547 " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
551 // XXX: (Rebuild VSIs?)
553 /* Firmware delay workaround */
554 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
555 (hw->aq.fw_maj_ver < 4)) {
557 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
559 device_printf(dev, "init: link restart failed, aq_err %d\n",
560 hw->aq.asq_last_status);
/*
 * Deferred (taskqueue) per-queue handler: clean RX up to IXL_RX_LIMIT,
 * restart TX if the buf_ring has backlog, and re-queue itself when more
 * RX work remains.  Finally re-arms the queue interrupt.
 */
571 ** MSIX Interrupt Handlers and Tasklets
574 ixl_handle_que(void *context, int pending)
576 struct ixl_queue *que = context;
577 struct ixl_vsi *vsi = que->vsi;
578 struct i40e_hw *hw = vsi->hw;
579 struct tx_ring *txr = &que->txr;
580 struct ifnet *ifp = vsi->ifp;
583 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
584 more = ixl_rxeof(que, IXL_RX_LIMIT);
587 if (!drbr_empty(ifp, txr->br))
588 ixl_mq_start_locked(ifp, txr);
591 taskqueue_enqueue(que->tq, &que->task);
596 /* Reenable this interrupt - hmmm */
597 ixl_enable_queue(hw, que->me);
/*
 * Legacy/MSI interrupt handler: acknowledge ICR0, dispatch VFLR and
 * adminq causes to their tasks, clean queue 0's RX/TX, then re-enable
 * interrupt causes and the legacy interrupt.
 */
602 /*********************************************************************
604 * Legacy Interrupt Service routine
606 **********************************************************************/
610 struct ixl_pf *pf = arg;
611 struct i40e_hw *hw = &pf->hw;
612 struct ixl_vsi *vsi = &pf->vsi;
613 struct ixl_queue *que = vsi->queues;
614 struct ifnet *ifp = vsi->ifp;
615 struct tx_ring *txr = &que->txr;
617 bool more_tx, more_rx;
621 /* Protect against spurious interrupts */
622 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
625 icr0 = rd32(hw, I40E_PFINT_ICR0);
627 reg = rd32(hw, I40E_PFINT_DYN_CTL0);
628 reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
629 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
631 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
634 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
635 taskqueue_enqueue(pf->tq, &pf->vflr_task);
638 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
639 taskqueue_enqueue(pf->tq, &pf->adminq);
643 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
646 more_tx = ixl_txeof(que);
647 if (!drbr_empty(vsi->ifp, txr->br))
651 /* re-enable other interrupt causes */
652 wr32(hw, I40E_PFINT_ICR0_ENA, mask);
654 /* And now the queues */
655 reg = rd32(hw, I40E_QINT_RQCTL(0));
656 reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
657 wr32(hw, I40E_QINT_RQCTL(0), reg);
659 reg = rd32(hw, I40E_QINT_TQCTL(0));
660 reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
/*
 * NOTE(review): a PFINT_ICR0 bit mask is cleared in a QINT_TQCTL
 * register value here — verify against the datasheet that the intended
 * bit is meaningful in TQCTL (looks suspicious; possibly inherited from
 * upstream as-is).
 */
661 reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
662 wr32(hw, I40E_QINT_TQCTL(0), reg);
664 ixl_enable_legacy(hw);
/*
 * MSI-X per-queue interrupt handler: clean RX/TX, restart queued TX,
 * adapt the queue's RX/TX ITR, defer remaining work to the queue task,
 * and re-arm the queue interrupt.
 */
670 /*********************************************************************
672 * MSIX VSI Interrupt Service routine
674 **********************************************************************/
676 ixl_msix_que(void *arg)
678 struct ixl_queue *que = arg;
679 struct ixl_vsi *vsi = que->vsi;
680 struct i40e_hw *hw = vsi->hw;
681 struct tx_ring *txr = &que->txr;
682 bool more_tx, more_rx;
684 /* Protect against spurious interrupts */
685 if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
690 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
693 more_tx = ixl_txeof(que);
695 ** Make certain that if the stack
696 ** has anything queued the task gets
697 ** scheduled to handle it.
699 if (!drbr_empty(vsi->ifp, txr->br))
703 ixl_set_queue_rx_itr(que);
704 ixl_set_queue_tx_itr(que);
706 if (more_tx || more_rx)
707 taskqueue_enqueue(que->tq, &que->task);
709 ixl_enable_queue(hw, que->me);
/*
 * MSI-X admin-queue interrupt handler: decode ICR0 causes — adminq
 * work, malicious-driver-detect, global reset request (CORER/GLOBR/
 * EMPR), ECC, HMC and PCI exceptions, VFLR — masking each handled cause
 * out of the re-enable mask, then (conditionally) schedules the adminq
 * task and re-arms the adminq interrupt.
 */
715 /*********************************************************************
717 * MSIX Admin Queue Interrupt Service routine
719 **********************************************************************/
721 ixl_msix_adminq(void *arg)
723 struct ixl_pf *pf = arg;
724 struct i40e_hw *hw = &pf->hw;
725 device_t dev = pf->dev;
726 u32 reg, mask, rstat_reg;
727 bool do_task = FALSE;
731 reg = rd32(hw, I40E_PFINT_ICR0);
732 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
734 /* Check on the cause */
735 if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
736 mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK;
740 if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
741 ixl_handle_mdd_event(pf);
742 mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK;
745 if (reg & I40E_PFINT_ICR0_GRST_MASK) {
746 device_printf(dev, "Reset Requested!\n");
747 rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
748 rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
749 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
750 device_printf(dev, "Reset type: ");
752 /* These others might be handled similarly to an EMPR reset */
753 case I40E_RESET_CORER:
756 case I40E_RESET_GLOBR:
759 case I40E_RESET_EMPR:
761 atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
767 /* overload admin queue task to check reset progress */
771 if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) {
772 device_printf(dev, "ECC Error detected!\n");
775 if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
776 reg = rd32(hw, I40E_PFHMC_ERRORINFO);
777 if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
778 device_printf(dev, "HMC Error detected!\n");
779 device_printf(dev, "INFO 0x%08x\n", reg);
780 reg = rd32(hw, I40E_PFHMC_ERRORDATA);
781 device_printf(dev, "DATA 0x%08x\n", reg);
/* Write 0 to clear/ack the latched HMC error info. */
782 wr32(hw, I40E_PFHMC_ERRORINFO, 0);
786 if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) {
787 device_printf(dev, "PCI Exception detected!\n");
791 if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
792 mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
793 taskqueue_enqueue(pf->tq, &pf->vflr_task);
798 taskqueue_enqueue(pf->tq, &pf->adminq);
800 ixl_enable_adminq(hw);
/*
 * Program unicast/multicast promiscuous mode on the VSI from ifnet
 * flags: ALLMULTI (or exceeding MAX_MULTICAST_ADDR filters) enables
 * multicast promisc; IFF_PROMISC enables unicast promisc.
 */
804 ixl_set_promisc(struct ixl_vsi *vsi)
806 struct ifnet *ifp = vsi->ifp;
807 struct i40e_hw *hw = vsi->hw;
809 bool uni = FALSE, multi = FALSE;
811 if (ifp->if_flags & IFF_ALLMULTI)
813 else { /* Need to count the multicast addresses */
814 struct ifmultiaddr *ifma;
816 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
817 if (ifma->ifma_addr->sa_family != AF_LINK)
819 if (mcnt == MAX_MULTICAST_ADDR)
823 if_maddr_runlock(ifp);
826 if (mcnt >= MAX_MULTICAST_ADDR)
828 if (ifp->if_flags & IFF_PROMISC)
/*
 * NOTE(review): `err` from the unicast call is immediately overwritten
 * by the multicast call — the first AQ status is never checked.
 */
831 err = i40e_aq_set_vsi_unicast_promiscuous(hw,
832 vsi->seid, uni, NULL, TRUE);
833 err = i40e_aq_set_vsi_multicast_promiscuous(hw,
834 vsi->seid, multi, NULL);
/*
 * Sync the ifnet multicast list into HW MAC filters.  First counts the
 * AF_LINK entries: if the count reaches MAX_MULTICAST_ADDR it drops the
 * individual MC filters and falls back to multicast promiscuous;
 * otherwise it adds one MC filter per address and pushes them to HW.
 */
838 /*********************************************************************
841 * Routines for multicast and vlan filter management.
843 *********************************************************************/
845 ixl_add_multi(struct ixl_vsi *vsi)
847 struct ifmultiaddr *ifma;
848 struct ifnet *ifp = vsi->ifp;
849 struct i40e_hw *hw = vsi->hw;
852 IOCTL_DEBUGOUT("ixl_add_multi: begin");
856 ** First just get a count, to decide if we
857 ** we simply use multicast promiscuous.
859 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
860 if (ifma->ifma_addr->sa_family != AF_LINK)
864 if_maddr_runlock(ifp);
866 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
867 /* delete existing MC filters */
868 ixl_del_hw_filters(vsi, mcnt);
869 i40e_aq_set_vsi_multicast_promiscuous(hw,
870 vsi->seid, TRUE, NULL);
876 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
877 if (ifma->ifma_addr->sa_family != AF_LINK)
879 ixl_add_mc_filter(vsi,
880 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
883 if_maddr_runlock(ifp);
885 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
886 ixl_add_hw_filters(vsi, flags, mcnt);
889 IOCTL_DEBUGOUT("ixl_add_multi: end");
/*
 * Remove HW multicast filters whose address no longer appears in the
 * ifnet multicast list: for each used MC filter, scan the current list
 * for a match; unmatched filters are flagged IXL_FILTER_DEL and then
 * purged from hardware in one batch.  O(filters × addresses) scan —
 * acceptable at MAX_MULTICAST_ADDR scale.
 */
894 ixl_del_multi(struct ixl_vsi *vsi)
896 struct ifnet *ifp = vsi->ifp;
897 struct ifmultiaddr *ifma;
898 struct ixl_mac_filter *f;
902 IOCTL_DEBUGOUT("ixl_del_multi: begin");
904 /* Search for removed multicast addresses */
906 SLIST_FOREACH(f, &vsi->ftl, next) {
907 if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
909 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
910 if (ifma->ifma_addr->sa_family != AF_LINK)
912 u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
913 if (cmp_etheraddr(f->macaddr, mc_addr)) {
918 if (match == FALSE) {
919 f->flags |= IXL_FILTER_DEL;
924 if_maddr_runlock(ifp);
927 ixl_del_hw_filters(vsi, mcnt);
/*
 * Periodic (1 Hz) watchdog run with the PF lock held: kicks the adminq
 * task, updates stats, fires a software interrupt at every queue, and
 * tracks per-queue `busy` counters — a queue that stays dirty for
 * IXL_MAX_TX_BUSY ticks is marked hung; if ALL queues are hung the
 * (elided) path reinitializes the interface instead of rescheduling.
 */
931 /*********************************************************************
934 * This routine checks for link status,updates statistics,
935 * and runs the watchdog check.
937 * Only runs when the driver is configured UP and RUNNING.
939 **********************************************************************/
942 ixl_local_timer(void *arg)
944 struct ixl_pf *pf = arg;
945 struct i40e_hw *hw = &pf->hw;
946 struct ixl_vsi *vsi = &pf->vsi;
947 struct ixl_queue *que = vsi->queues;
948 device_t dev = pf->dev;
952 mtx_assert(&pf->pf_mtx, MA_OWNED);
954 /* Fire off the adminq task */
955 taskqueue_enqueue(pf->tq, &pf->adminq);
958 ixl_update_stats_counters(pf);
960 /* Check status of the queues */
961 mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
962 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
964 for (int i = 0; i < vsi->num_queues; i++, que++) {
965 /* Any queues with outstanding work get a sw irq */
967 wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
969 ** Each time txeof runs without cleaning, but there
970 ** are uncleaned descriptors it increments busy. If
971 ** we get to 5 we declare it hung.
973 if (que->busy == IXL_QUEUE_HUNG) {
977 if (que->busy >= IXL_MAX_TX_BUSY) {
979 device_printf(dev, "Warning queue %d "
980 "appears to be hung!\n", i);
982 que->busy = IXL_QUEUE_HUNG;
986 /* Only reinit if all queues show hung */
987 if (hung == vsi->num_queues)
990 callout_reset(&pf->timer, hz, ixl_local_timer, pf);
994 device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
/*
 * Propagate the cached link state (set by link interrupt / adminq, not
 * queried here) to the OS: on a down->up edge log speed/duplex/flow
 * control and call if_link_state_change(UP); on up->down, the reverse.
 * NOTE(review): the speed log only distinguishes 40G vs 10G here —
 * other rates would print as 10; confirm against full source.
 */
999 ** Note: this routine updates the OS on the link state
1000 ** the real check of the hardware only happens with
1001 ** a link interrupt.
1004 ixl_update_link_status(struct ixl_pf *pf)
1006 struct ixl_vsi *vsi = &pf->vsi;
1007 struct i40e_hw *hw = &pf->hw;
1008 struct ifnet *ifp = vsi->ifp;
1009 device_t dev = pf->dev;
1012 if (vsi->link_active == FALSE) {
1013 pf->fc = hw->fc.current_mode;
1015 device_printf(dev, "Link is up %d Gbps %s,"
1016 " Flow Control: %s\n",
1018 I40E_LINK_SPEED_40GB)? 40:10),
1019 "Full Duplex", ixl_fc_string[pf->fc]);
1021 vsi->link_active = TRUE;
1022 if_link_state_change(ifp, LINK_STATE_UP);
1024 } else { /* Link down */
1025 if (vsi->link_active == TRUE) {
1027 device_printf(dev, "Link is Down\n");
1028 if_link_state_change(ifp, LINK_STATE_DOWN);
1029 vsi->link_active = FALSE;
/*
 * Quiesce the interface with the PF lock held: stop the watchdog
 * callout, disable ring interrupts and rings, and clear IFF_DRV_RUNNING
 * so the stack stops handing us packets.
 */
1036 /*********************************************************************
1038 * This routine disables all traffic on the adapter by issuing a
1039 * global reset on the MAC and deallocates TX/RX buffers.
1041 **********************************************************************/
1044 ixl_stop_locked(struct ixl_pf *pf)
1046 struct ixl_vsi *vsi = &pf->vsi;
1047 struct ifnet *ifp = vsi->ifp;
1049 INIT_DEBUGOUT("ixl_stop: begin\n");
1051 IXL_PF_LOCK_ASSERT(pf);
1053 /* Stop the local timer */
1054 callout_stop(&pf->timer);
1056 ixl_disable_rings_intr(vsi);
1057 ixl_disable_rings(vsi);
1059 /* Tell the stack that the interface is no longer active */
1060 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
/*
 * Unlocked stop wrapper: takes the PF lock around ixl_stop_locked()
 * (locking lines elided in this fragment), then tears down the queue
 * MSI-X resources and taskqueues outside the lock.
 */
1064 ixl_stop(struct ixl_pf *pf)
1067 ixl_stop_locked(pf);
1070 ixl_teardown_queue_msix(&pf->vsi);
1071 ixl_free_queue_tqs(&pf->vsi);
/*
 * Legacy/MSI interrupt setup: allocate the single shared IRQ resource,
 * attach ixl_intr() as the handler, then create the per-queue deferred
 * tasks/taskqueue and the admin-queue task/taskqueue.  (Despite the
 * surviving header comment saying "MSIX", this is the non-MSIX path.)
 */
1074 /*********************************************************************
1076 * Setup MSIX Interrupt resources and handlers for the VSI
1078 **********************************************************************/
1080 ixl_assign_vsi_legacy(struct ixl_pf *pf)
1082 device_t dev = pf->dev;
1083 struct ixl_vsi *vsi = &pf->vsi;
1084 struct ixl_queue *que = vsi->queues;
1089 pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1090 &rid, RF_SHAREABLE | RF_ACTIVE);
1091 if (pf->res == NULL) {
1092 device_printf(dev, "Unable to allocate"
1093 " bus resource: vsi legacy/msi interrupt\n");
1097 /* Set the handler function */
1098 error = bus_setup_intr(dev, pf->res,
1099 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1100 ixl_intr, pf, &pf->tag);
1103 device_printf(dev, "Failed to register legacy/msi handler\n");
1106 bus_describe_intr(dev, pf->res, pf->tag, "irq0");
1107 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1108 TASK_INIT(&que->task, 0, ixl_handle_que, que);
1109 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1110 taskqueue_thread_enqueue, &que->tq);
1111 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1112 device_get_nameunit(dev));
1113 TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1115 pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1116 taskqueue_thread_enqueue, &pf->tq);
1117 taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1118 device_get_nameunit(dev));
/*
 * Create the admin-queue task, the VFLR task, and the fast taskqueue
 * that services them; frees the taskqueue again if thread start fails.
 */
1124 ixl_setup_adminq_tq(struct ixl_pf *pf)
1126 device_t dev = pf->dev;
1129 /* Tasklet for Admin Queue interrupts */
1130 TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1133 TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
1135 /* Create and start Admin Queue taskqueue */
1136 pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT,
1137 taskqueue_thread_enqueue, &pf->tq);
1139 device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n");
1142 error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq",
1143 device_get_nameunit(dev));
1145 device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n",
1147 taskqueue_free(pf->tq);
/*
 * Create per-queue deferred-start and servicing tasks plus a fast
 * taskqueue for each queue.  With RSS the taskqueue threads are pinned
 * to the queue's RSS bucket CPU; otherwise they float.
 */
1154 ixl_setup_queue_tqs(struct ixl_vsi *vsi)
1156 struct ixl_queue *que = vsi->queues;
1157 device_t dev = vsi->dev;
1163 /* Create queue tasks and start queue taskqueues */
1164 for (int i = 0; i < vsi->num_queues; i++, que++) {
1165 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1166 TASK_INIT(&que->task, 0, ixl_handle_que, que);
1167 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1168 taskqueue_thread_enqueue, &que->tq);
1170 CPU_SETOF(cpu_id, &cpu_mask);
1171 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
1172 &cpu_mask, "%s (bucket %d)",
1173 device_get_nameunit(dev), cpu_id);
1175 taskqueue_start_threads(&que->tq, 1, PI_NET,
1176 "%s (que %d)", device_get_nameunit(dev), que->me);
/* Release the admin-queue taskqueue (NULL-guard lines elided here). */
1184 ixl_free_adminq_tq(struct ixl_pf *pf)
1187 taskqueue_free(pf->tq);
/* Release every per-queue taskqueue created by ixl_setup_queue_tqs(). */
1193 ixl_free_queue_tqs(struct ixl_vsi *vsi)
1195 struct ixl_queue *que = vsi->queues;
1197 for (int i = 0; i < vsi->num_queues; i++, que++) {
1199 taskqueue_free(que->tq);
/*
 * Allocate and wire up the MSI-X admin-queue interrupt (rid 1 /
 * vector 0): bus resource, ixl_msix_adminq() handler, and a descriptive
 * name.  A bus_describe_intr() failure is logged but treated as
 * non-fatal.
 */
1206 ixl_setup_adminq_msix(struct ixl_pf *pf)
1208 device_t dev = pf->dev;
1211 /* Admin IRQ rid is 1, vector is 0 */
1213 /* Get interrupt resource from bus */
1214 pf->res = bus_alloc_resource_any(dev,
1215 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1217 device_printf(dev, "bus_alloc_resource_any() for Admin Queue"
1218 " interrupt failed [rid=%d]\n", rid);
1221 /* Then associate interrupt with handler */
1222 error = bus_setup_intr(dev, pf->res,
1223 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1224 ixl_msix_adminq, pf, &pf->tag);
1227 device_printf(dev, "bus_setup_intr() for Admin Queue"
1228 " interrupt handler failed, error %d\n", error);
1231 error = bus_describe_intr(dev, pf->res, pf->tag, "aq");
1233 /* Probably non-fatal? */
1234 device_printf(dev, "bus_describe_intr() for Admin Queue"
1235 " interrupt name failed, error %d\n", error);
/*
 * Allocate one MSI-X vector per queue (vectors start at 1; vector 0 is
 * the adminq): bus IRQ resource, ixl_msix_que() handler, "q%d" name,
 * and — with RSS — bind the vector to the queue's RSS bucket CPU.
 */
1243 * Allocate interrupt resources from bus and associate an interrupt handler
1244 * to those for the VSI's queues.
1247 ixl_setup_queue_msix(struct ixl_vsi *vsi)
1249 device_t dev = vsi->dev;
1250 struct ixl_queue *que = vsi->queues;
1251 struct tx_ring *txr;
1252 int error, rid, vector = 1;
1254 /* Queue interrupt vector numbers start at 1 (adminq intr is 0) */
1255 for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1259 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1260 RF_SHAREABLE | RF_ACTIVE);
1262 device_printf(dev, "bus_alloc_resource_any() for"
1263 " Queue %d interrupt failed [rid=%d]\n",
1267 /* Set the handler function */
1268 error = bus_setup_intr(dev, que->res,
1269 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1270 ixl_msix_que, que, &que->tag);
1272 device_printf(dev, "bus_setup_intr() for Queue %d"
1273 " interrupt handler failed, error %d\n",
1277 error = bus_describe_intr(dev, que->res, que->tag, "q%d", i);
1279 device_printf(dev, "bus_describe_intr() for Queue %d"
1280 " interrupt name failed, error %d\n",
1283 /* Bind the vector to a CPU */
1285 cpu_id = rss_getcpu(i % rss_getnumbuckets());
1287 error = bus_bind_intr(dev, que->res, cpu_id);
1289 device_printf(dev, "bus_bind_intr() for Queue %d"
1290 " to CPU %d failed, error %d\n",
1291 que->me, cpu_id, error);
/*
 * Hypervisor workaround: force PCI BUSMASTER on and rewrite the MSI-X
 * ENABLE bit in the MSI-X control register so a virtualized host
 * initializes the device correctly.  NOTE(review): the pci_find_cap()
 * return value is not checked here — if the MSI-X capability were
 * absent, `rid` math would target the wrong config offset; confirm the
 * full source guards this.
 */
1300 * When used in a virtualized environment PCI BUSMASTER capability may not be set
1301 * so explicity set it here and rewrite the ENABLE in the MSIX control register
1302 * at this point to cause the host to successfully initialize us.
1305 ixl_set_busmaster(device_t dev)
1310 pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1311 pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
1312 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1314 pci_find_cap(dev, PCIY_MSIX, &rid);
1315 rid += PCIR_MSIX_CTRL;
1316 msix_ctrl = pci_read_config(dev, rid, 2);
1317 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1318 pci_write_config(dev, rid, msix_ctrl, 2);
/*
 * NOTE(review): this chunk is a garbled extraction -- interior lines
 * (function header, braces, returns, and presumably #ifdef RSS markers
 * around the rss_getnumbuckets() calls) are missing, and every line
 * carries a stray original line-number prefix. Code left byte-identical;
 * comments only added. Purpose: probe MSI-X, clamp the queue count by
 * available vectors / CPUs / tunables, and fall back to MSI or legacy
 * INTx when MSI-X cannot be used.
 */
1322 * Allocate MSI/X vectors from the OS.
1323 * Returns 0 for legacy, 1 for MSI, >1 for MSIX.
1326 ixl_init_msix(struct ixl_pf *pf)
1328 device_t dev = pf->dev;
1329 struct i40e_hw *hw = &pf->hw;
1330 int auto_max_queues;
1331 int rid, want, vectors, queues, available;
1333 /* Override by tuneable */
1334 if (!pf->enable_msix)
1337 /* Ensure proper operation in virtualized environment */
1338 ixl_set_busmaster(dev);
1340 /* First try MSI/X */
1341 rid = PCIR_BAR(IXL_BAR);
1342 pf->msix_mem = bus_alloc_resource_any(dev,
1343 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1344 if (!pf->msix_mem) {
1345 /* May not be enabled */
1346 device_printf(pf->dev,
1347 "Unable to map MSIX table\n");
/* Fewer than 2 vectors cannot support queues + adminq; back out. */
1351 available = pci_msix_count(dev);
1352 if (available < 2) {
1353 /* system has msix disabled (0), or only one vector (1) */
1354 bus_release_resource(dev, SYS_RES_MEMORY,
1356 pf->msix_mem = NULL;
1360 /* Clamp max number of queues based on:
1361 * - # of MSI-X vectors available
1362 * - # of cpus available
1363 * - # of queues that can be assigned to the LAN VSI
1365 auto_max_queues = min(mp_ncpus, available - 1);
1366 if (hw->mac.type == I40E_MAC_X722)
1367 auto_max_queues = min(auto_max_queues, 128);
1369 auto_max_queues = min(auto_max_queues, 64);
1371 /* Override with tunable value if tunable is less than autoconfig count */
1372 if ((pf->max_queues != 0) && (pf->max_queues <= auto_max_queues))
1373 queues = pf->max_queues;
1374 /* Use autoconfig amount if that's lower */
1375 else if ((pf->max_queues != 0) && (pf->max_queues > auto_max_queues)) {
1376 device_printf(dev, "ixl_max_queues (%d) is too large, using "
1377 "autoconfig amount (%d)...\n",
1378 pf->max_queues, auto_max_queues);
1379 queues = auto_max_queues;
1381 /* Limit maximum auto-configured queues to 8 if no user value is set */
1383 queues = min(auto_max_queues, 8);
/* NOTE(review): rss_getnumbuckets() use is normally under #ifdef RSS -- markers lost in extraction. */
1386 /* If we're doing RSS, clamp at the number of RSS buckets */
1387 if (queues > rss_getnumbuckets())
1388 queues = rss_getnumbuckets();
1392 ** Want one vector (RX/TX pair) per queue
1393 ** plus an additional for the admin queue.
1396 if (want <= available) /* Have enough */
1399 device_printf(pf->dev,
1400 "MSIX Configuration Problem, "
1401 "%d vectors available but %d wanted!\n",
1403 return (0); /* Will go to Legacy setup */
1406 if (pci_alloc_msix(dev, &vectors) == 0) {
1407 device_printf(pf->dev,
1408 "Using MSIX interrupts with %d vectors\n", vectors);
1410 pf->vsi.num_queues = queues;
1413 * If we're doing RSS, the number of queues needs to
1414 * match the number of RSS buckets that are configured.
1416 * + If there's more queues than RSS buckets, we'll end
1417 * up with queues that get no traffic.
1419 * + If there's more RSS buckets than queues, we'll end
1420 * up having multiple RSS buckets map to the same queue,
1421 * so there'll be some contention.
1423 if (queues != rss_getnumbuckets()) {
1425 "%s: queues (%d) != RSS buckets (%d)"
1426 "; performance will be impacted.\n",
1427 __func__, queues, rss_getnumbuckets());
/* MSI / legacy fallback path: a single queue and a single vector. */
1433 vectors = pci_msi_count(dev);
1434 pf->vsi.num_queues = 1;
1436 pf->enable_msix = 0;
1437 if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
1438 device_printf(pf->dev, "Using an MSI interrupt\n");
1441 device_printf(pf->dev, "Using a Legacy interrupt\n");
1447 * Configure admin queue/misc interrupt cause registers in hardware.
1450 ixl_configure_intr0_msix(struct ixl_pf *pf)
1452 struct i40e_hw *hw = &pf->hw;
1455 /* First set up the adminq - vector 0 */
1456 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
1457 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
1459 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
1460 I40E_PFINT_ICR0_ENA_GRST_MASK |
1461 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
1462 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
1463 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
1464 I40E_PFINT_ICR0_ENA_VFLR_MASK |
1465 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
1466 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1469 * 0x7FF is the end of the queue list.
1470 * This means we won't use MSI-X vector 0 for a queue interrupt
1473 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
1474 /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
1475 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
1477 wr32(hw, I40E_PFINT_DYN_CTL0,
1478 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
1479 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
1481 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
1485 * Configure queue interrupt cause registers in hardware.
1488 ixl_configure_queue_intr_msix(struct ixl_pf *pf)
1490 struct i40e_hw *hw = &pf->hw;
1491 struct ixl_vsi *vsi = &pf->vsi;
1495 for (int i = 0; i < vsi->num_queues; i++, vector++) {
1496 wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
1497 /* First queue type is RX / 0 */
1498 wr32(hw, I40E_PFINT_LNKLSTN(i), i);
1500 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
1501 (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
1502 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1503 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1504 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
1505 wr32(hw, I40E_QINT_RQCTL(i), reg);
1507 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
1508 (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
1509 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
1510 (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
1511 (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
1512 wr32(hw, I40E_QINT_TQCTL(i), reg);
1517 * Configure for MSI single vector operation
1520 ixl_configure_legacy(struct ixl_pf *pf)
1522 struct i40e_hw *hw = &pf->hw;
1525 wr32(hw, I40E_PFINT_ITR0(0), 0);
1526 wr32(hw, I40E_PFINT_ITR0(1), 0);
1528 /* Setup "other" causes */
1529 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
1530 | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
1531 | I40E_PFINT_ICR0_ENA_GRST_MASK
1532 | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
1533 | I40E_PFINT_ICR0_ENA_GPIO_MASK
1534 | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
1535 | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
1536 | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
1537 | I40E_PFINT_ICR0_ENA_VFLR_MASK
1538 | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
1540 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1542 /* SW_ITR_IDX = 0, but don't change INTENA */
1543 wr32(hw, I40E_PFINT_DYN_CTL0,
1544 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
1545 I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
1546 /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
1547 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
1549 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
1550 wr32(hw, I40E_PFINT_LNKLST0, 0);
1552 /* Associate the queue pair to the vector and enable the q int */
1553 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
1554 | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
1555 | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
1556 wr32(hw, I40E_QINT_RQCTL(0), reg);
1558 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
1559 | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
1560 | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
1561 wr32(hw, I40E_QINT_TQCTL(0), reg);
1565 ixl_allocate_pci_resources(struct ixl_pf *pf)
1568 struct i40e_hw *hw = &pf->hw;
1569 device_t dev = pf->dev;
1573 pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1576 if (!(pf->pci_mem)) {
1577 device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
1581 /* Save off the PCI information */
1582 hw->vendor_id = pci_get_vendor(dev);
1583 hw->device_id = pci_get_device(dev);
1584 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1585 hw->subsystem_vendor_id =
1586 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1587 hw->subsystem_device_id =
1588 pci_read_config(dev, PCIR_SUBDEV_0, 2);
1590 hw->bus.device = pci_get_slot(dev);
1591 hw->bus.func = pci_get_function(dev);
1593 /* Save off register access information */
1594 pf->osdep.mem_bus_space_tag =
1595 rman_get_bustag(pf->pci_mem);
1596 pf->osdep.mem_bus_space_handle =
1597 rman_get_bushandle(pf->pci_mem);
1598 pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
1599 pf->osdep.flush_reg = I40E_GLGEN_STAT;
1600 pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
1602 pf->hw.back = &pf->osdep;
1608 * Teardown and release the admin queue/misc vector
1612 ixl_teardown_adminq_msix(struct ixl_pf *pf)
1614 device_t dev = pf->dev;
1617 if (pf->admvec) /* we are doing MSIX */
1618 rid = pf->admvec + 1;
1620 (pf->msix != 0) ? (rid = 1):(rid = 0);
1622 if (pf->tag != NULL) {
1623 bus_teardown_intr(dev, pf->res, pf->tag);
1626 if (pf->res != NULL) {
1627 bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
1635 ixl_teardown_queue_msix(struct ixl_vsi *vsi)
1637 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1638 struct ixl_queue *que = vsi->queues;
1639 device_t dev = vsi->dev;
1642 /* We may get here before stations are setup */
1643 if ((!pf->enable_msix) || (que == NULL))
1646 /* Release all MSIX queue resources */
1647 for (int i = 0; i < vsi->num_queues; i++, que++) {
1648 rid = que->msix + 1;
1649 if (que->tag != NULL) {
1650 error = bus_teardown_intr(dev, que->res, que->tag);
1652 device_printf(dev, "bus_teardown_intr() for"
1653 " Queue %d interrupt failed\n",
1659 if (que->res != NULL) {
1660 error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1662 device_printf(dev, "bus_release_resource() for"
1663 " Queue %d interrupt failed [rid=%d]\n",
1675 ixl_free_pci_resources(struct ixl_pf *pf)
1677 device_t dev = pf->dev;
1680 ixl_teardown_queue_msix(&pf->vsi);
1681 ixl_teardown_adminq_msix(pf);
1684 pci_release_msi(dev);
1686 memrid = PCIR_BAR(IXL_BAR);
1688 if (pf->msix_mem != NULL)
1689 bus_release_resource(dev, SYS_RES_MEMORY,
1690 memrid, pf->msix_mem);
1692 if (pf->pci_mem != NULL)
1693 bus_release_resource(dev, SYS_RES_MEMORY,
1694 PCIR_BAR(0), pf->pci_mem);
1700 ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
1702 /* Display supported media types */
1703 if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
1704 ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1706 if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
1707 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1708 if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
1709 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1710 if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
1711 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
1713 if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
1714 phy_type & (1 << I40E_PHY_TYPE_XFI) ||
1715 phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
1716 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
1718 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
1719 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1720 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
1721 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1722 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
1723 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1725 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
1726 phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
1727 phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
1728 phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
1729 phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
1730 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
1731 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
1732 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
1733 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
1734 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
1736 if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
1737 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1739 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
1740 || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
1741 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
1742 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
1743 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
1744 if (phy_type & (1 << I40E_PHY_TYPE_SFI))
1745 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
1746 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
1747 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1748 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
1749 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1751 if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
1752 ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
1754 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
1755 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
1756 if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
1757 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
/*
 * NOTE(review): garbled extraction -- the function header/return type,
 * the if_alloc() failure return, and several #if/#endif lines are
 * missing, and every line carries a stray line-number prefix. Code left
 * byte-identical; comments only added. Purpose: allocate and fill in the
 * ifnet, advertise capabilities, query the PHY for supported media, and
 * attach the Ethernet interface.
 */
1760 /*********************************************************************
1762 * Setup networking device structure and register an interface.
1764 **********************************************************************/
1766 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
1769 struct i40e_hw *hw = vsi->hw;
1770 struct ixl_queue *que = vsi->queues;
1771 struct i40e_aq_get_phy_abilities_resp abilities;
1772 enum i40e_status_code aq_error = 0;
1774 INIT_DEBUGOUT("ixl_setup_interface: begin");
1776 ifp = vsi->ifp = if_alloc(IFT_ETHER);
1778 device_printf(dev, "can not allocate ifnet structure\n");
1781 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1782 ifp->if_mtu = ETHERMTU;
1783 ifp->if_baudrate = IF_Gbps(40);
1784 ifp->if_init = ixl_init;
1785 ifp->if_softc = vsi;
1786 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1787 ifp->if_ioctl = ixl_ioctl;
1789 #if __FreeBSD_version >= 1100036
1790 if_setgetcounterfn(ifp, ixl_get_counter);
1793 ifp->if_transmit = ixl_mq_start;
1795 ifp->if_qflush = ixl_qflush;
1797 ifp->if_snd.ifq_maxlen = que->num_desc - 2;
1799 vsi->max_frame_size =
1800 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1801 + ETHER_VLAN_ENCAP_LEN;
1803 /* Set TSO limits */
1804 ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
1805 ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
1806 ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
1809 * Tell the upper layer(s) we support long frames.
1811 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1813 ifp->if_capabilities |= IFCAP_HWCSUM;
1814 ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1815 ifp->if_capabilities |= IFCAP_TSO;
1816 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1817 ifp->if_capabilities |= IFCAP_LRO;
1819 /* VLAN capabilties */
1820 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1823 | IFCAP_VLAN_HWCSUM;
1824 ifp->if_capenable = ifp->if_capabilities;
1827 ** Don't turn this on by default, if vlans are
1828 ** created on another pseudo device (eg. lagg)
1829 ** then vlan events are not passed thru, breaking
1830 ** operation, but with HW FILTER off it works. If
1831 ** using vlans directly on the ixl driver you can
1832 ** enable this and get full hardware tag filtering.
1834 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1837 * Specify the media types supported by this adapter and register
1838 * callbacks to update media and link information
1840 ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
/* Retry once after a delay: fiber modules can report UNKNOWN_PHY briefly. */
1843 aq_error = i40e_aq_get_phy_capabilities(hw,
1844 FALSE, TRUE, &abilities, NULL);
1845 /* May need delay to detect fiber correctly */
1846 if (aq_error == I40E_ERR_UNKNOWN_PHY) {
1847 i40e_msec_delay(200);
1848 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
1849 TRUE, &abilities, NULL);
1852 if (aq_error == I40E_ERR_UNKNOWN_PHY)
1853 device_printf(dev, "Unknown PHY type detected!\n");
1856 "Error getting supported media types, err %d,"
1857 " AQ error %d\n", aq_error, hw->aq.asq_last_status);
1861 ixl_add_ifmedia(vsi, abilities.phy_type);
1863 /* Use autoselect media by default */
1864 ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1865 ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
1867 ether_ifattach(ifp, hw->mac.addr);
1873 ** Run when the Admin Queue gets a link state change interrupt.
1876 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1878 struct i40e_hw *hw = &pf->hw;
1879 device_t dev = pf->dev;
1880 struct i40e_aqc_get_link_status *status =
1881 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
1883 /* Request link status from adapter */
1884 hw->phy.get_link_info = TRUE;
1885 i40e_get_link_status(hw, &pf->link_up);
1887 /* Print out message if an unqualified module is found */
1888 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
1889 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
1890 (!(status->link_info & I40E_AQ_LINK_UP)))
1891 device_printf(dev, "Link failed because "
1892 "an unqualified module was detected!\n");
1894 /* Update OS link info */
1895 ixl_update_link_status(pf);
1898 /*********************************************************************
1900 * Get Firmware Switch configuration
1901 * - this will need to be more robust when more complex
1902 * switch configurations are enabled.
1904 **********************************************************************/
1906 ixl_switch_config(struct ixl_pf *pf)
1908 struct i40e_hw *hw = &pf->hw;
1909 struct ixl_vsi *vsi = &pf->vsi;
1910 device_t dev = vsi->dev;
1911 struct i40e_aqc_get_switch_config_resp *sw_config;
1912 u8 aq_buf[I40E_AQ_LARGE_BUF];
1916 memset(&aq_buf, 0, sizeof(aq_buf));
1917 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
1918 ret = i40e_aq_get_switch_config(hw, sw_config,
1919 sizeof(aq_buf), &next, NULL);
1921 device_printf(dev, "aq_get_switch_config() failed, error %d,"
1922 " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
1925 if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
1927 "Switch config: header reported: %d in structure, %d total\n",
1928 sw_config->header.num_reported, sw_config->header.num_total);
1929 for (int i = 0; i < sw_config->header.num_reported; i++) {
1931 "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
1932 sw_config->element[i].element_type,
1933 sw_config->element[i].seid,
1934 sw_config->element[i].uplink_seid,
1935 sw_config->element[i].downlink_seid);
1938 /* Simplified due to a single VSI */
1939 vsi->uplink_seid = sw_config->element[0].uplink_seid;
1940 vsi->downlink_seid = sw_config->element[0].downlink_seid;
1941 vsi->seid = sw_config->element[0].seid;
/*
 * NOTE(review): garbled extraction -- function header, error returns,
 * the DEV_NETMAP #ifdef opening, and assorted braces are missing, and
 * every line carries a stray line-number prefix. Code left byte-identical;
 * comments only added. Purpose: push the VSI context (queue map, TC map,
 * VLAN stripping) to firmware, then program the HMC TX/RX contexts and
 * (re)initialize each queue pair's rings.
 */
1945 /*********************************************************************
1947 * Initialize the VSI: this handles contexts, which means things
1948 * like the number of descriptors, buffer size,
1949 * plus we init the rings thru this function.
1951 **********************************************************************/
1953 ixl_initialize_vsi(struct ixl_vsi *vsi)
1955 struct ixl_pf *pf = vsi->back;
1956 struct ixl_queue *que = vsi->queues;
1957 device_t dev = vsi->dev;
1958 struct i40e_hw *hw = vsi->hw;
1959 struct i40e_vsi_context ctxt;
1963 memset(&ctxt, 0, sizeof(ctxt));
1964 ctxt.seid = vsi->seid;
1965 if (pf->veb_seid != 0)
1966 ctxt.uplink_seid = pf->veb_seid;
1967 ctxt.pf_num = hw->pf_id;
1968 err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
1970 device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
1971 " aq_error %d\n", err, hw->aq.asq_last_status);
1974 ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
1975 "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
1976 "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
1977 "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
1978 ctxt.uplink_seid, ctxt.vsi_number,
1979 ctxt.vsis_allocated, ctxt.vsis_unallocated,
1980 ctxt.flags, ctxt.pf_num, ctxt.vf_num,
1981 ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
1983 ** Set the queue and traffic class bits
1984 ** - when multiple traffic classes are supported
1985 ** this will need to be more robust.
1987 ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1988 ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
1989 /* In contig mode, que_mapping[0] is first queue index used by this VSI */
1990 ctxt.info.queue_mapping[0] = 0;
1992 * This VSI will only use traffic class 0; start traffic class 0's
1993 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
1994 * the driver may not use all of them).
/* bsrl() = index of highest set bit, i.e. floor(log2(num_allocated)). */
1996 tc_queues = bsrl(pf->qtag.num_allocated);
1997 ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
1998 & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
1999 ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
2000 & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
2002 /* Set VLAN receive stripping mode */
2003 ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2004 ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2005 if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2006 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2008 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2010 /* Save VSI number and info for use later */
2011 vsi->vsi_num = ctxt.vsi_number;
2012 bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2014 /* Reset VSI statistics */
2015 ixl_vsi_reset_stats(vsi);
2016 vsi->hw_filters_add = 0;
2017 vsi->hw_filters_del = 0;
2019 ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
2021 err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2023 device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
2024 " aq_error %d\n", err, hw->aq.asq_last_status);
/* Per-queue-pair HMC context programming and ring init. */
2028 for (int i = 0; i < vsi->num_queues; i++, que++) {
2029 struct tx_ring *txr = &que->txr;
2030 struct rx_ring *rxr = &que->rxr;
2031 struct i40e_hmc_obj_txq tctx;
2032 struct i40e_hmc_obj_rxq rctx;
2036 /* Setup the HMC TX Context */
2037 size = que->num_desc * sizeof(struct i40e_tx_desc);
2038 memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2039 tctx.new_context = 1;
2040 tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2041 tctx.qlen = que->num_desc;
2043 tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2044 /* Enable HEAD writeback */
2045 tctx.head_wb_ena = 1;
/* Head writeback lands just past the last descriptor in the same DMA area. */
2046 tctx.head_wb_addr = txr->dma.pa +
2047 (que->num_desc * sizeof(struct i40e_tx_desc));
2048 tctx.rdylist_act = 0;
2049 err = i40e_clear_lan_tx_queue_context(hw, i);
2051 device_printf(dev, "Unable to clear TX context\n");
2054 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2056 device_printf(dev, "Unable to set TX context\n");
2059 /* Associate the ring with this PF */
2060 txctl = I40E_QTX_CTL_PF_QUEUE;
2061 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2062 I40E_QTX_CTL_PF_INDX_MASK);
2063 wr32(hw, I40E_QTX_CTL(i), txctl);
2066 /* Do ring (re)init */
2067 ixl_init_tx_ring(que);
2069 /* Next setup the HMC RX Context */
2070 if (vsi->max_frame_size <= MCLBYTES)
2071 rxr->mbuf_sz = MCLBYTES;
2073 rxr->mbuf_sz = MJUMPAGESIZE;
2075 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2077 /* Set up an RX context for the HMC */
2078 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2079 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2080 /* ignore header split for now */
2081 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2082 rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2083 vsi->max_frame_size : max_rxmax;
2085 rctx.dsize = 1; /* do 32byte descriptors */
2086 rctx.hsplit_0 = 0; /* no HDR split initially */
2087 rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
2088 rctx.qlen = que->num_desc;
2089 rctx.tphrdesc_ena = 1;
2090 rctx.tphwdesc_ena = 1;
2091 rctx.tphdata_ena = 0;
2092 rctx.tphhead_ena = 0;
2093 rctx.lrxqthresh = 2;
2100 err = i40e_clear_lan_rx_queue_context(hw, i);
2103 "Unable to clear RX context %d\n", i);
2106 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2108 device_printf(dev, "Unable to set RX context %d\n", i);
2111 err = ixl_init_rx_ring(que);
2113 device_printf(dev, "Fail in init_rx_ring %d\n", i);
/* NOTE(review): the matching #ifdef DEV_NETMAP opener is a lost line. */
2117 /* preserve queue */
2118 if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2119 struct netmap_adapter *na = NA(vsi->ifp);
2120 struct netmap_kring *kring = &na->rx_rings[i];
2121 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2122 wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2124 #endif /* DEV_NETMAP */
2125 wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
/*
 * NOTE(review): garbled extraction -- function header, lock/unlock calls,
 * `continue` guards and base-pointer checks are missing lines, and every
 * line carries a stray line-number prefix. Code left byte-identical;
 * comments only added. Purpose: free each queue pair's soft structs,
 * descriptor DMA memory and mutexes, then the queue array and the MAC
 * filter list.
 */
2131 /*********************************************************************
2133 * Free all VSI structs.
2135 **********************************************************************/
2137 ixl_free_vsi(struct ixl_vsi *vsi)
2139 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2140 struct ixl_queue *que = vsi->queues;
2142 /* Free station queues */
2146 for (int i = 0; i < vsi->num_queues; i++, que++) {
2147 struct tx_ring *txr = &que->txr;
2148 struct rx_ring *rxr = &que->rxr;
2150 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2153 ixl_free_que_tx(que);
2155 i40e_free_dma_mem(&pf->hw, &txr->dma);
2157 IXL_TX_LOCK_DESTROY(txr);
2159 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2162 ixl_free_que_rx(que);
2164 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2166 IXL_RX_LOCK_DESTROY(rxr);
2168 free(vsi->queues, M_DEVBUF);
2171 /* Free VSI filter list */
2172 ixl_free_mac_filters(vsi);
2176 ixl_free_mac_filters(struct ixl_vsi *vsi)
2178 struct ixl_mac_filter *f;
2180 while (!SLIST_EMPTY(&vsi->ftl)) {
2181 f = SLIST_FIRST(&vsi->ftl);
2182 SLIST_REMOVE_HEAD(&vsi->ftl, next);
/*
 * NOTE(review): garbled extraction -- function header, goto-cleanup
 * labels, error returns and several assignments are missing lines, and
 * every line carries a stray line-number prefix. Code left byte-identical;
 * comments only added. Purpose: per-queue setup of the TX then RX side:
 * mutex, descriptor-ring DMA memory, soft structs, and (TX only) a buf
 * ring; on failure the tail of the function unwinds in reverse order.
 */
2188 * Fill out fields in queue struct and setup tx/rx memory and structs
2191 ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
2193 device_t dev = pf->dev;
2194 struct i40e_hw *hw = &pf->hw;
2195 struct ixl_vsi *vsi = &pf->vsi;
2196 struct tx_ring *txr = &que->txr;
2197 struct rx_ring *rxr = &que->rxr;
2201 /* ERJ: A lot of references to external objects... */
2202 que->num_desc = pf->ringsz;
2207 txr->tail = I40E_QTX_TAIL(que->me);
2209 /* Initialize the TX lock */
2210 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2211 device_get_nameunit(dev), que->me);
2212 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2213 /* Create the TX descriptor ring */
/* Extra u32 beyond the descriptors holds the HEAD writeback value. */
2214 tsize = roundup2((que->num_desc *
2215 sizeof(struct i40e_tx_desc)) +
2216 sizeof(u32), DBA_ALIGN);
2217 if (i40e_allocate_dma_mem(hw,
2218 &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2220 "Unable to allocate TX Descriptor memory\n");
2224 txr->base = (struct i40e_tx_desc *)txr->dma.va;
2225 bzero((void *)txr->base, tsize);
2226 /* Now allocate transmit soft structs for the ring */
2227 if (ixl_allocate_tx_data(que)) {
2229 "Critical Failure setting up TX structures\n");
2233 /* Allocate a buf ring */
2234 txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF,
2235 M_NOWAIT, &txr->mtx);
2236 if (txr->br == NULL) {
2238 "Critical Failure setting up TX buf ring\n");
2243 rsize = roundup2(que->num_desc *
2244 sizeof(union i40e_rx_desc), DBA_ALIGN);
2246 rxr->tail = I40E_QRX_TAIL(que->me);
2248 /* Initialize the RX side lock */
2249 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2250 device_get_nameunit(dev), que->me);
2251 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2253 if (i40e_allocate_dma_mem(hw,
2254 &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2256 "Unable to allocate RX Descriptor memory\n");
2260 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2261 bzero((void *)rxr->base, rsize);
2262 /* Allocate receive soft structs for the ring*/
2263 if (ixl_allocate_rx_data(que)) {
2265 "Critical Failure setting up receive structs\n");
/* Error unwind (labels lost in extraction): free RX then TX resources. */
2273 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2274 if (mtx_initialized(&rxr->mtx))
2275 mtx_destroy(&rxr->mtx);
2277 buf_ring_free(txr->br, M_DEVBUF);
2281 i40e_free_dma_mem(&pf->hw, &txr->dma);
2282 if (mtx_initialized(&txr->mtx))
2283 mtx_destroy(&txr->mtx);
/*
 * NOTE(review): garbled extraction -- function header, the vsi/queue
 * allocation statements, error handling after ixl_setup_queue() and the
 * return are missing lines, and every line carries a stray line-number
 * prefix. Code left byte-identical; comments only added.
 */
2288 /*********************************************************************
2290 * Allocate memory for the VSI (virtual station interface) and their
2291 * associated queues, rings and the descriptors associated with each,
2292 * called only once at attach.
2294 **********************************************************************/
2296 ixl_setup_stations(struct ixl_pf *pf)
2298 device_t dev = pf->dev;
2299 struct ixl_vsi *vsi;
2300 struct ixl_queue *que;
2304 vsi->back = (void *)pf;
2310 /* Get memory for the station queues */
2312 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2313 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2314 device_printf(dev, "Unable to allocate queue memory\n");
/* Then populate each queue via ixl_setup_queue(). */
2319 for (int i = 0; i < vsi->num_queues; i++) {
2320 que = &vsi->queues[i];
2321 error = ixl_setup_queue(que, pf, i);
2330 ** Provide a update to the queue RX
2331 ** interrupt moderation value.
2334 ixl_set_queue_rx_itr(struct ixl_queue *que)
2336 struct ixl_vsi *vsi = que->vsi;
2337 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2338 struct i40e_hw *hw = vsi->hw;
2339 struct rx_ring *rxr = &que->rxr;
2344 /* Idle, do nothing */
2345 if (rxr->bytes == 0)
2348 if (pf->dynamic_rx_itr) {
2349 rx_bytes = rxr->bytes/rxr->itr;
2352 /* Adjust latency range */
2353 switch (rxr->latency) {
2354 case IXL_LOW_LATENCY:
2355 if (rx_bytes > 10) {
2356 rx_latency = IXL_AVE_LATENCY;
2357 rx_itr = IXL_ITR_20K;
2360 case IXL_AVE_LATENCY:
2361 if (rx_bytes > 20) {
2362 rx_latency = IXL_BULK_LATENCY;
2363 rx_itr = IXL_ITR_8K;
2364 } else if (rx_bytes <= 10) {
2365 rx_latency = IXL_LOW_LATENCY;
2366 rx_itr = IXL_ITR_100K;
2369 case IXL_BULK_LATENCY:
2370 if (rx_bytes <= 20) {
2371 rx_latency = IXL_AVE_LATENCY;
2372 rx_itr = IXL_ITR_20K;
2377 rxr->latency = rx_latency;
2379 if (rx_itr != rxr->itr) {
2380 /* do an exponential smoothing */
2381 rx_itr = (10 * rx_itr * rxr->itr) /
2382 ((9 * rx_itr) + rxr->itr);
2383 rxr->itr = rx_itr & IXL_MAX_ITR;
2384 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2385 que->me), rxr->itr);
2387 } else { /* We may have have toggled to non-dynamic */
2388 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2389 vsi->rx_itr_setting = pf->rx_itr;
2390 /* Update the hardware if needed */
2391 if (rxr->itr != vsi->rx_itr_setting) {
2392 rxr->itr = vsi->rx_itr_setting;
2393 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2394 que->me), rxr->itr);
2404 ** Provide a update to the queue TX
2405 ** interrupt moderation value.
2408 ixl_set_queue_tx_itr(struct ixl_queue *que)
2410 struct ixl_vsi *vsi = que->vsi;
2411 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2412 struct i40e_hw *hw = vsi->hw;
2413 struct tx_ring *txr = &que->txr;
2419 /* Idle, do nothing */
2420 if (txr->bytes == 0)
2423 if (pf->dynamic_tx_itr) {
2424 tx_bytes = txr->bytes/txr->itr;
2427 switch (txr->latency) {
2428 case IXL_LOW_LATENCY:
2429 if (tx_bytes > 10) {
2430 tx_latency = IXL_AVE_LATENCY;
2431 tx_itr = IXL_ITR_20K;
2434 case IXL_AVE_LATENCY:
2435 if (tx_bytes > 20) {
2436 tx_latency = IXL_BULK_LATENCY;
2437 tx_itr = IXL_ITR_8K;
2438 } else if (tx_bytes <= 10) {
2439 tx_latency = IXL_LOW_LATENCY;
2440 tx_itr = IXL_ITR_100K;
2443 case IXL_BULK_LATENCY:
2444 if (tx_bytes <= 20) {
2445 tx_latency = IXL_AVE_LATENCY;
2446 tx_itr = IXL_ITR_20K;
2451 txr->latency = tx_latency;
2453 if (tx_itr != txr->itr) {
2454 /* do an exponential smoothing */
2455 tx_itr = (10 * tx_itr * txr->itr) /
2456 ((9 * tx_itr) + txr->itr);
2457 txr->itr = tx_itr & IXL_MAX_ITR;
2458 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2459 que->me), txr->itr);
2462 } else { /* We may have have toggled to non-dynamic */
2463 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2464 vsi->tx_itr_setting = pf->tx_itr;
2465 /* Update the hardware if needed */
2466 if (txr->itr != vsi->tx_itr_setting) {
2467 txr->itr = vsi->tx_itr_setting;
2468 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2469 que->me), txr->itr);
2478 ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
2479 struct sysctl_ctx_list *ctx, const char *sysctl_name)
2481 struct sysctl_oid *tree;
2482 struct sysctl_oid_list *child;
2483 struct sysctl_oid_list *vsi_list;
2485 tree = device_get_sysctl_tree(pf->dev);
2486 child = SYSCTL_CHILDREN(tree);
2487 vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
2488 CTLFLAG_RD, NULL, "VSI Number");
2489 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
2491 ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
2496 * ixl_sysctl_qtx_tail_handler
2497 * Retrieves I40E_QTX_TAIL value from hardware
2501 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
2503 struct ixl_queue *que;
2507 que = ((struct ixl_queue *)oidp->oid_arg1);
2510 val = rd32(que->vsi->hw, que->txr.tail);
2511 error = sysctl_handle_int(oidp, &val, 0, req);
2512 if (error || !req->newptr)
2518 * ixl_sysctl_qrx_tail_handler
2519 * Retrieves I40E_QRX_TAIL value from hardware
2523 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
2525 struct ixl_queue *que;
2529 que = ((struct ixl_queue *)oidp->oid_arg1);
2532 val = rd32(que->vsi->hw, que->rxr.tail);
2533 error = sysctl_handle_int(oidp, &val, 0, req);
2534 if (error || !req->newptr)
2541  * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
2542  * Writes to the ITR registers immediately.
/*
 * sysctl handler: validates and applies a new static TX interrupt
 * throttling rate. Rejected while dynamic TX ITR is enabled, and
 * clamped to [0, IXL_MAX_ITR].
 */
2545 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
2547 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2548 device_t dev = pf->dev;
2550 int requested_tx_itr;
2552 requested_tx_itr = pf->tx_itr;
2553 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
/* Plain read, or error: nothing to apply. */
2554 if ((error) || (req->newptr == NULL))
/* Static ITR can't be set while the driver adjusts ITR dynamically. */
2556 if (pf->dynamic_tx_itr) {
2558 "Cannot set TX itr value while dynamic TX itr is enabled\n");
2561 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
2563 "Invalid TX itr value; value must be between 0 and %d\n",
/* Commit the new value and push it to the hardware registers now. */
2568 pf->tx_itr = requested_tx_itr;
2569 ixl_configure_tx_itr(pf);
2575  * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
2576  * Writes to the ITR registers immediately.
/*
 * sysctl handler: RX counterpart of ixl_sysctl_pf_tx_itr; same
 * validation (dynamic-mode rejection, [0, IXL_MAX_ITR] range).
 */
2579 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
2581 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2582 device_t dev = pf->dev;
2584 int requested_rx_itr;
2586 requested_rx_itr = pf->rx_itr;
2587 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
2588 if ((error) || (req->newptr == NULL))
2590 if (pf->dynamic_rx_itr) {
2592 "Cannot set RX itr value while dynamic RX itr is enabled\n");
2595 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
2597 "Invalid RX itr value; value must be between 0 and %d\n",
2602 pf->rx_itr = requested_rx_itr;
2603 ixl_configure_rx_itr(pf);
/*
 * Register all driver / per-queue / MAC statistics as sysctls for the PF.
 * Layout: driver counters at the device root, a "pf" VSI node with
 * ethernet stats, one "que%d" node per queue, and a "mac" node with
 * the port-level hardware stats.
 */
2609 ixl_add_hw_stats(struct ixl_pf *pf)
2611 device_t dev = pf->dev;
2612 struct ixl_vsi *vsi = &pf->vsi;
2613 struct ixl_queue *queues = vsi->queues;
2614 struct i40e_hw_port_stats *pf_stats = &pf->stats;
2616 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2617 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2618 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2619 struct sysctl_oid_list *vsi_list;
2621 struct sysctl_oid *queue_node;
2622 struct sysctl_oid_list *queue_list;
2624 struct tx_ring *txr;
2625 struct rx_ring *rxr;
2626 char queue_namebuf[QUEUE_NAME_LEN];
2628 /* Driver statistics */
2629 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2630 CTLFLAG_RD, &pf->watchdog_events,
2631 "Watchdog timeouts");
2632 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2633 CTLFLAG_RD, &pf->admin_irq,
2634 "Admin Queue IRQ Handled");
/* Creates the "pf" VSI node; its children list anchors the queue nodes. */
2636 ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
2637 vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
2639 /* Queue statistics */
2640 for (int q = 0; q < vsi->num_queues; q++) {
2641 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2642 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
2643 OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
2644 queue_list = SYSCTL_CHILDREN(queue_node);
2646 txr = &(queues[q].txr);
2647 rxr = &(queues[q].rxr);
2649 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2650 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2651 "m_defrag() failed");
2652 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2653 CTLFLAG_RD, &(queues[q].irqs),
2654 "irqs on this queue");
2655 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2656 CTLFLAG_RD, &(queues[q].tso),
2658 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
2659 CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
2660 "Driver tx dma failure in xmit");
2661 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2662 CTLFLAG_RD, &(txr->no_desc),
2663 "Queue No Descriptor Available");
2664 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2665 CTLFLAG_RD, &(txr->total_packets),
2666 "Queue Packets Transmitted");
2667 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2668 CTLFLAG_RD, &(txr->tx_bytes),
2669 "Queue Bytes Transmitted");
2670 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2671 CTLFLAG_RD, &(rxr->rx_packets),
2672 "Queue Packets Received");
2673 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2674 CTLFLAG_RD, &(rxr->rx_bytes),
2675 "Queue Bytes Received");
2676 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err",
2677 CTLFLAG_RD, &(rxr->desc_errs),
2678 "Queue Rx Descriptor Errors");
2679 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
2680 CTLFLAG_RD, &(rxr->itr), 0,
2681 "Queue Rx ITR Interval");
2682 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
2683 CTLFLAG_RD, &(txr->itr), 0,
2684 "Queue Tx ITR Interval");
/* NOTE(review): the three "not Done" descriptions below look copy-pasted
 * for rx_next_refresh / rx_next_check — likely should describe those
 * fields instead; confirm against upstream before changing. */
2686 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done",
2687 CTLFLAG_RD, &(rxr->not_done),
2688 "Queue Rx Descriptors not Done");
2689 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh",
2690 CTLFLAG_RD, &(rxr->next_refresh), 0,
2691 "Queue Rx Descriptors not Done");
2692 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check",
2693 CTLFLAG_RD, &(rxr->next_check), 0,
2694 "Queue Rx Descriptors not Done");
/* Live register reads go through the handler procs defined above. */
2695 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
2696 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2697 sizeof(struct ixl_queue),
2698 ixl_sysctl_qtx_tail_handler, "IU",
2699 "Queue Transmit Descriptor Tail");
2700 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
2701 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2702 sizeof(struct ixl_queue),
2703 ixl_sysctl_qrx_tail_handler, "IU",
2704 "Queue Receive Descriptor Tail");
2709 ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
/*
 * Register the standard i40e ethernet statistics (rx/tx byte and
 * packet counters) as read-only UQUAD sysctls under 'child'.
 * Driven by a {stat pointer, name, description} table terminated by
 * a NULL stat pointer.
 */
2713 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
2714 struct sysctl_oid_list *child,
2715 struct i40e_eth_stats *eth_stats)
2717 struct ixl_sysctl_info ctls[] =
2719 {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2720 {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
2721 "Unicast Packets Received"},
2722 {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
2723 "Multicast Packets Received"},
2724 {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
2725 "Broadcast Packets Received"},
2726 {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
2727 {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2728 {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2729 {&eth_stats->tx_multicast, "mcast_pkts_txd",
2730 "Multicast Packets Transmitted"},
2731 {&eth_stats->tx_broadcast, "bcast_pkts_txd",
2732 "Broadcast Packets Transmitted"},
/* Walk the table until the NULL sentinel entry. */
2737 struct ixl_sysctl_info *entry = ctls;
2738 while (entry->stat != 0)
2740 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
2741 CTLFLAG_RD, entry->stat,
2742 entry->description);
/*
 * Register port-level MAC statistics under a "mac" sysctl node:
 * first the embedded ethernet stats (shared helper above), then the
 * MAC-specific error, size-bucket, and flow-control counters via a
 * sentinel-terminated table.
 */
2748 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
2749 struct sysctl_oid_list *child,
2750 struct i40e_hw_port_stats *stats)
2752 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
2753 CTLFLAG_RD, NULL, "Mac Statistics");
2754 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
2756 struct i40e_eth_stats *eth_stats = &stats->eth;
2757 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
2759 struct ixl_sysctl_info ctls[] =
2761 {&stats->crc_errors, "crc_errors", "CRC Errors"},
2762 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
2763 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
2764 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
2765 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
2766 /* Packet Reception Stats */
2767 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
2768 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
2769 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
2770 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
2771 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
2772 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
2773 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
2774 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
2775 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
2776 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
2777 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
2778 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
2779 /* Packet Transmission Stats */
2780 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
2781 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
2782 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
2783 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
2784 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
2785 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
2786 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
2788 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
2789 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
2790 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
2791 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
/* Same sentinel-walk pattern as ixl_add_sysctls_eth_stats. */
2796 struct ixl_sysctl_info *entry = ctls;
2797 while (entry->stat != 0)
2799 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
2800 CTLFLAG_RD, entry->stat,
2801 entry->description);
/*
 * Program the RSS hash key. On X722 the key is set via an Admin Queue
 * command; on other MACs it is written directly to the PFQF_HKEY
 * registers. The two rss_seed declarations below are presumably the
 * two arms of an RSS kernel-option #ifdef (preprocessor lines not
 * visible in this excerpt) — with RSS the key comes from rss_getkey(),
 * otherwise a fixed seed is used.
 */
2807 ixl_set_rss_key(struct ixl_pf *pf)
2809 struct i40e_hw *hw = &pf->hw;
2810 struct ixl_vsi *vsi = &pf->vsi;
2811 device_t dev = pf->dev;
2812 enum i40e_status_code status;
2814 u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
2816 u32 rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687,
2817 0x183cfd8c, 0xce880440, 0x580cbc3c,
2818 0x35897377, 0x328b25e1, 0x4fa98922,
2819 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1,
2824 /* Fetch the configured RSS key */
2825 rss_getkey((uint8_t *) &rss_seed);
2827 /* Fill out hash function seed */
2828 if (hw->mac.type == I40E_MAC_X722) {
2829 struct i40e_aqc_get_set_rss_key_data key_data;
/* 40 bytes = standard RSS key portion of the AQ key structure. */
2830 bcopy(rss_seed, key_data.standard_rss_key, 40);
2831 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
2833 device_printf(dev, "i40e_aq_set_rss_key status %s, error %s\n",
2834 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
2836 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
2837 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
2842  * Configure enabled PCTYPES for RSS.
/*
 * Build the hash-enable (HENA) bitmask of packet classifier types and
 * write it into the two 32-bit PFQF_HENA registers. With the RSS
 * kernel option, the mask is derived from rss_gethashconfig();
 * otherwise the driver default (IXL_DEFAULT_RSS_HENA) is used —
 * the #ifdef lines are not visible in this excerpt.
 */
2845 ixl_set_rss_pctypes(struct ixl_pf *pf)
2847 struct i40e_hw *hw = &pf->hw;
2848 u64 set_hena = 0, hena;
2851 u32 rss_hash_config;
2853 rss_hash_config = rss_gethashconfig();
2854 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2855 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2856 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2857 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2858 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2859 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2860 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2861 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
/* NOTE(review): IPV6_EX maps to the FRAG_IPV6 pctype here — confirm
 * this is the intended pairing against the upstream driver. */
2862 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2863 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2864 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2865 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2866 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2867 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
2869 set_hena = IXL_DEFAULT_RSS_HENA;
/* Read current 64-bit HENA (split across two registers)... */
2871 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
2872 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
/* ...and write the updated value back, low half then high half. */
2874 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
2875 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
/*
 * Populate the RSS hash lookup table (HLUT), mapping each indirection
 * entry to a queue. With the RSS option the mapping comes from the
 * kernel's indirection table; otherwise queues are assigned round-robin.
 * X722 programs the LUT through an Admin Queue command; other MACs
 * write the PFQF_HLUT registers directly. (The hlut_buf declaration
 * and the store into it are in lines not visible in this excerpt.)
 */
2880 ixl_set_rss_hlut(struct ixl_pf *pf)
2882 struct i40e_hw *hw = &pf->hw;
2883 device_t dev = pf->dev;
2884 struct ixl_vsi *vsi = &pf->vsi;
2886 int lut_entry_width;
2888 enum i40e_status_code status;
/* X722 uses 7-bit LUT entries; otherwise use the capability-reported width. */
2890 if (hw->mac.type == I40E_MAC_X722)
2891 lut_entry_width = 7;
2893 lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
2895 /* Populate the LUT with max no. of queues in round robin fashion */
2897 for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
2900  * Fetch the RSS bucket id for the given indirection entry.
2901  * Cap it at the number of configured buckets (which is
2904 que_id = rss_get_indirection_to_bucket(i);
2905 que_id = que_id % vsi->num_queues;
2907 que_id = i % vsi->num_queues;
/* Mask the queue id down to the LUT entry width. */
2909 lut = (que_id & ((0x1 << lut_entry_width) - 1));
2913 if (hw->mac.type == I40E_MAC_X722) {
2914 status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
2916 device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
2917 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
/* Four 8-bit LUT entries per 32-bit register write. */
2919 for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
2920 wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
2926 ** Setup the PF's RSS parameters.
/* Orchestrates the three RSS setup steps: key, enabled pctypes, LUT. */
2929 ixl_config_rss(struct ixl_pf *pf)
2931 ixl_set_rss_key(pf);
2932 ixl_set_rss_pctypes(pf);
2933 ixl_set_rss_hlut(pf);
2937 ** This routine is run via an vlan config EVENT,
2938 ** it enables us to use the HW Filter table since
2939 ** we can get the vlan id. This just creates the
2940 ** entry in the soft version of the VFTA, init will
2941 ** repopulate the real table.
2944 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2946 struct ixl_vsi *vsi = ifp->if_softc;
2947 struct i40e_hw *hw = vsi->hw;
2948 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2950 if (ifp->if_softc != arg) /* Not our event */
/* VLAN IDs are 12 bits; 0 and >4095 are rejected. */
2953 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
/* Add a MAC+VLAN filter for our MAC address on this tag. */
2958 ixl_add_filter(vsi, hw->mac.addr, vtag);
2963 ** This routine is run via an vlan
2964 ** unconfig EVENT, remove our entry
2965 ** in the soft vfta.
2968 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2970 struct ixl_vsi *vsi = ifp->if_softc;
2971 struct i40e_hw *hw = vsi->hw;
2972 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
/* Ignore events that are not for this interface. */
2974 if (ifp->if_softc != arg)
2977 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
/* Remove the MAC+VLAN filter added in ixl_register_vlan. */
2982 ixl_del_filter(vsi, hw->mac.addr, vtag);
2987 ** This routine updates vlan filters, called by init
2988 ** it scans the filter table and then updates the hw
2989 ** after a soft reset.
2992 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
2994 struct ixl_mac_filter *f;
/* Nothing to do if no VLANs are configured. */
2997 if (vsi->num_vlans == 0)
3000 ** Scan the filter list for vlan entries,
3001 ** mark them for addition and then call
3002 ** for the AQ update.
3004 SLIST_FOREACH(f, &vsi->ftl, next) {
3005 if (f->flags & IXL_FILTER_VLAN) {
/* num_vlans > 0 but no VLAN filters found — inconsistent soft state. */
3013 printf("setup vlan: no filters found!\n");
/* Re-push all marked VLAN filters to hardware in one AQ batch. */
3016 flags = IXL_FILTER_VLAN;
3017 flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3018 ixl_add_hw_filters(vsi, flags, cnt);
3023 ** Initialize filter list and add filters that the hardware
3024 ** needs to know about.
3026 ** Requires VSI's filter list & seid to be set before calling.
3029 ixl_init_filters(struct ixl_vsi *vsi)
3031 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3033 /* Add broadcast address */
3034 ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3037  * Prevent Tx flow control frames from being sent out by
3038  * non-firmware transmitters.
3039  * This affects every VSI in the PF.
/* Optional, controlled by the enable_tx_fc_filter tunable/flag. */
3041 if (pf->enable_tx_fc_filter)
3042 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
3046 ** This routine adds mulicast filters
/* Adds a soft-list multicast filter entry (VLAN-agnostic); no-op if a
 * matching entry already exists. Hardware programming happens later
 * via ixl_add_hw_filters. */
3049 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3051 struct ixl_mac_filter *f;
3053 /* Does one already exist */
3054 f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3058 f = ixl_get_filter(vsi);
3060 printf("WARNING: no filter available!!\n");
3063 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3064 f->vlan = IXL_VLAN_ANY;
3065 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
/* Re-program all in-use soft filters to hardware (e.g. after a reset). */
3072 ixl_reconfigure_filters(struct ixl_vsi *vsi)
3074 ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3078 ** This routine adds macvlan filters
/* Add a MAC+VLAN filter to the soft list and immediately program it to
 * hardware. Handles the ANY->0 filter transition when the first real
 * VLAN is registered. No-op if an identical filter already exists. */
3081 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3083 struct ixl_mac_filter *f, *tmp;
3087 DEBUGOUT("ixl_add_filter: begin");
3092 /* Does one already exist */
3093 f = ixl_find_filter(vsi, macaddr, vlan);
3097 ** Is this the first vlan being registered, if so we
3098 ** need to remove the ANY filter that indicates we are
3099 ** not in a vlan, and replace that with a 0 filter.
3101 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3102 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
/* Note: recursive call to self, with vlan 0 (untagged). */
3104 ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3105 ixl_add_filter(vsi, macaddr, 0);
3109 f = ixl_get_filter(vsi);
3111 device_printf(dev, "WARNING: no filter available!!\n");
3114 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3116 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3117 if (f->vlan != IXL_VLAN_ANY)
3118 f->flags |= IXL_FILTER_VLAN;
/* Push this single new filter to hardware right away. */
3122 ixl_add_hw_filters(vsi, f->flags, 1);
/* Mark a MAC+VLAN filter for deletion and remove it from hardware.
 * When the last VLAN goes away, swaps the untagged (vlan 0) filter
 * back to the VLAN_ANY form. */
3127 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3129 struct ixl_mac_filter *f;
3131 f = ixl_find_filter(vsi, macaddr, vlan);
3135 f->flags |= IXL_FILTER_DEL;
3136 ixl_del_hw_filters(vsi, 1);
3139 /* Check if this is the last vlan removal */
3140 if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3141 /* Switch back to a non-vlan filter */
3142 ixl_del_filter(vsi, macaddr, 0);
3143 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3149 ** Find the filter with both matching mac addr and vlan id
/* Linear scan of the VSI soft filter list; both MAC and VLAN must match. */
3151 struct ixl_mac_filter *
3152 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3154 struct ixl_mac_filter *f;
3157 SLIST_FOREACH(f, &vsi->ftl, next) {
3158 if (!cmp_etheraddr(f->macaddr, macaddr))
3160 if (f->vlan == vlan) {
3172 ** This routine takes additions to the vsi filter
3173 ** table and creates an Admin Queue call to create
3174 ** the filters in the hardware.
/* Batch-program up to 'cnt' soft-list filters whose flags exactly
 * match 'flags' into hardware via one i40e_aq_add_macvlan call.
 * Caller must hold the PF lock. */
3177 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3179 struct i40e_aqc_add_macvlan_element_data *a, *b;
3180 struct ixl_mac_filter *f;
3189 IXL_PF_LOCK_ASSERT(pf);
/* Temporary AQ element array, one slot per filter to add. */
3191 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3192 M_DEVBUF, M_NOWAIT | M_ZERO);
3194 device_printf(dev, "add_hw_filters failed to get memory\n");
3199 ** Scan the filter list, each time we find one
3200 ** we add it to the admin queue array and turn off
/* Note: exact flag equality (==), not a mask test. */
3203 SLIST_FOREACH(f, &vsi->ftl, next) {
3204 if (f->flags == flags) {
3205 b = &a[j]; // a pox on fvl long names :)
3206 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3207 if (f->vlan == IXL_VLAN_ANY) {
3209 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3211 b->vlan_tag = f->vlan;
3214 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
/* Clear ADD so this entry is not re-sent next time. */
3215 f->flags &= ~IXL_FILTER_ADD;
3222 err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3224 device_printf(dev, "aq_add_macvlan err %d, "
3225 "aq_error %d\n", err, hw->aq.asq_last_status);
3227 vsi->hw_filters_add += j;
3234 ** This routine takes removals in the vsi filter
3235 ** table and creates an Admin Queue call to delete
3236 ** the filters in the hardware.
/* Collect up to 'cnt' soft-list filters marked IXL_FILTER_DEL, unlink
 * them from the list, and remove them from hardware in one AQ call.
 * ENOENT from the AQ is tolerated (filter already gone). */
3239 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3241 struct i40e_aqc_remove_macvlan_element_data *d, *e;
3245 struct ixl_mac_filter *f, *f_temp;
3248 DEBUGOUT("ixl_del_hw_filters: begin\n");
3254 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3255 M_DEVBUF, M_NOWAIT | M_ZERO);
3257 printf("del hw filter failed to get memory\n");
/* SAFE variant: entries are removed from the list while iterating. */
3261 SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3262 if (f->flags & IXL_FILTER_DEL) {
3263 e = &d[j]; // a pox on fvl long names :)
3264 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3265 e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3266 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3267 /* delete entry from vsi list */
3268 SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3276 err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3277 if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
/* Count how many elements actually succeeded (error_code == 0). */
3279 for (int i = 0; i < j; i++)
3280 sc += (!d[i].error_code);
3281 vsi->hw_filters_del += sc;
3283 "Failed to remove %d/%d filters, aq error %d\n",
3284 j - sc, j, hw->aq.asq_last_status);
3286 vsi->hw_filters_del += j;
3290 DEBUGOUT("ixl_del_hw_filters: end\n");
/* Enable one TX queue: translate the VSI-relative queue index to the
 * PF-global index, request enable via QTX_ENA, and poll up to ~100ms
 * for the hardware status bit to confirm. */
3295 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3297 struct i40e_hw *hw = &pf->hw;
3302 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3304 ixl_dbg(pf, IXL_DBG_EN_DIS,
3305 "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
/* Required pre-enable handshake before touching QTX_ENA. */
3308 i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
3310 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3311 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3312 I40E_QTX_ENA_QENA_STAT_MASK;
3313 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
3314 /* Verify the enable took */
3315 for (int j = 0; j < 10; j++) {
3316 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3317 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3319 i40e_msec_delay(10);
3321 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3322 device_printf(pf->dev, "TX queue %d still disabled!\n",
/* Enable one RX queue: same request-and-poll pattern as
 * ixl_enable_tx_ring, using the QRX_ENA register (no pre-config
 * handshake needed for RX). */
3331 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3333 struct i40e_hw *hw = &pf->hw;
3338 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3340 ixl_dbg(pf, IXL_DBG_EN_DIS,
3341 "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
3344 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3345 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3346 I40E_QRX_ENA_QENA_STAT_MASK;
3347 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
3348 /* Verify the enable took */
3349 for (int j = 0; j < 10; j++) {
3350 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3351 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3353 i40e_msec_delay(10);
3355 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3356 device_printf(pf->dev, "RX queue %d still disabled!\n",
/* Enable both TX and RX halves of one queue pair; stops after the TX
 * half if it fails (the callee already printed the error). */
3365 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3369 error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
3370 /* Called function already prints error message */
3373 error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
3377 /* For PF VSI only */
/* Enable every queue pair of the PF's LAN VSI. */
3379 ixl_enable_rings(struct ixl_vsi *vsi)
3381 struct ixl_pf *pf = vsi->back;
3384 for (int i = 0; i < vsi->num_queues; i++) {
3385 error = ixl_enable_ring(pf, &pf->qtag, i);
/* Disable one TX queue: pre-config handshake, clear the enable-request
 * bit, then poll until the hardware status bit clears (up to ~100ms). */
3394 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3396 struct i40e_hw *hw = &pf->hw;
3401 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3403 i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
/* Brief settle time after the pre-config call. */
3404 i40e_usec_delay(500);
3406 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3407 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3408 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
3409 /* Verify the disable took */
3410 for (int j = 0; j < 10; j++) {
3411 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3412 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3414 i40e_msec_delay(10);
3416 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3417 device_printf(pf->dev, "TX queue %d still enabled!\n",
/* Disable one RX queue: clear the enable-request bit in QRX_ENA and
 * poll until the status bit clears. */
3426 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3428 struct i40e_hw *hw = &pf->hw;
3433 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3435 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3436 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3437 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
3438 /* Verify the disable took */
3439 for (int j = 0; j < 10; j++) {
3440 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3441 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3443 i40e_msec_delay(10);
3445 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3446 device_printf(pf->dev, "RX queue %d still enabled!\n",
/* Disable both TX and RX halves of one queue pair (TX first). */
3455 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3459 error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
3460 /* Called function already prints error message */
3463 error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
3467 /* For PF VSI only */
/* Disable every queue pair of the PF's LAN VSI. */
3469 ixl_disable_rings(struct ixl_vsi *vsi)
3471 struct ixl_pf *pf = vsi->back;
3474 for (int i = 0; i < vsi->num_queues; i++) {
3475 error = ixl_disable_ring(pf, &pf->qtag, i);
3484  * ixl_handle_mdd_event
3486  * Called from interrupt handler to identify possibly malicious vfs
3487  * (But also detects events from the PF, as well)
/* Reads the global and PF-scoped Malicious Driver Detection registers,
 * logs any TX/RX events, write-1-clears them, and re-arms the MDD
 * interrupt cause in ICR0_ENA on the way out. */
3490 ixl_handle_mdd_event(struct ixl_pf *pf)
3492 struct i40e_hw *hw = &pf->hw;
3493 device_t dev = pf->dev;
3494 bool mdd_detected = false;
3495 bool pf_mdd_detected = false;
3498 /* find what triggered the MDD event */
3499 reg = rd32(hw, I40E_GL_MDET_TX);
3500 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3501 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3502 I40E_GL_MDET_TX_PF_NUM_SHIFT;
3503 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3504 I40E_GL_MDET_TX_EVENT_SHIFT;
3505 u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3506 I40E_GL_MDET_TX_QUEUE_SHIFT;
3508 "Malicious Driver Detection event %d"
3509 " on TX queue %d, pf number %d\n",
3510 event, queue, pf_num);
/* Write-1-to-clear the whole event register. */
3511 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3512 mdd_detected = true;
3514 reg = rd32(hw, I40E_GL_MDET_RX);
3515 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3516 u8 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3517 I40E_GL_MDET_RX_FUNCTION_SHIFT;
3518 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3519 I40E_GL_MDET_RX_EVENT_SHIFT;
3520 u16 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3521 I40E_GL_MDET_RX_QUEUE_SHIFT;
3523 "Malicious Driver Detection event %d"
3524 " on RX queue %d, pf number %d\n",
3525 event, queue, pf_num);
3526 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3527 mdd_detected = true;
/* Check whether the event was attributed to this PF specifically. */
3531 reg = rd32(hw, I40E_PF_MDET_TX);
3532 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3533 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3535 "MDD TX event is for this function!");
3536 pf_mdd_detected = true;
3538 reg = rd32(hw, I40E_PF_MDET_RX);
3539 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3540 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3542 "MDD RX event is for this function!");
3543 pf_mdd_detected = true;
3547 /* re-enable mdd interrupt cause */
3548 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3549 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3550 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/* Enable interrupts for the VSI: per-queue vectors under MSI-X,
 * otherwise the single legacy/INTx cause. */
3555 ixl_enable_intr(struct ixl_vsi *vsi)
3557 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3558 struct i40e_hw *hw = vsi->hw;
3559 struct ixl_queue *que = vsi->queues;
3561 if (pf->enable_msix) {
3562 for (int i = 0; i < vsi->num_queues; i++, que++)
3563 ixl_enable_queue(hw, que->me);
3565 ixl_enable_legacy(hw);
/* Mask the per-queue interrupt vectors for all of the VSI's queues. */
3569 ixl_disable_rings_intr(struct ixl_vsi *vsi)
3571 struct i40e_hw *hw = vsi->hw;
3572 struct ixl_queue *que = vsi->queues;
3574 for (int i = 0; i < vsi->num_queues; i++, que++)
3575 ixl_disable_queue(hw, que->me);
/* Disable non-queue interrupts: the admin queue vector under MSI-X,
 * otherwise the legacy/INTx cause. */
3579 ixl_disable_intr(struct ixl_vsi *vsi)
3581 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3582 struct i40e_hw *hw = vsi->hw;
3584 if (pf->enable_msix)
3585 ixl_disable_adminq(hw);
3587 ixl_disable_legacy(hw);
/* Unmask the admin queue interrupt (DYN_CTL0: INTENA + CLEARPBA,
 * no ITR throttling). */
3591 ixl_enable_adminq(struct i40e_hw *hw)
3595 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3596 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3597 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3598 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/* Mask the admin queue interrupt by writing DYN_CTL0 without INTENA. */
3603 ixl_disable_adminq(struct i40e_hw *hw)
3607 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3608 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/* Unmask one queue's MSI-X interrupt vector (DYN_CTLN(id)). */
3613 ixl_enable_queue(struct i40e_hw *hw, int id)
3617 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3618 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3619 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3620 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/* Mask one queue's MSI-X interrupt vector. */
3624 ixl_disable_queue(struct i40e_hw *hw, int id)
3628 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3629 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/* Unmask the legacy/INTx interrupt (same DYN_CTL0 write as the
 * admin queue enable — legacy mode shares that cause). */
3633 ixl_enable_legacy(struct i40e_hw *hw)
3636 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3637 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3638 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3639 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/* Mask the legacy/INTx interrupt. */
3643 ixl_disable_legacy(struct i40e_hw *hw)
3647 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3648 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/*
 * Refresh the PF's port-level hardware statistics from the GLPRT
 * registers. Each ixl_stat_update32/48 call reads a (32- or split
 * 48-bit) counter and accumulates the delta against the saved offset;
 * stat_offsets_loaded tells the helpers whether offsets are valid yet.
 * Finishes by refreshing the LAN VSI stats and each enabled VF's
 * ethernet stats.
 */
3652 ixl_update_stats_counters(struct ixl_pf *pf)
3654 struct i40e_hw *hw = &pf->hw;
3655 struct ixl_vsi *vsi = &pf->vsi;
3658 struct i40e_hw_port_stats *nsd = &pf->stats;
3659 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3661 /* Update hw stats */
3662 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3663 pf->stat_offsets_loaded,
3664 &osd->crc_errors, &nsd->crc_errors);
3665 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3666 pf->stat_offsets_loaded,
3667 &osd->illegal_bytes, &nsd->illegal_bytes);
3668 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3669 I40E_GLPRT_GORCL(hw->port),
3670 pf->stat_offsets_loaded,
3671 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3672 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3673 I40E_GLPRT_GOTCL(hw->port),
3674 pf->stat_offsets_loaded,
3675 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3676 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3677 pf->stat_offsets_loaded,
3678 &osd->eth.rx_discards,
3679 &nsd->eth.rx_discards);
3680 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3681 I40E_GLPRT_UPRCL(hw->port),
3682 pf->stat_offsets_loaded,
3683 &osd->eth.rx_unicast,
3684 &nsd->eth.rx_unicast);
3685 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3686 I40E_GLPRT_UPTCL(hw->port),
3687 pf->stat_offsets_loaded,
3688 &osd->eth.tx_unicast,
3689 &nsd->eth.tx_unicast);
3690 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3691 I40E_GLPRT_MPRCL(hw->port),
3692 pf->stat_offsets_loaded,
3693 &osd->eth.rx_multicast,
3694 &nsd->eth.rx_multicast);
3695 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3696 I40E_GLPRT_MPTCL(hw->port),
3697 pf->stat_offsets_loaded,
3698 &osd->eth.tx_multicast,
3699 &nsd->eth.tx_multicast);
3700 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3701 I40E_GLPRT_BPRCL(hw->port),
3702 pf->stat_offsets_loaded,
3703 &osd->eth.rx_broadcast,
3704 &nsd->eth.rx_broadcast);
3705 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3706 I40E_GLPRT_BPTCL(hw->port),
3707 pf->stat_offsets_loaded,
3708 &osd->eth.tx_broadcast,
3709 &nsd->eth.tx_broadcast);
/* Link/fault counters */
3711 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3712 pf->stat_offsets_loaded,
3713 &osd->tx_dropped_link_down,
3714 &nsd->tx_dropped_link_down);
3715 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3716 pf->stat_offsets_loaded,
3717 &osd->mac_local_faults,
3718 &nsd->mac_local_faults);
3719 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3720 pf->stat_offsets_loaded,
3721 &osd->mac_remote_faults,
3722 &nsd->mac_remote_faults);
3723 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3724 pf->stat_offsets_loaded,
3725 &osd->rx_length_errors,
3726 &nsd->rx_length_errors);
3728 /* Flow control (LFC) stats */
3729 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3730 pf->stat_offsets_loaded,
3731 &osd->link_xon_rx, &nsd->link_xon_rx);
3732 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3733 pf->stat_offsets_loaded,
3734 &osd->link_xon_tx, &nsd->link_xon_tx);
3735 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3736 pf->stat_offsets_loaded,
3737 &osd->link_xoff_rx, &nsd->link_xoff_rx);
3738 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3739 pf->stat_offsets_loaded,
3740 &osd->link_xoff_tx, &nsd->link_xoff_tx);
3742 /* Packet size stats rx */
3743 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3744 I40E_GLPRT_PRC64L(hw->port),
3745 pf->stat_offsets_loaded,
3746 &osd->rx_size_64, &nsd->rx_size_64);
3747 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3748 I40E_GLPRT_PRC127L(hw->port),
3749 pf->stat_offsets_loaded,
3750 &osd->rx_size_127, &nsd->rx_size_127);
3751 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3752 I40E_GLPRT_PRC255L(hw->port),
3753 pf->stat_offsets_loaded,
3754 &osd->rx_size_255, &nsd->rx_size_255);
3755 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3756 I40E_GLPRT_PRC511L(hw->port),
3757 pf->stat_offsets_loaded,
3758 &osd->rx_size_511, &nsd->rx_size_511);
3759 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3760 I40E_GLPRT_PRC1023L(hw->port),
3761 pf->stat_offsets_loaded,
3762 &osd->rx_size_1023, &nsd->rx_size_1023);
3763 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3764 I40E_GLPRT_PRC1522L(hw->port),
3765 pf->stat_offsets_loaded,
3766 &osd->rx_size_1522, &nsd->rx_size_1522);
3767 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3768 I40E_GLPRT_PRC9522L(hw->port),
3769 pf->stat_offsets_loaded,
3770 &osd->rx_size_big, &nsd->rx_size_big);
3772 /* Packet size stats tx */
3773 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3774 I40E_GLPRT_PTC64L(hw->port),
3775 pf->stat_offsets_loaded,
3776 &osd->tx_size_64, &nsd->tx_size_64);
3777 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3778 I40E_GLPRT_PTC127L(hw->port),
3779 pf->stat_offsets_loaded,
3780 &osd->tx_size_127, &nsd->tx_size_127);
3781 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3782 I40E_GLPRT_PTC255L(hw->port),
3783 pf->stat_offsets_loaded,
3784 &osd->tx_size_255, &nsd->tx_size_255);
3785 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3786 I40E_GLPRT_PTC511L(hw->port),
3787 pf->stat_offsets_loaded,
3788 &osd->tx_size_511, &nsd->tx_size_511);
3789 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3790 I40E_GLPRT_PTC1023L(hw->port),
3791 pf->stat_offsets_loaded,
3792 &osd->tx_size_1023, &nsd->tx_size_1023);
3793 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3794 I40E_GLPRT_PTC1522L(hw->port),
3795 pf->stat_offsets_loaded,
3796 &osd->tx_size_1522, &nsd->tx_size_1522);
3797 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3798 I40E_GLPRT_PTC9522L(hw->port),
3799 pf->stat_offsets_loaded,
3800 &osd->tx_size_big, &nsd->tx_size_big);
/* RX anomaly counters (undersize/fragment/oversize/jabber) */
3802 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
3803 pf->stat_offsets_loaded,
3804 &osd->rx_undersize, &nsd->rx_undersize);
3805 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
3806 pf->stat_offsets_loaded,
3807 &osd->rx_fragments, &nsd->rx_fragments);
3808 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
3809 pf->stat_offsets_loaded,
3810 &osd->rx_oversize, &nsd->rx_oversize);
3811 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
3812 pf->stat_offsets_loaded,
3813 &osd->rx_jabber, &nsd->rx_jabber);
/* Offsets are now valid for subsequent delta calculations. */
3814 pf->stat_offsets_loaded = true;
3817 /* Update vsi stats */
3818 ixl_update_vsi_stats(vsi);
3820 for (int i = 0; i < pf->num_vfs; i++) {
3822 if (vf->vf_flags & VF_FLAG_ENABLED)
3823 ixl_update_eth_stats(&pf->vfs[i].vsi);
/*
 * Tear down and re-create HW structures after a reset event so the
 * device becomes usable again: shut down the LAN HMC and admin queue,
 * then re-init the admin queue, its MSI-X vector and the LAN HMC.
 * NOTE(review): this extraction is missing lines (return type, braces,
 * error-path control flow); comments describe only the visible calls.
 */
3828 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
3830 	struct i40e_hw *hw = &pf->hw;
3831 	struct ixl_vsi *vsi = &pf->vsi;
3832 	device_t dev = pf->dev;
/* Remember whether the interface was running so it can presumably be
 * restarted afterwards — TODO confirm, the restart code is not visible. */
3836 	is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
3841 	error = i40e_shutdown_lan_hmc(hw);
3844 	    "Shutdown LAN HMC failed with code %d\n", error);
3845 	ixl_disable_adminq(hw);
3846 	ixl_teardown_adminq_msix(pf);
3847 	error = i40e_shutdown_adminq(hw);
3850 	    "Shutdown Admin queue failed with code %d\n", error);
/* Bring the admin queue back up; a firmware API version mismatch is
 * tolerated here, any other error is reported. */
3853 	error = i40e_init_adminq(hw);
3854 	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
3855 		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
3858 	error = ixl_setup_adminq_msix(pf);
3860 		device_printf(dev, "ixl_setup_adminq_msix error: %d\n",
3863 	ixl_configure_intr0_msix(pf);
3864 	ixl_enable_adminq(hw);
/* Re-create the LAN HMC object using the queue-pair counts from the
 * function capabilities, then configure it in direct-only mode. */
3865 	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
3866 	    hw->func_caps.num_rx_qp, 0, 0);
3868 		device_printf(dev, "init_lan_hmc failed: %d\n", error);
3870 	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
3872 		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
/*
 * Handle an EMP (embedded management processor) reset: poll the global
 * reset status register until the device leaves the resetting state,
 * then rebuild the driver's HW structures and clear the EMPR flag.
 */
3881 ixl_handle_empr_reset(struct ixl_pf *pf)
3883 	struct i40e_hw *hw = &pf->hw;
3884 	device_t dev = pf->dev;
3888 	/* Typically finishes within 3-4 seconds */
/* Poll up to 100 times with a 100 ms delay each (~10 s upper bound). */
3889 	while (count++ < 100) {
3890 		reg = rd32(hw, I40E_GLGEN_RSTAT)
3891 		    & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
3893 			i40e_msec_delay(100);
3897 	ixl_dbg(pf, IXL_DBG_INFO,
3898 	    "EMPR reset wait count: %d\n", count);
3900 	device_printf(dev, "Rebuilding driver state...\n");
3901 	ixl_rebuild_hw_structs_after_reset(pf);
3902 	device_printf(dev, "Rebuilding driver state done.\n");
/* Atomically clear the resetting flag so other paths (adminq task,
 * NVM update ioctl) stop waiting on the reset. */
3904 	atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
/*
 * Deferred (taskqueue) handler for admin queue interrupts.  Runs in
 * task context rather than interrupt context because the work below
 * (EMPR handling, memory allocation) may sleep.
 */
3908 ** Tasklet handler for MSIX Adminq interrupts
3909 **  - do outside interrupt since it might sleep
3912 ixl_do_adminq(void *context, int pending)
3914 	struct ixl_pf *pf = context;
3915 	struct i40e_hw *hw = &pf->hw;
3916 	struct i40e_arq_event_info event;
3918 	device_t dev = pf->dev;
/* An EMP reset supersedes normal AQ processing; handle it first. */
3922 	if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
3923 		/* Flag cleared at end of this function */
3924 		ixl_handle_empr_reset(pf);
3928 	/* Admin Queue handling */
3929 	event.buf_len = IXL_AQ_BUF_SZ;
/* M_NOWAIT: allocation must not sleep while draining the queue;
 * failure is handled by bailing out below. */
3930 	event.msg_buf = malloc(event.buf_len,
3931 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3932 	if (!event.msg_buf) {
3933 		device_printf(dev, "%s: Unable to allocate memory for Admin"
3934 		    " Queue event!\n", __func__);
/* Drain pending AQ events, dispatching each by opcode; bounded by
 * IXL_ADM_LIMIT iterations per invocation of this task. */
3939 	/* clean and process any events */
3941 		ret = i40e_clean_arq_element(hw, &event, &result);
3944 		opcode = LE16_TO_CPU(event.desc.opcode);
3945 		ixl_dbg(pf, IXL_DBG_AQ,
3946 		    "%s: Admin Queue event: %#06x\n", __func__, opcode);
3948 		case i40e_aqc_opc_get_link_status:
3949 			ixl_link_event(pf, &event);
3951 		case i40e_aqc_opc_send_msg_to_pf:
/* VF mailbox message — only handled when SR-IOV support is compiled
 * in (presumably under PCI_IOV; the #ifdef lines are not visible). */
3953 			ixl_handle_vf_msg(pf, &event);
3956 		case i40e_aqc_opc_event_lan_overflow:
3961 	} while (result && (loop++ < IXL_ADM_LIMIT));
3963 	free(event.msg_buf, M_DEVBUF);
3966 	 * If there are still messages to process, reschedule ourselves.
3967 	 * Otherwise, re-enable our interrupt.
3970 		taskqueue_enqueue(pf->tq, &pf->adminq);
3972 		ixl_enable_adminq(hw);
/*
 * Refresh the per-VSI ethernet statistics from the HW counters.
 * First read after load/reset captures baseline offsets so reported
 * values count from zero (device stats survive a PF reset).
 */
3978  * Update VSI-specific ethernet statistics counters.
3981 ixl_update_eth_stats(struct ixl_vsi *vsi)
3983 	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3984 	struct i40e_hw *hw = &pf->hw;
3985 	struct i40e_eth_stats *es;
3986 	struct i40e_eth_stats *oes;
/* NOTE(review): nsd is declared but not referenced in any visible
 * line of this function — possibly dead, or used in missing lines. */
3987 	struct i40e_hw_port_stats *nsd;
/* Per-VSI statistics block index assigned by firmware; selects which
 * GLV_* register instance to read below. */
3988 	u16 stat_idx = vsi->info.stat_counter_idx;
3990 	es = &vsi->eth_stats;
3991 	oes = &vsi->eth_stats_offsets;
3994 	/* Gather up the stats that the hw collects */
3995 	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
3996 	    vsi->stat_offsets_loaded,
3997 	    &oes->tx_errors, &es->tx_errors);
3998 	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
3999 	    vsi->stat_offsets_loaded,
4000 	    &oes->rx_discards, &es->rx_discards);
/* RX byte/packet counters (48-bit, split across H/L registers). */
4002 	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4003 	    I40E_GLV_GORCL(stat_idx),
4004 	    vsi->stat_offsets_loaded,
4005 	    &oes->rx_bytes, &es->rx_bytes);
4006 	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4007 	    I40E_GLV_UPRCL(stat_idx),
4008 	    vsi->stat_offsets_loaded,
4009 	    &oes->rx_unicast, &es->rx_unicast);
4010 	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4011 	    I40E_GLV_MPRCL(stat_idx),
4012 	    vsi->stat_offsets_loaded,
4013 	    &oes->rx_multicast, &es->rx_multicast);
4014 	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4015 	    I40E_GLV_BPRCL(stat_idx),
4016 	    vsi->stat_offsets_loaded,
4017 	    &oes->rx_broadcast, &es->rx_broadcast);
/* TX byte/packet counters. */
4019 	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4020 	    I40E_GLV_GOTCL(stat_idx),
4021 	    vsi->stat_offsets_loaded,
4022 	    &oes->tx_bytes, &es->tx_bytes);
4023 	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4024 	    I40E_GLV_UPTCL(stat_idx),
4025 	    vsi->stat_offsets_loaded,
4026 	    &oes->tx_unicast, &es->tx_unicast);
4027 	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4028 	    I40E_GLV_MPTCL(stat_idx),
4029 	    vsi->stat_offsets_loaded,
4030 	    &oes->tx_multicast, &es->tx_multicast);
4031 	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4032 	    I40E_GLV_BPTCL(stat_idx),
4033 	    vsi->stat_offsets_loaded,
4034 	    &oes->tx_broadcast, &es->tx_broadcast);
/* Offsets are valid from now on; subsequent reads report deltas. */
4035 	vsi->stat_offsets_loaded = true;
/*
 * Update a VSI's statistics and push the aggregated values into the
 * ifnet counters (IXL_SET_* macros) so userland tools see them.
 * NOTE(review): several declarations/initializations (ifp, nsd, the
 * rx side of tx_discards accounting) are missing from this extraction.
 */
4039 ixl_update_vsi_stats(struct ixl_vsi *vsi)
4043 	struct i40e_eth_stats *es;
4046 	struct i40e_hw_port_stats *nsd;
4050 	es = &vsi->eth_stats;
/* Refresh the HW-sourced per-VSI counters first. */
4053 	ixl_update_eth_stats(vsi);
/* Total TX drops = HW drops + per-queue buf_ring drops. */
4055 	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4056 	for (int i = 0; i < vsi->num_queues; i++)
4057 		tx_discards += vsi->queues[i].txr.br->br_drops;
4059 	/* Update ifnet stats */
4060 	IXL_SET_IPACKETS(vsi, es->rx_unicast +
4063 	IXL_SET_OPACKETS(vsi, es->tx_unicast +
4066 	IXL_SET_IBYTES(vsi, es->rx_bytes);
4067 	IXL_SET_OBYTES(vsi, es->tx_bytes);
4068 	IXL_SET_IMCASTS(vsi, es->rx_multicast);
4069 	IXL_SET_OMCASTS(vsi, es->tx_multicast);
/* Input errors aggregate CRC, framing and size-violation counters. */
4071 	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4072 	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4074 	IXL_SET_OERRORS(vsi, es->tx_errors);
4075 	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4076 	IXL_SET_OQDROPS(vsi, tx_discards);
4077 	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
/* Full-duplex hardware: collisions are always zero. */
4078 	IXL_SET_COLLISIONS(vsi, 0);
/*
 * Zero the PF's port statistics and their baseline offsets, and force
 * the next ixl_update_pf_stats() pass to re-capture offsets.
 */
4082  * Reset all of the stats for the given pf
4085 ixl_pf_reset_stats(struct ixl_pf *pf)
4087 	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4088 	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4089 	pf->stat_offsets_loaded = false;
/*
 * Zero a VSI's ethernet statistics and offsets; the next stats update
 * will re-capture baseline offsets from hardware.
 */
4093  * Resets all stats of the given vsi
4096 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4098 	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4099 	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4100 	vsi->stat_offsets_loaded = false;
/*
 * Read a 48-bit HW counter (split across a high and a low 32-bit
 * register) and store the offset-adjusted delta into *stat.  Handles
 * wraparound of the 48-bit value relative to the saved offset.
 */
4104  * Read and update a 48 bit stat from the hw
4106  * Since the device stats are not reset at PFReset, they likely will not
4107  * be zeroed when the driver starts.  We'll save the first values read
4108  * and use them as offsets to be subtracted from the raw values in order
4109  * to report stats that count from zero.
4112 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4113 	bool offset_loaded, u64 *offset, u64 *stat)
/* On amd64 with FreeBSD 10+ the 48-bit value can be fetched with one
 * 64-bit bus read; otherwise compose it from two 32-bit reads. */
4117 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4118 	new_data = rd64(hw, loreg);
4121 	 * Use two rd32's instead of one rd64; FreeBSD versions before
4122 	 * 10 don't support 64-bit bus reads/writes.
4124 	new_data = rd32(hw, loreg);
4125 	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
/* If the raw value wrapped below the saved offset, add 2^48 before
 * subtracting, then mask back to 48 bits. */
4130 	if (new_data >= *offset)
4131 		*stat = new_data - *offset;
4133 		*stat = (new_data + ((u64)1 << 48)) - *offset;
4134 	*stat &= 0xFFFFFFFFFFFFULL;
/*
 * Read a 32-bit HW counter and store the offset-adjusted delta into
 * *stat, compensating for 32-bit wraparound relative to the offset.
 */
4138  * Read and update a 32 bit stat from the hw
4141 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4142 	bool offset_loaded, u64 *offset, u64 *stat)
4146 	new_data = rd32(hw, reg);
4149 	if (new_data >= *offset)
4150 		*stat = (u32)(new_data - *offset);
/* Counter wrapped: add 2^32 before subtracting the offset. */
4152 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
/*
 * Register the device's sysctl tree: user-facing tunables (flow
 * control, advertised speed, ITR) plus a hidden "debug" subtree of
 * diagnostic read-only nodes.
 */
4156 ixl_add_device_sysctls(struct ixl_pf *pf)
4158 	device_t dev = pf->dev;
4160 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4161 	struct sysctl_oid_list *ctx_list =
4162 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
4164 	struct sysctl_oid *debug_node;
4165 	struct sysctl_oid_list *debug_list;
4167 	/* Set up sysctls */
4168 	SYSCTL_ADD_PROC(ctx, ctx_list,
4169 	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
4170 	    pf, 0, ixl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
4172 	SYSCTL_ADD_PROC(ctx, ctx_list,
4173 	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
4174 	    pf, 0, ixl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
4176 	SYSCTL_ADD_PROC(ctx, ctx_list,
4177 	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
4178 	    pf, 0, ixl_current_speed, "A", "Current Port Speed");
4180 	SYSCTL_ADD_PROC(ctx, ctx_list,
4181 	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
4182 	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
4184 	SYSCTL_ADD_PROC(ctx, ctx_list,
4185 	    OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
4186 	    pf, 0, ixl_sysctl_unallocated_queues, "I",
4187 	    "Queues not allocated to a PF or VF");
4189 	SYSCTL_ADD_PROC(ctx, ctx_list,
4190 	    OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
4191 	    pf, 0, ixl_sysctl_pf_tx_itr, "I",
4192 	    "Immediately set TX ITR value for all queues");
4194 	SYSCTL_ADD_PROC(ctx, ctx_list,
4195 	    OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
4196 	    pf, 0, ixl_sysctl_pf_rx_itr, "I",
4197 	    "Immediately set RX ITR value for all queues");
4199 	SYSCTL_ADD_INT(ctx, ctx_list,
4200 	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
4201 	    &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
4203 	SYSCTL_ADD_INT(ctx, ctx_list,
4204 	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
4205 	    &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
/* CTLFLAG_SKIP hides the debug subtree from "sysctl -a" listings. */
4207 	/* Add sysctls meant to print debug information, but don't list them
4208 	 * in "sysctl -a" output. */
4209 	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
4210 	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
4211 	debug_list = SYSCTL_CHILDREN(debug_node);
4213 	SYSCTL_ADD_UINT(ctx, debug_list,
4214 	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
4215 	    &pf->hw.debug_mask, 0, "Shared code debug message level");
/* NOTE(review): "Non-hared" below looks like a typo for "Non-shared";
 * it is a runtime string, so it is left untouched here. */
4217 	SYSCTL_ADD_UINT(ctx, debug_list,
4218 	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
4219 	    &pf->dbg_mask, 0, "Non-hared code debug message level");
4221 	SYSCTL_ADD_PROC(ctx, debug_list,
4222 	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
4223 	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
4225 	SYSCTL_ADD_PROC(ctx, debug_list,
4226 	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
4227 	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
4229 	SYSCTL_ADD_PROC(ctx, debug_list,
4230 	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
4231 	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
4233 	SYSCTL_ADD_PROC(ctx, debug_list,
4234 	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
4235 	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
4237 	SYSCTL_ADD_PROC(ctx, debug_list,
4238 	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
4239 	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
4241 	SYSCTL_ADD_PROC(ctx, debug_list,
4242 	    OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
4243 	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
4245 	SYSCTL_ADD_PROC(ctx, debug_list,
4246 	    OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
4247 	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
/* Presumably only registered when SR-IOV is compiled in — the
 * surrounding #ifdef lines are not visible in this extraction. */
4249 	SYSCTL_ADD_UINT(ctx, debug_list,
4250 	    OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
4251 	    0, "PF/VF Virtual Channel debug level");
/*
 * Sysctl handler: report the number of HW queues not yet allocated to
 * a PF or VF (useful for sizing SR-IOV VF queue assignments).
 */
4256  * Primarily for finding out how many queues can be assigned to VFs,
4260 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
4262 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4266 	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
/* Read-only: pass NULL valp, supply the computed count directly. */
4269 	return sysctl_handle_int(oidp, NULL, queues, req);
/*
 * Sysctl handler: set link flow control mode (0-3, presumably
 * none/rx/tx/full — confirm against IXL_SYSCTL_HELP_FC) and push it to
 * firmware via i40e_set_fc(), then refresh link state.
 */
4273 ** Set flow control using sysctl:
4280 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4282 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4283 	struct i40e_hw *hw = &pf->hw;
4284 	device_t dev = pf->dev;
4285 	int requested_fc, error = 0;
4286 	enum i40e_status_code aq_error = 0;
/* Standard read-modify sysctl pattern: return current value on read,
 * fall through only when userland wrote a new one. */
4290 	requested_fc = pf->fc;
4291 	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
4292 	if ((error) || (req->newptr == NULL))
4294 	if (requested_fc < 0 || requested_fc > 3) {
4296 	    "Invalid fc mode; valid modes are 0 through 3\n");
4300 	/* Set fc ability for port */
4301 	hw->fc.requested_mode = requested_fc;
4302 	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4305 	    "%s: Error setting new fc mode %d; fc_err %#x\n",
4306 	    __func__, aq_error, fc_aq_err);
4309 	pf->fc = requested_fc;
/* Give firmware time to apply the change before polling link info. */
4311 	/* Get new link state */
4312 	i40e_msec_delay(250);
4313 	hw->phy.get_link_info = TRUE;
4314 	i40e_get_link_status(hw, &pf->link_up);
/*
 * Sysctl handler: report the current link speed as a string chosen
 * from a speeds[] table (table definition not visible here) indexed
 * by the HW-reported link speed.
 */
4320 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4322 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4323 	struct i40e_hw *hw = &pf->hw;
4324 	int error = 0, index = 0;
/* Refresh link state before reporting. */
4335 	ixl_update_link_status(pf);
4337 	switch (hw->phy.link_info.link_speed) {
4338 	case I40E_LINK_SPEED_100MB:
4341 	case I40E_LINK_SPEED_1GB:
4344 	case I40E_LINK_SPEED_10GB:
4347 	case I40E_LINK_SPEED_40GB:
4350 	case I40E_LINK_SPEED_20GB:
4353 	case I40E_LINK_SPEED_UNKNOWN:
4359 	error = sysctl_handle_string(oidp, speeds[index],
4360 	    strlen(speeds[index]), req);
/*
 * Program the set of advertised link speeds into the PHY.  Reads the
 * current PHY capabilities, builds a new config that preserves the
 * existing abilities/EEE settings, maps the "speeds" bitmask (driver
 * encoding: 0x1=100M .. 0x10=40G) to AQ link-speed bits, applies it,
 * then restarts the interface to pick up the change.
 */
4365 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4367 	struct i40e_hw *hw = &pf->hw;
4368 	device_t dev = pf->dev;
4369 	struct i40e_aq_get_phy_abilities_resp abilities;
4370 	struct i40e_aq_set_phy_config config;
4371 	enum i40e_status_code aq_error = 0;
4373 	/* Get current capability information */
4374 	aq_error = i40e_aq_get_phy_capabilities(hw,
4375 	    FALSE, FALSE, &abilities, NULL);
4378 	    "%s: Error getting phy capabilities %d,"
4379 	    " aq error: %d\n", __func__, aq_error,
4380 	    hw->aq.asq_last_status);
4384 	/* Prepare new config */
4385 	bzero(&config, sizeof(config));
4386 	config.phy_type = abilities.phy_type;
/* Keep atomic-link-change behavior so the link is reconfigured in one
 * step rather than dropping and re-negotiating piecemeal. */
4387 	config.abilities = abilities.abilities
4388 	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4389 	config.eee_capability = abilities.eee_capability;
4390 	config.eeer = abilities.eeer_val;
4391 	config.low_power_ctrl = abilities.d3_lpan;
/* Map driver bitmask to AQ link_speed bits; the "if (speeds & ...)"
 * guard lines are missing from this extraction. */
4392 	/* Translate into aq cmd link_speed */
4394 		config.link_speed |= I40E_LINK_SPEED_40GB;
4396 		config.link_speed |= I40E_LINK_SPEED_20GB;
4398 		config.link_speed |= I40E_LINK_SPEED_10GB;
4400 		config.link_speed |= I40E_LINK_SPEED_1GB;
4402 		config.link_speed |= I40E_LINK_SPEED_100MB;
4404 	/* Do aq command & restart link */
4405 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4408 	    "%s: Error setting new phy config %d,"
4409 	    " aq error: %d\n", __func__, aq_error,
4410 	    hw->aq.asq_last_status);
4415 	** This seems a bit heavy handed, but we
4416 	** need to get a reinit on some devices
4419 	ixl_stop_locked(pf);
4420 	ixl_init_locked(pf);
/*
 * Sysctl handler: set the advertised-speed bitmask.  Validates the
 * requested mask both for overall range and per-device capability
 * (keyed by PCI device ID), then applies it via
 * ixl_set_advertised_speeds() and refreshes link status.
 */
4427 ** Control link advertise speed:
4429 **	0x1 - advertise 100 Mb
4430 **	0x2 - advertise 1G
4431 **	0x4 - advertise 10G
4432 **	0x8 - advertise 20G
4433 **	0x10 - advertise 40G
4435 **	Set to 0 to disable link
4438 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4440 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4441 	struct i40e_hw *hw = &pf->hw;
4442 	device_t dev = pf->dev;
4443 	int requested_ls = 0;
4446 	/* Read in new mode */
4447 	requested_ls = pf->advertised_speed;
4448 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4449 	if ((error) || (req->newptr == NULL))
/* NOTE(review): this range check accepts e.g. 0x10 but the message
 * says "0x1 through 0x10"; values like 0x3 (combinations) also pass.
 * Per-device masks below do the real validation. */
4451 	/* Check for sane value */
4452 	if (requested_ls > 0x10) {
4453 		device_printf(dev, "Invalid advertised speed; "
4454 		    "valid modes are 0x1 through 0x10\n");
4457 	/* Then check for validity based on adapter type */
4458 	switch (hw->device_id) {
4459 	case I40E_DEV_ID_1G_BASE_T_X722:
4461 		if (requested_ls & ~(0x2)) {
4463 		    "Only 1G speeds supported on this device.\n");
4467 	case I40E_DEV_ID_10G_BASE_T:
4468 	case I40E_DEV_ID_10G_BASE_T4:
4470 		if (requested_ls & ~(0x7)) {
4472 		    "Only 100M/1G/10G speeds supported on this device.\n");
4476 	case I40E_DEV_ID_20G_KR2:
4477 	case I40E_DEV_ID_20G_KR2_A:
4479 		if (requested_ls & ~(0xE)) {
4481 		    "Only 1G/10G/20G speeds supported on this device.\n");
4485 	case I40E_DEV_ID_KX_B:
4486 	case I40E_DEV_ID_QSFP_A:
4487 	case I40E_DEV_ID_QSFP_B:
4489 		if (requested_ls & ~(0x10)) {
4491 		    "Only 40G speeds supported on this device.\n");
/* Default case (line not visible): remaining devices allow 1G/10G. */
4497 		if (requested_ls & ~(0x6)) {
4499 		    "Only 1/10G speeds supported on this device.\n");
4505 	/* Exit if no change */
4506 	if (pf->advertised_speed == requested_ls)
4509 	error = ixl_set_advertised_speeds(pf, requested_ls);
4513 	pf->advertised_speed = requested_ls;
4514 	ixl_update_link_status(pf);
/*
 * Query the PCIe link status for this adapter, record it in the HW
 * struct, print the negotiated speed/width, and warn if the slot
 * provides less bandwidth than the device can use.
 */
4519 ** Get the width and transaction speed of
4520 ** the bus this adapter is plugged into.
4523 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4528 	/* Some devices don't use PCIE */
4529 	if (hw->mac.type == I40E_MAC_X722)
/* NOTE(review): pci_find_cap()'s return value is not checked in the
 * visible lines; offset would be stale if the capability is absent. */
4532 	/* Read PCI Express Capabilities Link Status Register */
4533 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
4534 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4536 	/* Fill out hw struct with PCIE info */
4537 	i40e_set_pci_config_data(hw, link);
4539 	/* Use info to print out bandwidth messages */
4540 	device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4541 	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4542 	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4543 	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4544 	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4545 	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4546 	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
/* Warn when slower than x8 Gen3, the optimum for this adapter. */
4549 	if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4550 	    (hw->bus.speed < i40e_bus_speed_8000)) {
4551 		device_printf(dev, "PCI-Express bandwidth available"
4552 		    " for this device may be insufficient for"
4553 		    " optimal performance.\n");
4554 		device_printf(dev, "For optimal performance, a x8 "
4555 		    "PCIE Gen3 slot is required.\n");
/*
 * Sysctl handler: emit the NVM/firmware version string via an sbuf.
 */
4560 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4562 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4563 	struct i40e_hw *hw = &pf->hw;
/* sbuf drains directly into the sysctl request buffer. */
4566 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4567 	ixl_nvm_version_str(hw, sbuf);
/*
 * Debug helper: pretty-print an NVM access command.  The common "get
 * driver status" polling read is deliberately matched first and kept
 * quiet to avoid flooding the log.
 */
4575 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
4577 	if ((nvma->command == I40E_NVM_READ) &&
4578 	    ((nvma->config & 0xFF) == 0xF) &&
4579 	    (((nvma->config & 0xF00) >> 8) == 0xF) &&
4580 	    (nvma->offset == 0) &&
4581 	    (nvma->data_size == 1)) {
4582 		// device_printf(dev, "- Get Driver Status Command\n");
4584 	else if (nvma->command == I40E_NVM_READ) {
4588 	switch (nvma->command) {
4590 		device_printf(dev, "- command: I40E_NVM_READ\n");
4593 		device_printf(dev, "- command: I40E_NVM_WRITE\n");
4596 		device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
/* config packs a module pointer (low byte) and flags (bits 8-11). */
4600 	device_printf(dev, "- config (ptr)  : 0x%02x\n", nvma->config & 0xFF);
4601 	device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
4602 	device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
4603 	device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
/*
 * Handle an NVM update request arriving via SIOCxDRVSPEC ioctl.
 * Validates the ifdrv payload, optionally waits out an in-progress
 * EMP reset (up to ~10 s), then forwards the command to the shared
 * i40e NVM update engine.
 */
4608 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
4610 	struct i40e_hw *hw = &pf->hw;
4611 	struct i40e_nvm_access *nvma;
4612 	device_t dev = pf->dev;
4613 	enum i40e_status_code status = 0;
4616 	DEBUGFUNC("ixl_handle_nvmupd_cmd");
/* Reject undersized or NULL payloads from userland before casting. */
4619 	if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
4620 	    ifd->ifd_data == NULL) {
4621 		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
4623 		device_printf(dev, "%s: ifdrv length: %lu, sizeof(struct i40e_nvm_access): %lu\n",
4624 		    __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access));
4625 		device_printf(dev, "%s: data pointer: %p\n", __func__,
4630 	nvma = (struct i40e_nvm_access *)ifd->ifd_data;
4632 	if (pf->dbg_mask & IXL_DBG_NVMUPD)
4633 		ixl_print_nvm_cmd(dev, nvma);
/* If an EMP reset is in flight, poll for completion before touching
 * the NVM (100 x 100 ms). */
4635 	if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
4637 		while (count++ < 100) {
4638 			i40e_msec_delay(100);
4639 			if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING))
4644 	if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) {
4646 		status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
4653 		device_printf(dev, "i40e_nvmupd_command status %d, perrno %d\n",
4657 	 * -EPERM is actually ERESTART, which the kernel interprets as it needing
4658 	 * to run this ioctl again. So use -EACCES for -EPERM instead.
4660 	if (perrno == -EPERM)
/*
 * ifmedia status callback: refresh link info from firmware and
 * translate the HW PHY type into the closest ifmedia subtype, plus
 * duplex and flow-control pause flags.
 */
4666 /*********************************************************************
4668  *  Media Ioctl callback
4670  *  This routine is called whenever the user queries the status of
4671  *  the interface using ifconfig.
4673  **********************************************************************/
4675 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
4677 	struct ixl_vsi *vsi = ifp->if_softc;
4678 	struct ixl_pf *pf = vsi->back;
4679 	struct i40e_hw *hw = &pf->hw;
4681 	INIT_DEBUGOUT("ixl_media_status: begin");
/* Force a fresh link query from firmware before reporting. */
4684 	hw->phy.get_link_info = TRUE;
4685 	i40e_get_link_status(hw, &pf->link_up);
4686 	ixl_update_link_status(pf);
4688 	ifmr->ifm_status = IFM_AVALID;
4689 	ifmr->ifm_active = IFM_ETHER;
4696 	ifmr->ifm_status |= IFM_ACTIVE;
4698 	/* Hardware always does full-duplex */
4699 	ifmr->ifm_active |= IFM_FDX;
/* Map HW PHY type to ifmedia subtype; `break;` statements between
 * cases are missing from this extraction. */
4701 	switch (hw->phy.link_info.phy_type) {
4703 		case I40E_PHY_TYPE_100BASE_TX:
4704 			ifmr->ifm_active |= IFM_100_TX;
4707 		case I40E_PHY_TYPE_1000BASE_T:
4708 			ifmr->ifm_active |= IFM_1000_T;
4710 		case I40E_PHY_TYPE_1000BASE_SX:
4711 			ifmr->ifm_active |= IFM_1000_SX;
4713 		case I40E_PHY_TYPE_1000BASE_LX:
4714 			ifmr->ifm_active |= IFM_1000_LX;
4716 		case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
4717 			ifmr->ifm_active |= IFM_OTHER;
4720 		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
4721 			ifmr->ifm_active |= IFM_10G_TWINAX;
4723 		case I40E_PHY_TYPE_10GBASE_SR:
4724 			ifmr->ifm_active |= IFM_10G_SR;
4726 		case I40E_PHY_TYPE_10GBASE_LR:
4727 			ifmr->ifm_active |= IFM_10G_LR;
4729 		case I40E_PHY_TYPE_10GBASE_T:
4730 			ifmr->ifm_active |= IFM_10G_T;
4732 		case I40E_PHY_TYPE_XAUI:
4733 		case I40E_PHY_TYPE_XFI:
4734 		case I40E_PHY_TYPE_10GBASE_AOC:
4735 			ifmr->ifm_active |= IFM_OTHER;
4738 		case I40E_PHY_TYPE_40GBASE_CR4:
4739 		case I40E_PHY_TYPE_40GBASE_CR4_CU:
4740 			ifmr->ifm_active |= IFM_40G_CR4;
4742 		case I40E_PHY_TYPE_40GBASE_SR4:
4743 			ifmr->ifm_active |= IFM_40G_SR4;
4745 		case I40E_PHY_TYPE_40GBASE_LR4:
4746 			ifmr->ifm_active |= IFM_40G_LR4;
4748 		case I40E_PHY_TYPE_XLAUI:
4749 			ifmr->ifm_active |= IFM_OTHER;
4751 		case I40E_PHY_TYPE_1000BASE_KX:
4752 			ifmr->ifm_active |= IFM_1000_KX;
4754 		case I40E_PHY_TYPE_SGMII:
4755 			ifmr->ifm_active |= IFM_1000_SGMII;
4757 		/* ERJ: What's the difference between these? */
4758 		case I40E_PHY_TYPE_10GBASE_CR1_CU:
4759 		case I40E_PHY_TYPE_10GBASE_CR1:
4760 			ifmr->ifm_active |= IFM_10G_CR1;
4762 		case I40E_PHY_TYPE_10GBASE_KX4:
4763 			ifmr->ifm_active |= IFM_10G_KX4;
4765 		case I40E_PHY_TYPE_10GBASE_KR:
4766 			ifmr->ifm_active |= IFM_10G_KR;
4768 		case I40E_PHY_TYPE_SFI:
4769 			ifmr->ifm_active |= IFM_10G_SFI;
4771 		/* Our single 20G media type */
4772 		case I40E_PHY_TYPE_20GBASE_KR2:
4773 			ifmr->ifm_active |= IFM_20G_KR2;
4775 		case I40E_PHY_TYPE_40GBASE_KR4:
4776 			ifmr->ifm_active |= IFM_40G_KR4;
4778 		case I40E_PHY_TYPE_XLPPI:
4779 		case I40E_PHY_TYPE_40GBASE_AOC:
4780 			ifmr->ifm_active |= IFM_40G_XLPPI;
4782 		/* Unknown to driver */
4784 			ifmr->ifm_active |= IFM_UNKNOWN;
4787 	/* Report flow control status as well */
4788 	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
4789 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
4790 	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
4791 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
/*
 * NOTE(review): the function header line is missing from this
 * extraction; from the visible body this looks like the driver init
 * entry (presumably ixl_init(void *arg)) — confirm against the full
 * source.  It recovers from a dead admin queue, (re)establishes LAN
 * queue interrupts outside of any lock, then calls ixl_init_locked().
 */
4799 	struct ixl_pf *pf = arg;
4800 	struct ixl_vsi *vsi = &pf->vsi;
4801 	device_t dev = pf->dev;
4805 	 * If the aq is dead here, it probably means something outside of the driver
4806 	 * did something to the adapter, like a PF reset.
4807 	 * So rebuild the driver's state here if that occurs.
4809 	if (!i40e_check_asq_alive(&pf->hw)) {
4810 		device_printf(dev, "Admin Queue is down; resetting...\n");
4812 		ixl_teardown_hw_structs(pf);
4818 	 * Set up LAN queue interrupts here.
4819 	 * Kernel interrupt setup functions cannot be called while holding a lock,
4820 	 * so this is done outside of init_locked().
4823 	/* Teardown existing interrupts, if they exist */
4824 	ixl_teardown_queue_msix(vsi);
4825 	ixl_free_queue_tqs(vsi);
4826 	/* Then set them up again */
4827 	error = ixl_setup_queue_msix(vsi);
4829 		device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
4831 	error = ixl_setup_queue_tqs(vsi);
4833 		device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
/* Legacy (non-MSI-X) interrupt path; presumably taken when MSI-X is
 * unavailable — the guarding condition is not visible here. */
4837 	error = ixl_assign_vsi_legacy(pf);
4839 		device_printf(pf->dev, "assign_vsi_msix/legacy error: %d\n", error);
4844 	ixl_init_locked(pf);
/*
 * ifmedia change callback.  Fortville cannot force media speeds, so
 * this only validates the media type and points the user at the
 * advertise_speed sysctl.
 */
4849  * NOTE: Fortville does not support forcing media speeds. Instead,
4850  * use the set_advertise sysctl to set the speeds Fortville
4851  * will advertise or be allowed to operate at.
4854 ixl_media_change(struct ifnet * ifp)
4856 	struct ixl_vsi *vsi = ifp->if_softc;
4857 	struct ifmedia *ifm = &vsi->media;
4859 	INIT_DEBUGOUT("ixl_media_change: begin");
4861 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4864 	if_printf(ifp, "Use 'advertise_speed' sysctl to change advertised speeds\n");
/*
 * Interface ioctl entry point: address assignment, MTU, interface
 * flags, multicast membership, media, capabilities, and the NVM
 * update driver-specific ioctl all route through here.
 */
4869 /*********************************************************************
4872  *  ixl_ioctl is called when the user wants to configure the
4875  *  return 0 on success, positive on failure
4876  **********************************************************************/
4879 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
4881 	struct ixl_vsi *vsi = ifp->if_softc;
4882 	struct ixl_pf *pf = vsi->back;
4883 	struct ifreq *ifr = (struct ifreq *)data;
4884 	struct ifdrv *ifd = (struct ifdrv *)data;
4885 #if defined(INET) || defined(INET6)
4886 	struct ifaddr *ifa = (struct ifaddr *)data;
4887 	bool avoid_reset = FALSE;
/* SIOCSIFADDR handling (case label not visible): note INET/INET6. */
4895 		if (ifa->ifa_addr->sa_family == AF_INET)
4899 		if (ifa->ifa_addr->sa_family == AF_INET6)
4902 #if defined(INET) || defined(INET6)
4904 		** Calling init results in link renegotiation,
4905 		** so we avoid doing it when possible.
4908 			ifp->if_flags |= IFF_UP;
4909 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
4912 			if (!(ifp->if_flags & IFF_NOARP))
4913 				arp_ifinit(ifp, ifa);
4916 			error = ether_ioctl(ifp, command, data);
4920 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
4921 		if (ifr->ifr_mtu > IXL_MAX_FRAME -
4922 		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
/* max_frame_size includes L2 header, CRC and one VLAN tag. */
4926 			ifp->if_mtu = ifr->ifr_mtu;
4927 			vsi->max_frame_size =
4928 				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
4929 			    + ETHER_VLAN_ENCAP_LEN;
4930 			ixl_init_locked(pf);
4935 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
4937 		if (ifp->if_flags & IFF_UP) {
4938 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Only promisc/allmulti transitions need action while running. */
4939 				if ((ifp->if_flags ^ pf->if_flags) &
4940 				    (IFF_PROMISC | IFF_ALLMULTI)) {
4941 					ixl_set_promisc(vsi);
4949 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4955 		pf->if_flags = ifp->if_flags;
4960 		IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
4963 		/* NVM update command */
4964 		if (ifd->ifd_cmd == I40E_NVM_ACCESS)
4965 			error = ixl_handle_nvmupd_cmd(pf, ifd);
4970 		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
4971 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Quiesce interrupts while the multicast filter list changes. */
4973 			ixl_disable_intr(vsi);
4975 			ixl_enable_intr(vsi);
4980 		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
4981 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4983 			ixl_disable_intr(vsi);
4985 			ixl_enable_intr(vsi);
4992 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
4993 		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
/* SIOCSIFCAP: toggle only the capabilities whose bits changed. */
4997 		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4998 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
5000 		ixl_cap_txcsum_tso(vsi, ifp, mask);
5002 		if (mask & IFCAP_RXCSUM)
5003 			ifp->if_capenable ^= IFCAP_RXCSUM;
5004 		if (mask & IFCAP_RXCSUM_IPV6)
5005 			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
5006 		if (mask & IFCAP_LRO)
5007 			ifp->if_capenable ^= IFCAP_LRO;
5008 		if (mask & IFCAP_VLAN_HWTAGGING)
5009 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5010 		if (mask & IFCAP_VLAN_HWFILTER)
5011 			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
5012 		if (mask & IFCAP_VLAN_HWTSO)
5013 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
/* Re-init so new capabilities take effect on an active interface. */
5014 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5016 			ixl_init_locked(pf);
5019 		VLAN_CAPABILITIES(ifp);
5025 		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
5026 		error = ether_ioctl(ifp, command, data);
/*
 * Map a PHY-type bit position (0-31) to a human-readable name from a
 * static 32-entry table (most table initializers are not visible in
 * this extraction).  Out-of-range positions return "Invalid".
 */
5034 ixl_phy_type_string(u32 bit_pos)
5036 	static char * phy_types_str[32] = {
5066 		"1000BASE-T Optical",
5071 	if (bit_pos > 31) return "Invalid";
5072 	return phy_types_str[bit_pos];
/*
 * Debug sysctl: query firmware for current link status and format the
 * raw fields (PHY type, speed, link/AN/ext info, max frame, CRC
 * enable) into the sysctl output buffer.
 */
5077 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
5079 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5080 	struct i40e_hw *hw = &pf->hw;
5081 	device_t dev = pf->dev;
5082 	struct i40e_link_status link_status;
5083 	enum i40e_status_code status;
5087 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5089 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
/* enable_lse=true: also (re)enables link status events from FW. */
5093 	status = i40e_aq_get_link_info(hw, true, &link_status, NULL);
5096 		    "%s: i40e_aq_get_link_info() status %s, aq error %s\n",
5097 		    __func__, i40e_stat_str(hw, status),
5098 		    i40e_aq_str(hw, hw->aq.asq_last_status));
5103 	sbuf_printf(buf, "\n"
5104 	    "PHY Type : 0x%02x<%s>\n"
5106 	    "Link info: 0x%02x\n"
5107 	    "AN info  : 0x%02x\n"
5108 	    "Ext info : 0x%02x\n"
5112 	    link_status.phy_type, ixl_phy_type_string(link_status.phy_type),
5113 	    link_status.link_speed,
5114 	    link_status.link_info, link_status.an_info,
5115 	    link_status.ext_info, link_status.max_frame_size,
5117 	    (link_status.crc_enable) ? "Yes" : "No");
5119 	error = sbuf_finish(buf);
5121 		device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Debug sysctl: fetch the PHY abilities from firmware and dump the
 * capability bitmap (with decoded type names), speeds, EEE fields,
 * PHY ID and module type bytes into the sysctl output.
 */
5128 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
5130 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5131 	struct i40e_hw *hw = &pf->hw;
5132 	device_t dev = pf->dev;
5133 	enum i40e_status_code status;
5134 	struct i40e_aq_get_phy_abilities_resp abilities;
5138 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5140 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
/* TRUE = report qualified module types in the response. */
5144 	status = i40e_aq_get_phy_capabilities(hw,
5145 	    TRUE, FALSE, &abilities, NULL);
5148 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
5149 		    __func__, i40e_stat_str(hw, status),
5150 		    i40e_aq_str(hw, hw->aq.asq_last_status));
5155 	sbuf_printf(buf, "\n"
5157 	    abilities.phy_type);
/* Decode each set bit of the 32-bit phy_type bitmap into a name. */
5159 	if (abilities.phy_type != 0) {
5160 		sbuf_printf(buf, "<");
5161 		for (int i = 0; i < 32; i++)
5162 			if ((1 << i) & abilities.phy_type)
5163 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i));
5164 		sbuf_printf(buf, ">\n");
5173 	    "ID       : %02x %02x %02x %02x\n"
5174 	    "ModType  : %02x %02x %02x",
5175 	    abilities.link_speed,
5176 	    abilities.abilities, abilities.eee_capability,
5177 	    abilities.eeer_val, abilities.d3_lpan,
5178 	    abilities.phy_id[0], abilities.phy_id[1],
5179 	    abilities.phy_id[2], abilities.phy_id[3],
5180 	    abilities.module_type[0], abilities.module_type[1],
5181 	    abilities.module_type[2]);
5183 	error = sbuf_finish(buf);
5185 		device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Debug sysctl: list every software MAC filter on the PF's VSI
 * (MAC address, VLAN, flags), one entry per line.  Builds the whole
 * string in a temporary buffer sized from the filter count.
 */
5192 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5194 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5195 	struct ixl_vsi *vsi = &pf->vsi;
5196 	struct ixl_mac_filter *f;
5201 	int ftl_counter = 0;
/* First pass: count entries (loop body counting ftl_len is in lines
 * not visible here). */
5205 	SLIST_FOREACH(f, &vsi->ftl, next) {
5210 		sysctl_handle_string(oidp, "(none)", 6, req);
/* +1 per entry for '\n', +2 for leading newline and terminator. */
5214 	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5215 	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
5217 	sprintf(buf_i++, "\n");
5218 	SLIST_FOREACH(f, &vsi->ftl, next) {
5220 		    MAC_FORMAT ", vlan %4d, flags %#06x",
5221 		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5223 		/* don't print '\n' for last entry */
5224 		if (++ftl_counter != ftl_len) {
5225 			sprintf(buf_i, "\n");
5230 	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5232 		printf("sysctl error: %d\n", error);
5233 	free(buf, M_DEVBUF);
5237 #define IXL_SW_RES_SIZE 0x14
5239 ixl_res_alloc_cmp(const void *a, const void *b)
5241 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5242 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5243 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5245 return ((int)one->resource_type - (int)two->resource_type);
/*
 * Map an AQ switch resource type code to a human-readable name for the
 * hw_res_alloc sysctl table; codes outside the table report "(Reserved)".
 * Longest string length: 25 (table column width depends on this).
 */
5252 ixl_switch_res_type_string(u8 type)
/* Name table indexed by resource type code; only part of the 0x14
 * entries is visible in this listing. */
5254 char * ixl_switch_res_type_strings[0x14] = {
5257 "Perfect Match MAC address",
5260 "Multicast hash entry",
5261 "Unicast hash entry",
5265 "VLAN Statistic Pool",
5268 "Inner VLAN Forward filter",
/* In-range codes (bounds check elided in this listing) index the table. */
5278 return ixl_switch_res_type_strings[type];
/* Anything past the table is a reserved/unknown resource type. */
5280 return "(Reserved)";
/*
 * Sysctl read handler: fetch per-type switch resource allocation counts
 * from firmware (Get Switch Resource Allocation AQ command), sort them
 * by resource type, and print a guaranteed/total/used/unallocated table.
 */
5284 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5286 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5287 struct i40e_hw *hw = &pf->hw;
5288 device_t dev = pf->dev;
5290 enum i40e_status_code status;
/* Response array sized for every resource type firmware can report. */
5294 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5296 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5298 device_printf(dev, "Could not allocate sbuf for output.\n");
/* Zero the array so unreported slots stay well-defined, then query FW;
 * num_entries receives the count actually filled in. */
5302 bzero(resp, sizeof(resp));
5303 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5309 "%s: get_switch_resource_alloc() error %s, aq error %s\n",
5310 __func__, i40e_stat_str(hw, status),
5311 i40e_aq_str(hw, hw->aq.asq_last_status));
5316 /* Sort entries by type for display */
5317 qsort(resp, num_entries,
5318 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5319 &ixl_res_alloc_cmp);
/* Header, then one fixed-width row per reported resource type. */
5321 sbuf_cat(buf, "\n");
5322 sbuf_printf(buf, "# of entries: %d\n", num_entries);
5324 " Type | Guaranteed | Total | Used | Un-allocated\n"
5325 " | (this) | (all) | (this) | (all) \n");
5326 for (int i = 0; i < num_entries; i++) {
5328 "%25s | %10d %5d %6d %12d",
5329 ixl_switch_res_type_string(resp[i].resource_type),
5333 resp[i].total_unalloced);
/* No trailing newline after the last row. */
5334 if (i < num_entries - 1)
5335 sbuf_cat(buf, "\n");
5338 error = sbuf_finish(buf);
5340 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Render a short "TYPE info" label (e.g. "PF   0", "VSI  3") for one
 * switch configuration element into the caller-supplied sbuf and return
 * a pointer to the resulting string.
 *
 * Caller must init and delete sbuf; this function will clear and
 * finish it for caller.
 *
 * XXX: Cannot use the SEID for this, since there is no longer a
 * fixed mapping between SEID and element type.
 */
5354 ixl_switch_element_string(struct sbuf *s,
5355 struct i40e_aqc_switch_config_element_resp *element)
/* Dispatch on the element type reported by firmware; the MAC/PF/VF/VSI
 * cases append element_info, other cases (bodies elided here)
 * presumably print the bare type name -- TODO confirm. */
5359 switch (element->element_type) {
5360 case I40E_AQ_SW_ELEM_TYPE_MAC:
5361 sbuf_printf(s, "MAC %3d", element->element_info);
5363 case I40E_AQ_SW_ELEM_TYPE_PF:
5364 sbuf_printf(s, "PF %3d", element->element_info);
5366 case I40E_AQ_SW_ELEM_TYPE_VF:
5367 sbuf_printf(s, "VF %3d", element->element_info);
5369 case I40E_AQ_SW_ELEM_TYPE_EMP:
5372 case I40E_AQ_SW_ELEM_TYPE_BMC:
5375 case I40E_AQ_SW_ELEM_TYPE_PV:
5378 case I40E_AQ_SW_ELEM_TYPE_VEB:
5381 case I40E_AQ_SW_ELEM_TYPE_PA:
5384 case I40E_AQ_SW_ELEM_TYPE_VSI:
5385 sbuf_printf(s, "VSI %3d", element->element_info);
/* Return the finished sbuf contents (sbuf_finish elided in listing). */
5393 return sbuf_data(s);
/*
 * Sysctl read handler: dump the internal switch topology as reported by
 * the Get Switch Configuration AQ command -- one row per element with
 * its SEID, decoded name, uplink/downlink SEIDs and connection type.
 */
5397 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5399 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5400 struct i40e_hw *hw = &pf->hw;
5401 device_t dev = pf->dev;
5404 enum i40e_status_code status;
/* Raw AdminQ response buffer, reinterpreted as the response struct.
 * NOTE(review): the u8->struct cast is a strict-aliasing/alignment
 * gray area; the AQ buffer is presumably suitably aligned -- confirm. */
5407 u8 aq_buf[I40E_AQ_LARGE_BUF];
5409 struct i40e_aqc_get_switch_config_resp *sw_config;
5410 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5412 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5414 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
/* Fetch (the first page of) the switch configuration; "next" receives
 * the continuation SEID when the config doesn't fit in one buffer. */
5418 status = i40e_aq_get_switch_config(hw, sw_config,
5419 sizeof(aq_buf), &next, NULL);
5422 "%s: aq_get_switch_config() error %s, aq error %s\n",
5423 __func__, i40e_stat_str(hw, status),
5424 i40e_aq_str(hw, hw->aq.asq_last_status));
/* Pagination is not implemented: only note that more config exists. */
5429 device_printf(dev, "%s: TODO: get more config with SEID %d\n",
/* Scratch sbuf reused per element for the name column. */
5432 nmbuf = sbuf_new_auto();
5434 device_printf(dev, "Could not allocate sbuf for name output.\n");
5439 sbuf_cat(buf, "\n");
5440 /* Assuming <= 255 elements in switch */
5441 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
5442 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
5444 ** Revision -- all elements are revision 1 for now
5447 "SEID ( Name ) | Uplink | Downlink | Conn Type\n"
5448 " | | | (uplink)\n");
/* One table row per reported element. */
5449 for (int i = 0; i < sw_config->header.num_reported; i++) {
5450 // "%4d (%8s) | %8s %8s %#8x",
5451 sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5453 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5454 &sw_config->element[i]));
5455 sbuf_cat(buf, " | ");
5456 sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
5458 sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
5460 sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
/* No trailing newline after the last row. */
5461 if (i < sw_config->header.num_reported - 1)
5462 sbuf_cat(buf, "\n");
5466 error = sbuf_finish(buf);
5468 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl read handler: dump the RSS hash key as hex. On X722 the key is
 * read via the Get RSS Key AdminQ command; on other MACs it is read
 * word-by-word from the PFQF_HKEY registers.
 */
5476 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
5478 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5479 struct i40e_hw *hw = &pf->hw;
5480 device_t dev = pf->dev;
5483 enum i40e_status_code status;
5486 struct i40e_aqc_get_set_rss_key_data key_data;
5488 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5490 device_printf(dev, "Could not allocate sbuf for output.\n");
5494 sbuf_cat(buf, "\n");
5495 if (hw->mac.type == I40E_MAC_X722) {
/* Zero first so any bytes firmware leaves untouched print as 00. */
5496 bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
5497 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
5499 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
5500 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
/* "%40D": kernel printf hex-dump of 40 bytes with "" separator. */
5501 sbuf_printf(buf, "%40D", (u_char *)key_data.standard_rss_key, "");
/* Non-X722 path: read the key 32 bits at a time from registers. */
5503 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
5504 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
/* NOTE(review): "(u_char *)®" looks like mojibake of
 * "(u_char *)&reg" (an "&reg" HTML entity turned into the
 * registered-trademark sign) -- restore from upstream source. */
5505 sbuf_printf(buf, "%4D", (u_char *)®, "");
5509 error = sbuf_finish(buf);
5511 device_printf(dev, "Error finishing sbuf: %d\n", error);
5518 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
5520 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5521 struct i40e_hw *hw = &pf->hw;
5522 device_t dev = pf->dev;
5525 enum i40e_status_code status;
5529 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5531 device_printf(dev, "Could not allocate sbuf for output.\n");
5535 sbuf_cat(buf, "\n");
5536 if (hw->mac.type == I40E_MAC_X722) {
5537 bzero(hlut, sizeof(hlut));
5538 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
5540 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
5541 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
5542 sbuf_printf(buf, "%512D", (u_char *)hlut, "");
5544 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
5545 reg = rd32(hw, I40E_PFQF_HLUT(i));
5546 sbuf_printf(buf, "%4D", (u_char *)®, "");
5550 error = sbuf_finish(buf);
5552 device_printf(dev, "Error finishing sbuf: %d\n", error);