/******************************************************************************

  Copyright (c) 2013-2019, Intel Corporation

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#include "sys/limits.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
#define IXLV_DRIVER_VERSION_MAJOR	1
#define IXLV_DRIVER_VERSION_MINOR	5
#define IXLV_DRIVER_VERSION_BUILD	8

char ixlv_driver_version[] = __XSTRING(IXLV_DRIVER_VERSION_MAJOR) "."
			     __XSTRING(IXLV_DRIVER_VERSION_MINOR) "."
			     __XSTRING(IXLV_DRIVER_VERSION_BUILD) "-k";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixlv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, 0, 0, 0},
	/* required last entry */

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixlv_strings[] = {
	"Intel(R) Ethernet Connection 700 Series VF Driver"
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixlv_probe(device_t);
static int	ixlv_attach(device_t);
static int	ixlv_detach(device_t);
static int	ixlv_shutdown(device_t);
static void	ixlv_init_locked(struct ixlv_sc *);
static int	ixlv_allocate_pci_resources(struct ixlv_sc *);
static void	ixlv_free_pci_resources(struct ixlv_sc *);
static int	ixlv_assign_msix(struct ixlv_sc *);
static int	ixlv_init_msix(struct ixlv_sc *);
static int	ixlv_init_taskqueue(struct ixlv_sc *);
static int	ixlv_setup_queues(struct ixlv_sc *);
static void	ixlv_config_rss(struct ixlv_sc *);
static void	ixlv_stop(struct ixlv_sc *);
static void	ixlv_add_multi(struct ixl_vsi *);
static void	ixlv_del_multi(struct ixl_vsi *);
static void	ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que);
static void	ixlv_free_queues(struct ixl_vsi *);
static int	ixlv_setup_interface(device_t, struct ixlv_sc *);
static int	ixlv_teardown_adminq_msix(struct ixlv_sc *);

static int	ixlv_media_change(struct ifnet *);
static void	ixlv_media_status(struct ifnet *, struct ifmediareq *);

static void	ixlv_local_timer(void *);

static int	ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
static int	ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void	ixlv_init_filters(struct ixlv_sc *);
static void	ixlv_free_filters(struct ixlv_sc *);

static void	ixlv_msix_que(void *);
static void	ixlv_msix_adminq(void *);
static void	ixlv_do_adminq(void *, int);
static void	ixlv_do_adminq_locked(struct ixlv_sc *sc);
static void	ixlv_handle_que(void *, int);
static int	ixlv_reset(struct ixlv_sc *);
static int	ixlv_reset_complete(struct i40e_hw *);
static void	ixlv_set_queue_rx_itr(struct ixl_queue *);
static void	ixlv_set_queue_tx_itr(struct ixl_queue *);
static void	ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
		    enum i40e_status_code);
static void	ixlv_configure_itr(struct ixlv_sc *);

static void	ixlv_enable_adminq_irq(struct i40e_hw *);
static void	ixlv_disable_adminq_irq(struct i40e_hw *);
static void	ixlv_enable_queue_irq(struct i40e_hw *, int);
static void	ixlv_disable_queue_irq(struct i40e_hw *, int);

static void	ixlv_setup_vlan_filters(struct ixlv_sc *);
static void	ixlv_register_vlan(void *, struct ifnet *, u16);
static void	ixlv_unregister_vlan(void *, struct ifnet *, u16);

static void	ixlv_init_hw(struct ixlv_sc *);
static int	ixlv_setup_vc(struct ixlv_sc *);
static int	ixlv_vf_config(struct ixlv_sc *);

static void	ixlv_cap_txcsum_tso(struct ixl_vsi *,
		    struct ifnet *, int);

static char	*ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed);
static int	ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS);

static void	ixlv_add_sysctls(struct ixlv_sc *);

static int	ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixlv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixlv_probe),
	DEVMETHOD(device_attach, ixlv_attach),
	DEVMETHOD(device_detach, ixlv_detach),
	DEVMETHOD(device_shutdown, ixlv_shutdown),

static driver_t ixlv_driver = {
	"ixlv", ixlv_methods, sizeof(struct ixlv_sc),

devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);

MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);
** TUNEABLE PARAMETERS:

static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
    "IXLV driver parameters");

** Number of descriptors per ring:
**   - TX and RX sizes are independently configurable

static int ixlv_tx_ring_size = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixlv.tx_ring_size", &ixlv_tx_ring_size);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_ring_size, CTLFLAG_RDTUN,
    &ixlv_tx_ring_size, 0, "TX Descriptor Ring Size");

static int ixlv_rx_ring_size = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixlv.rx_ring_size", &ixlv_rx_ring_size);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_ring_size, CTLFLAG_RDTUN,
    &ixlv_rx_ring_size, 0, "RX Descriptor Ring Size");

/* Set to zero to auto calculate */
int ixlv_max_queues = 0;
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixlv_max_queues, 0, "Number of Queues");

** Number of entries in Tx queue buf_ring.
** Increasing this will reduce the number of
** errors when transmitting fragmented UDP

static int ixlv_txbrsz = DEFAULT_TXBRSZ;
TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
    &ixlv_txbrsz, 0, "TX Buf Ring Size");

* Different method for processing TX descriptor
* completion.
static int ixlv_enable_head_writeback = 0;
TUNABLE_INT("hw.ixlv.enable_head_writeback",
    &ixlv_enable_head_writeback);
SYSCTL_INT(_hw_ixlv, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &ixlv_enable_head_writeback, 0,
    "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");

** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
** 	- default values for static ITR

int ixlv_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixlv_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixlv_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixlv_rx_itr, 0, "RX Interrupt Rate");

int ixlv_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixlv_tx_itr, 0, "TX Interrupt Rate");
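
/*
 * Example usage (illustrative values, not defaults): because the sysctls
 * above are CTLFLAG_RDTUN, they are set as loader tunables before the
 * module attaches, e.g. in /boot/loader.conf:
 *
 *   hw.ixlv.max_queues=4
 *   hw.ixlv.tx_ring_size=1024
 *   hw.ixlv.dynamic_rx_itr=1
 *
 * At runtime the same names under hw.ixlv are read-only and report the
 * values actually in use.
 */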
/*********************************************************************
 *  Device identification routine
 *
 *  ixlv_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

ixlv_probe(device_t dev)

	ixl_vendor_info_t *ent;

	u16	pci_vendor_id, pci_device_id;
	u16	pci_subvendor_id, pci_subdevice_id;
	char	device_name[256];

	INIT_DEBUGOUT("ixlv_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixlv_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(device_name, "%s, Version - %s",
			    ixlv_strings[ent->index],
			    ixlv_driver_version);
			device_set_desc_copy(dev, device_name);
			return (BUS_PROBE_DEFAULT);
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

ixlv_attach(device_t dev)

	INIT_DBG_DEV(dev, "begin");

	/* Allocate, clear, and link in our primary soft structure */
	sc = device_get_softc(dev);
	sc->dev = sc->osdep.dev = dev;

	/* Initialize hw struct */

	/* Allocate filter lists */
	ixlv_init_filters(sc);

	/* Save this tunable */
	vsi->enable_head_writeback = ixlv_enable_head_writeback;

	mtx_init(&sc->mtx, device_get_nameunit(dev),
	    "IXL SC Lock", MTX_DEF);

	/* Set up the timer callout */
	callout_init_mtx(&sc->timer, &sc->mtx, 0);

	/* Do PCI setup - map BAR0, etc */
	if (ixlv_allocate_pci_resources(sc)) {
		device_printf(dev, "%s: Allocation of PCI resources failed\n",

	INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");

	error = i40e_set_mac_type(hw);
		device_printf(dev, "%s: set_mac_type failed: %d\n",

	error = ixlv_reset_complete(hw);
		device_printf(dev, "%s: Device is still being reset\n",

	INIT_DBG_DEV(dev, "VF Device is ready for configuration");

	error = ixlv_setup_vc(sc);
		device_printf(dev, "%s: Error setting up PF comms, %d\n",

	INIT_DBG_DEV(dev, "PF API version verified");

	/* Need API version before sending reset message */
	error = ixlv_reset(sc);
		device_printf(dev, "VF reset failed; reload the driver\n");

	INIT_DBG_DEV(dev, "VF reset complete");

	/* Ask for VF config from PF */
	error = ixlv_vf_config(sc);
		device_printf(dev, "Error getting configuration from PF: %d\n",

	device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n",
	    sc->vf_res->num_vsis,
	    sc->vf_res->num_queue_pairs,
	    sc->vf_res->max_vectors,
	    sc->vf_res->rss_key_size,
	    sc->vf_res->rss_lut_size);

	device_printf(dev, "Offload flags: 0x%b\n",
	    sc->vf_res->vf_offload_flags, IXLV_PRINTF_VF_OFFLOAD_FLAGS);

	/* got VF config message back from PF, now we can parse it */
	for (int i = 0; i < sc->vf_res->num_vsis; i++) {
		if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
			sc->vsi_res = &sc->vf_res->vsi_res[i];
		device_printf(dev, "%s: no LAN VSI found\n", __func__);

	INIT_DBG_DEV(dev, "Resource Acquisition complete");

	/* If no mac address was assigned just make a random one */
	if (!ixlv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		bcopy(addr, hw->mac.addr, sizeof(addr));

	/* Now that the number of queues for this VF is known, set up interrupts */
	sc->msix = ixlv_init_msix(sc);
	/* We fail without MSIX support */

	vsi->id = sc->vsi_res->vsi_id;
	vsi->back = (void *)sc;
	vsi->flags |= IXL_FLAGS_IS_VF | IXL_FLAGS_USES_MSIX;

	ixl_vsi_setup_rings_size(vsi, ixlv_tx_ring_size, ixlv_rx_ring_size);

	/* This allocates the memory and early settings */
	if (ixlv_setup_queues(sc) != 0) {
		device_printf(dev, "%s: setup queues failed!\n",

	/* Do queue interrupt setup */
	if (ixlv_assign_msix(sc) != 0) {
		device_printf(dev, "%s: allocating queue interrupts failed!\n",

	INIT_DBG_DEV(dev, "Queue memory and interrupts setup");

	/* Setup the stack interface */
	if (ixlv_setup_interface(dev, sc) != 0) {
		device_printf(dev, "%s: setup interface failed!\n",

	INIT_DBG_DEV(dev, "Interface setup complete");

	/* Start AdminQ taskqueue */
	ixlv_init_taskqueue(sc);

	/* We expect a link state message, so schedule the AdminQ task now */
	taskqueue_enqueue(sc->tq, &sc->aq_irq);

	/* Initialize stats */
	bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
	ixlv_add_sysctls(sc);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

	/* We want AQ enabled early */
	ixlv_enable_adminq_irq(hw);

	/* Set things up to run init */
	sc->init_state = IXLV_INIT_READY;

	ixl_vc_init_mgr(sc, &sc->vc_mgr);

	INIT_DBG_DEV(dev, "end");

	ixlv_free_queues(vsi);
	ixlv_teardown_adminq_msix(sc);
	free(sc->vf_res, M_DEVBUF);
	i40e_shutdown_adminq(hw);
	ixlv_free_pci_resources(sc);
	mtx_destroy(&sc->mtx);
	ixlv_free_filters(sc);
	INIT_DBG_DEV(dev, "end: error %d", error);
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

ixlv_detach(device_t dev)

	struct ixlv_sc	*sc = device_get_softc(dev);
	struct ixl_vsi	*vsi = &sc->vsi;
	struct i40e_hw	*hw = &sc->hw;
	enum i40e_status_code	status;

	INIT_DBG_DEV(dev, "begin");

	/* Make sure VLANS are not using driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		if_printf(vsi->ifp, "Vlan in use, detach first\n");

	/* Remove all the media and link information */
	ifmedia_removeall(&sc->media);

	ether_ifdetach(vsi->ifp);
	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
	mtx_unlock(&sc->mtx);

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	callout_drain(&sc->vc_mgr.callout);

	ixlv_disable_adminq_irq(hw);
	ixlv_teardown_adminq_msix(sc);
	/* Drain admin queue taskqueue */
	taskqueue_free(sc->tq);
	status = i40e_shutdown_adminq(&sc->hw);
	if (status != I40E_SUCCESS) {
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));

	free(sc->vf_res, M_DEVBUF);
	ixlv_free_queues(vsi);
	ixlv_free_pci_resources(sc);
	ixlv_free_filters(sc);

	bus_generic_detach(dev);
	mtx_destroy(&sc->mtx);
	INIT_DBG_DEV(dev, "end");

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

ixlv_shutdown(device_t dev)

	struct ixlv_sc	*sc = device_get_softc(dev);

	INIT_DBG_DEV(dev, "begin");

	mtx_unlock(&sc->mtx);

	INIT_DBG_DEV(dev, "end");
 * Configure TXCSUM(IPV6) and TSO(4/6)
 *	- the hardware handles these together so we
 *	  keep the ifconfig capabilities for them in sync
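 *
 * For example (behavior of the code below): with both txcsum and tso4
 * enabled, clearing IFCAP_TXCSUM also clears TSO4 and remembers that via
 * IXL_FLAGS_KEEP_TSO4, so a later re-enable of txcsum restores TSO4 too.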
ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)

	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
		}
		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			    "TSO4 requires txcsum, enabling both...\n");
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;

	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			    "TSO6 requires txcsum6, enabling both...\n");
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixlv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)

	struct ixl_vsi		*vsi = ifp->if_softc;
	struct ixlv_sc		*sc = vsi->back;
	struct ifreq		*ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr		*ifa = (struct ifaddr *)data;
	bool			avoid_reset = FALSE;

		if (ifa->ifa_addr->sa_family == AF_INET)
		if (ifa->ifa_addr->sa_family == AF_INET6)
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
			error = ether_ioctl(ifp, command, data);

		IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			IOCTL_DBG_IF(ifp, "mtu too large");
		IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", (u_long)ifp->if_mtu, ifr->ifr_mtu);
		// ERJ: Interestingly enough, these types don't match
		ifp->if_mtu = (u_long)ifr->ifr_mtu;
		vsi->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
		    + ETHER_VLAN_ENCAP_LEN;
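		/*
		 * Worked example: the default 1500-byte MTU yields a
		 * max_frame_size of 1522 bytes (14-byte Ethernet header +
		 * 4-byte CRC + 4-byte VLAN tag).
		 */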
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			ixlv_init_locked(sc);
		mtx_unlock(&sc->mtx);

		IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixlv_init_locked(sc);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		sc->if_flags = ifp->if_flags;
		mtx_unlock(&sc->mtx);

		IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ixlv_disable_intr(vsi);
			ixlv_enable_intr(vsi);
			mtx_unlock(&sc->mtx);

		IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
		if (sc->init_state == IXLV_RUNNING) {
			ixlv_disable_intr(vsi);
			ixlv_enable_intr(vsi);
			mtx_unlock(&sc->mtx);

		IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);

		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");

		ixlv_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		VLAN_CAPABILITIES(ifp);

		IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
		error = ether_ioctl(ifp, command, data);

/*
** To do a reinit on the VF is unfortunately more complicated
** than on a physical device; we must have the PF more or less
** completely recreate our memory, so many things that were
** done only once at attach in traditional drivers now must be
** redone at each reinitialization. This function does that
** 'prelude' so we can then call the normal locked init code.
*/
ixlv_reinit_locked(struct ixlv_sc *sc)

	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ifnet		*ifp = vsi->ifp;
	struct ixlv_mac_filter	*mf, *mf_temp;
	struct ixlv_vlan_filter	*vf;

	INIT_DBG_IF(ifp, "begin");

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)

	error = ixlv_reset(sc);

	INIT_DBG_IF(ifp, "VF was reset");

	/* set the state in case we went thru RESET */
	sc->init_state = IXLV_RUNNING;

	/*
	** Resetting the VF drops all filters from hardware;
	** we need to mark them to be re-added in init.
	*/
	SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
		if (mf->flags & IXL_FILTER_DEL) {
			SLIST_REMOVE(sc->mac_filters, mf,
			    ixlv_mac_filter, next);
			mf->flags |= IXL_FILTER_ADD;
	if (vsi->num_vlans != 0)
		SLIST_FOREACH(vf, sc->vlan_filters, next)
			vf->flags = IXL_FILTER_ADD;
	else { /* clean any stale filters */
		while (!SLIST_EMPTY(sc->vlan_filters)) {
			vf = SLIST_FIRST(sc->vlan_filters);
			SLIST_REMOVE_HEAD(sc->vlan_filters, next);

	ixlv_enable_adminq_irq(hw);
	ixl_vc_flush(&sc->vc_mgr);

	INIT_DBG_IF(ifp, "end");

ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
	enum i40e_status_code code)

	/*
	 * Ignore "Adapter Stopped" message as that happens if an ifconfig down
	 * happens while a command is in progress, so we don't print an error
	 */
	if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
		if_printf(sc->vsi.ifp,
		    "Error %s waiting for PF to complete operation %d\n",
		    i40e_stat_str(&sc->hw, code), cmd->request);
ixlv_init_locked(struct ixlv_sc *sc)

	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;

	INIT_DBG_IF(ifp, "begin");

	IXLV_CORE_LOCK_ASSERT(sc);

	/* Do a reinit first if an init has already been done */
	if ((sc->init_state == IXLV_RUNNING) ||
	    (sc->init_state == IXLV_RESET_REQUIRED) ||
	    (sc->init_state == IXLV_RESET_PENDING))
		error = ixlv_reinit_locked(sc);
	/* Don't bother with init if we failed reinit */

	/* Remove existing MAC filter if new MAC addr is set */
	if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
		error = ixlv_del_mac_filter(sc, hw->mac.addr);
			ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
			    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,

	/* Check for an LAA mac address... */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;

	/* Add mac filter for this VF to PF */
	if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
		error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
		if (!error || error == EEXIST)
			ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
			    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,

	/* Setup vlan's if needed */
	ixlv_setup_vlan_filters(sc);

	/* Prepare the queues for operation */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		ixl_init_tx_ring(que);

		if (vsi->max_frame_size <= MCLBYTES)
			rxr->mbuf_sz = MCLBYTES;
			rxr->mbuf_sz = MJUMPAGESIZE;
		ixl_init_rx_ring(que);

	/* Set initial ITR values */
	ixlv_configure_itr(sc);

	/* Configure queues */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
	    IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);

	ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
	    IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);

	ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
	    IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);

	/* Start the local timer */
	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);

	sc->init_state = IXLV_RUNNING;

	INIT_DBG_IF(ifp, "end");
/*
** Init entry point for the stack
*/
	struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
	struct ixlv_sc *sc = vsi->back;

	/* Prevent init from running again while waiting for AQ calls
	 * made in init_locked() to complete. */
	if (sc->init_in_progress) {
		mtx_unlock(&sc->mtx);
	sc->init_in_progress = true;

	ixlv_init_locked(sc);
	mtx_unlock(&sc->mtx);

	/* Wait for init_locked to finish */
	while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
	    && ++retries < IXLV_MAX_INIT_WAIT) {
		i40e_msec_pause(25);
	if (retries >= IXLV_MAX_INIT_WAIT) {
		    "Init failed to complete in allotted time!\n");

	sc->init_in_progress = false;
	mtx_unlock(&sc->mtx);

/*
 * ixlv_attach() helper function; gathers information about
 * the (virtual) hardware for use elsewhere in the driver.
 */
ixlv_init_hw(struct ixlv_sc *sc)

	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

/*
 * ixlv_attach() helper function; initializes the admin queue
 * and attempts to establish contact with the PF by
 * retrying the initial "API version" message several times
 * or until the PF responds.
 */
ixlv_setup_vc(struct ixlv_sc *sc)

	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int error = 0, ret_error = 0, asq_retries = 0;
	bool send_api_ver_retried = 0;

	/* Need to set these AQ parameters before initializing AQ */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
		/* Initialize admin queue */
		error = i40e_init_adminq(hw);
			device_printf(dev, "%s: init_adminq failed: %d\n",

		INIT_DBG_DEV(dev, "Initialized Admin Queue; starting"
		    " send_api_ver attempt %d", i+1);

		/* Send VF's API version */
		error = ixlv_send_api_ver(sc);
			i40e_shutdown_adminq(hw);
			device_printf(dev, "%s: unable to send api"
			    " version to PF on attempt %d, error %d\n",
			    __func__, i+1, error);

		while (!i40e_asq_done(hw)) {
			if (++asq_retries > IXLV_AQ_MAX_ERR) {
				i40e_shutdown_adminq(hw);
				device_printf(dev, "Admin Queue timeout "
				    "(waiting for send_api_ver), %d more tries...\n",
				    IXLV_AQ_MAX_ERR - (i + 1));
			i40e_msec_pause(10);
		if (asq_retries > IXLV_AQ_MAX_ERR)

		INIT_DBG_DEV(dev, "Sent API version message to PF");

		/* Verify that the VF accepts the PF's API version */
		error = ixlv_verify_api_ver(sc);
		if (error == ETIMEDOUT) {
			if (!send_api_ver_retried) {
				/* Resend message, one more time */
				send_api_ver_retried = true;
				    "%s: Timeout while verifying API version on first"
				    " try!\n", __func__);
				    "%s: Timeout while verifying API version on second"
				    " try!\n", __func__);
			    "%s: Unable to verify API version,"
			    " error %s\n", __func__, i40e_stat_str(hw, error));
		i40e_shutdown_adminq(hw);

/*
 * ixlv_attach() helper function; asks the PF for this VF's
 * configuration, and saves the information if it receives it.
 */
ixlv_vf_config(struct ixlv_sc *sc)

	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int bufsz, error = 0, ret_error = 0;
	int asq_retries, retried = 0;

	error = ixlv_send_vf_config_msg(sc);
		    "%s: Unable to send VF config request, attempt %d,"
		    " error %d\n", __func__, retried + 1, error);

	while (!i40e_asq_done(hw)) {
		if (++asq_retries > IXLV_AQ_MAX_ERR) {
			device_printf(dev, "%s: Admin Queue timeout "
			    "(waiting for send_vf_config_msg), attempt %d\n",
			    __func__, retried + 1);
		i40e_msec_pause(10);

	INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",

	bufsz = sizeof(struct virtchnl_vf_resource) +
	    (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
	sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
		    "%s: Unable to allocate memory for VF configuration"
		    " message from PF on attempt %d\n", __func__, retried + 1);

	/* Check for VF config response */
	error = ixlv_get_vf_config(sc);
	if (error == ETIMEDOUT) {
		/* The 1st time we timeout, send the configuration message again */
		    "%s: ixlv_get_vf_config() timed out waiting for a response\n",
		    "%s: Unable to get VF configuration from PF after %d tries!\n",
		    __func__, retried + 1);

	free(sc->vf_res, M_DEVBUF);
/*
 * Allocate MSI/X vectors, setup the AQ vector early
 */
ixlv_init_msix(struct ixlv_sc *sc)

	device_t dev = sc->dev;
	int rid, want, vectors, queues, available;
	int auto_max_queues;

	rid = PCIR_BAR(IXL_MSIX_BAR);
	sc->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!sc->msix_mem) {
		/* May not be enabled */
		device_printf(sc->dev,
		    "Unable to map MSIX table\n");

	available = pci_msix_count(dev);
	if (available == 0) { /* system has msix disabled */
		bus_release_resource(dev, SYS_RES_MEMORY,
		sc->msix_mem = NULL;

	/* Clamp queues to number of CPUs and # of MSI-X vectors available */
	auto_max_queues = min(mp_ncpus, available - 1);
	/* Clamp queues to # assigned to VF by PF */
	auto_max_queues = min(auto_max_queues, sc->vf_res->num_queue_pairs);

	/* Override with tunable value if tunable is less than autoconfig count */
	if ((ixlv_max_queues != 0) && (ixlv_max_queues <= auto_max_queues))
		queues = ixlv_max_queues;
	/* Use autoconfig amount if that's lower */
	else if ((ixlv_max_queues != 0) && (ixlv_max_queues > auto_max_queues)) {
		device_printf(dev, "ixlv_max_queues (%d) is too large, using "
		    "autoconfig amount (%d)...\n",
		    ixlv_max_queues, auto_max_queues);
		queues = auto_max_queues;
	/* Limit maximum auto-configured queues to 8 if no user value is set */
		queues = min(auto_max_queues, 8);

	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for the admin queue.
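	**
	** For example (illustrative): a VF granted 4 queue pairs asks for
	** 5 MSI-X vectors here -- one per queue pair plus one for the AQ.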
	if (want <= available)	/* Have enough */
	device_printf(sc->dev,
	    "MSIX Configuration Problem, "
	    "%d vectors available but %d wanted!\n",

	/*
	 * If we're doing RSS, the number of queues needs to
	 * match the number of RSS buckets that are configured.
	 *
	 * + If there's more queues than RSS buckets, we'll end
	 *   up with queues that get no traffic.
	 *
	 * + If there's more RSS buckets than queues, we'll end
	 *   up having multiple RSS buckets map to the same queue,
	 *   so there'll be some contention.
	 */
	if (queues != rss_getnumbuckets()) {
		    "%s: queues (%d) != RSS buckets (%d)"
		    "; performance will be impacted.\n",
		    __func__, queues, rss_getnumbuckets());

	if (pci_alloc_msix(dev, &vectors) == 0) {
		device_printf(sc->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		sc->vsi.num_queues = queues;

	/* Next we need to setup the vector for the Admin Queue */
	rid = 1;	/* zero vector + 1 */
	sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->res == NULL) {
		device_printf(dev, "Unable to allocate"
		    " bus resource: AQ interrupt \n");
	if (bus_setup_intr(dev, sc->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixlv_msix_adminq, sc, &sc->tag)) {
		device_printf(dev, "Failed to register AQ handler");
	bus_describe_intr(dev, sc->res, sc->tag, "adminq");

	/* The VF driver MUST use MSIX */
ixlv_allocate_pci_resources(struct ixlv_sc *sc)

	device_t dev = sc->dev;

	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,

	if (!(sc->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");

	sc->osdep.mem_bus_space_tag =
	    rman_get_bustag(sc->pci_mem);
	sc->osdep.mem_bus_space_handle =
	    rman_get_bushandle(sc->pci_mem);
	sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
	sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
	sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
	sc->hw.back = &sc->osdep;

	ixl_set_busmaster(dev);
	ixl_set_msix_enable(dev);

	/* Disable adminq interrupts (just in case) */
	ixlv_disable_adminq_irq(&sc->hw);

/*
 * Free MSI-X related resources for a single queue
 */
ixlv_free_msix_resources(struct ixlv_sc *sc, struct ixl_queue *que)

	device_t dev = sc->dev;

	/*
	**  Release all msix queue resources:
	*/
	if (que->tag != NULL) {
		bus_teardown_intr(dev, que->res, que->tag);
	if (que->res != NULL) {
		int rid = que->msix + 1;
		bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	if (que->tq != NULL) {
		taskqueue_free(que->tq);

ixlv_free_pci_resources(struct ixlv_sc *sc)

	device_t dev = sc->dev;

	pci_release_msi(dev);

	if (sc->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(IXL_MSIX_BAR), sc->msix_mem);

	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), sc->pci_mem);

/*
 * Create taskqueue and tasklet for Admin Queue interrupts.
 */
ixlv_init_taskqueue(struct ixlv_sc *sc)

	TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);

	sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->tq);
	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
	    device_get_nameunit(sc->dev));
/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers for the VSI queues
 *
 **********************************************************************/
ixlv_assign_msix(struct ixlv_sc *sc)

	device_t	dev = sc->dev;
	struct		ixl_vsi *vsi = &sc->vsi;
	struct		ixl_queue *que = vsi->queues;
	struct		tx_ring	 *txr;
	int		error, rid, vector = 1;

	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev, "Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixlv_msix_que, que, &que->tag);
			device_printf(dev, "Failed to register que handler");
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
		/* Bind the vector to a CPU */
		cpu_id = rss_getcpu(i % rss_getnumbuckets());
		bus_bind_intr(dev, que->res, cpu_id);

		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
		TASK_INIT(&que->task, 0, ixlv_handle_que, que);
		que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
		CPU_SETOF(cpu_id, &cpu_mask);
		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
		    &cpu_mask, "%s (bucket %d)",
		    device_get_nameunit(dev), cpu_id);
		taskqueue_start_threads(&que->tq, 1, PI_NET,
		    "%s que", device_get_nameunit(dev));

/*
 * Special implementation of pause for the reset flow, needed
 * because a lock is held while waiting.
 */
ixlv_msec_pause(int msecs)

	int ticks_to_pause = (msecs * hz) / 1000;
	int start_ticks = ticks;

	if (cold || SCHEDULER_STOPPED()) {
		i40e_msec_delay(msecs);

		kern_yield(PRI_USER);
		int yielded_ticks = ticks - start_ticks;
		if (yielded_ticks > ticks_to_pause)
		else if (yielded_ticks < 0
		    && (yielded_ticks + INT_MAX + 1 > ticks_to_pause)) {
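			/*
			 * yielded_ticks went negative, so the ticks counter
			 * presumably wrapped around; assume the requested
			 * pause interval has already elapsed.
			 */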
/*
** Requests a VF reset from the PF.
**
** Requires the VF's Admin Queue to be initialized.
*/
ixlv_reset(struct ixlv_sc *sc)

	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;

	/* Ask the PF to reset us if we are initiating */
	if (sc->init_state != IXLV_RESET_PENDING)
		ixlv_request_reset(sc);

	ixlv_msec_pause(100);
	error = ixlv_reset_complete(hw);
		device_printf(dev, "%s: VF reset failed\n",

	error = i40e_shutdown_adminq(hw);
		device_printf(dev, "%s: shutdown_adminq failed: %d\n",

	error = i40e_init_adminq(hw);
		device_printf(dev, "%s: init_adminq failed: %d\n",

ixlv_reset_complete(struct i40e_hw *hw)

	/* Wait up to ~10 seconds */
	for (int i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_VFGEN_RSTAT) &
		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

		if ((reg == VIRTCHNL_VFR_VFACTIVE) ||
		    (reg == VIRTCHNL_VFR_COMPLETED))
		ixlv_msec_pause(100);
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)

	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;

	INIT_DBG_DEV(dev, "begin");

	ifp = vsi->ifp = if_alloc(IFT_ETHER);
		device_printf(dev, "%s: could not allocate ifnet"
		    " structure!\n", __func__);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	ifp->if_mtu = ETHERMTU;
#if __FreeBSD_version >= 1100000
	ifp->if_baudrate = IF_Gbps(40);
	if_initbaudrate(ifp, IF_Gbps(40));
	ifp->if_init = ixlv_init;
	ifp->if_softc = vsi;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixlv_ioctl;

#if __FreeBSD_version >= 1100000
	if_setgetcounterfn(ifp, ixl_get_counter);

	ifp->if_transmit = ixl_mq_start;

	ifp->if_qflush = ixl_qflush;
	ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2;

	ether_ifattach(ifp, sc->hw.mac.addr);

	vsi->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
	ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
	ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_TSO;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;

	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING

	ifp->if_capenable = ifp->if_capabilities;

	/*
	** Don't turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the ixl driver you can
	** enable this and get full hardware tag filtering.
	*/
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,

	/* Media types based on reported link speed over AdminQ */
	ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);

	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	INIT_DBG_DEV(dev, "end");
/*
** Allocate and setup a single queue
*/
ixlv_setup_queue(struct ixlv_sc *sc, struct ixl_queue *que)

	device_t		dev = sc->dev;
	struct tx_ring		*txr;
	struct rx_ring		*rxr;
	int			error = I40E_SUCCESS;

	txr->tail = I40E_QTX_TAIL1(que->me);
	/* Initialize the TX lock */
	snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
	    device_get_nameunit(dev), que->me);
	mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
	/*
	 * Create the TX descriptor ring
	 *
	 * In Head Writeback mode, the descriptor ring is one bigger
	 * than the number of descriptors for space for the HW to
	 * write back index of last completed descriptor.
	 */
	if (sc->vsi.enable_head_writeback) {
		tsize = roundup2((que->num_tx_desc *
		    sizeof(struct i40e_tx_desc)) +
		    sizeof(u32), DBA_ALIGN);
		tsize = roundup2((que->num_tx_desc *
		    sizeof(struct i40e_tx_desc)), DBA_ALIGN);
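	/*
	 * Illustrative sizing: 1024 descriptors x 16 bytes each
	 * (sizeof(struct i40e_tx_desc)) is 16 KB; head write-back mode
	 * adds room for the u32 completion index the hardware writes
	 * back, and either total is rounded up to DBA_ALIGN.
	 */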
	if (i40e_allocate_dma_mem(&sc->hw,
	    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
		    "Unable to allocate TX Descriptor memory\n");
		goto err_destroy_tx_mtx;
	txr->base = (struct i40e_tx_desc *)txr->dma.va;
	bzero((void *)txr->base, tsize);
	/* Now allocate transmit soft structs for the ring */
	if (ixl_allocate_tx_data(que)) {
		    "Critical Failure setting up TX structures\n");
		goto err_free_tx_dma;
	/* Allocate a buf ring */
	txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
	    M_WAITOK, &txr->mtx);
	if (txr->br == NULL) {
		    "Critical Failure setting up TX buf ring\n");
		goto err_free_tx_data;

	/*
	 * Next the RX queues...
	 */
	rsize = roundup2(que->num_rx_desc *
	    sizeof(union i40e_rx_desc), DBA_ALIGN);

	rxr->tail = I40E_QRX_TAIL1(que->me);

	/* Initialize the RX side lock */
	snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
	    device_get_nameunit(dev), que->me);
	mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);

	if (i40e_allocate_dma_mem(&sc->hw,
	    &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
		    "Unable to allocate RX Descriptor memory\n");
		goto err_destroy_rx_mtx;
	rxr->base = (union i40e_rx_desc *)rxr->dma.va;
	bzero((void *)rxr->base, rsize);

	/* Allocate receive soft structs for the ring */
	if (ixl_allocate_rx_data(que)) {
		    "Critical Failure setting up receive structs\n");
		goto err_free_rx_dma;

	i40e_free_dma_mem(&sc->hw, &rxr->dma);
	mtx_destroy(&rxr->mtx);
/* err_free_tx_buf_ring */
	buf_ring_free(txr->br, M_DEVBUF);
	ixl_free_que_tx(que);
	i40e_free_dma_mem(&sc->hw, &txr->dma);
	mtx_destroy(&txr->mtx);
/*
** Allocate and setup the interface queues
*/
ixlv_setup_queues(struct ixlv_sc *sc)

	device_t		dev = sc->dev;
	struct ixl_vsi		*vsi;
	struct ixl_queue	*que;
	int			error = I40E_SUCCESS;

	vsi->back = (void *)sc;

	/* Get memory for the station queues */
	    (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate queue memory\n");

	for (i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		que->num_tx_desc = vsi->num_tx_desc;
		que->num_rx_desc = vsi->num_rx_desc;

		if (ixlv_setup_queue(sc, que)) {
			goto err_free_queues;

	sysctl_ctx_init(&vsi->sysctl_ctx);

		ixlv_free_queue(sc, &vsi->queues[i]);
	free(vsi->queues, M_DEVBUF);
/*
** This routine is run via a vlan config EVENT,
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA, init will
** repopulate the real table.
*/
ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)

	struct ixl_vsi		*vsi = arg;
	struct ixlv_sc		*sc = vsi->back;
	struct ixlv_vlan_filter	*v;

	if (ifp->if_softc != arg)   /* Not our event */

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */

	/* Sanity check - make sure it doesn't already exist */
	SLIST_FOREACH(v, sc->vlan_filters, next) {
		if (v->vlan == vtag)

	v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
	v->flags = IXL_FILTER_ADD;
	ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
	    IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
	mtx_unlock(&sc->mtx);

/*
** This routine is run via a vlan
** unconfig EVENT, remove our entry
** in the soft vfta.
*/
ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)

	struct ixl_vsi		*vsi = arg;
	struct ixlv_sc		*sc = vsi->back;
	struct ixlv_vlan_filter	*v;

	if (ifp->if_softc != arg)

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */

	SLIST_FOREACH(v, sc->vlan_filters, next) {
		if (v->vlan == vtag) {
			v->flags = IXL_FILTER_DEL;

	ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
	    IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
	mtx_unlock(&sc->mtx);
/*
** Get a new filter and add it to the mac filter list.
*/
static struct ixlv_mac_filter *
ixlv_get_mac_filter(struct ixlv_sc *sc)

	struct ixlv_mac_filter	*f;

	f = malloc(sizeof(struct ixlv_mac_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INSERT_HEAD(sc->mac_filters, f, next);

/*
** Find the filter with matching MAC address
*/
static struct ixlv_mac_filter *
ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)

	struct ixlv_mac_filter	*f;

	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (cmp_etheraddr(f->macaddr, macaddr)) {

ixlv_teardown_adminq_msix(struct ixlv_sc *sc)

	device_t	dev = sc->dev;

	if (sc->tag != NULL) {
		bus_teardown_intr(dev, sc->res, sc->tag);
			device_printf(dev, "bus_teardown_intr() for"
			    " interrupt 0 failed\n");

	if (sc->res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
			device_printf(dev, "bus_release_resource() for"
			    " interrupt 0 failed\n");
/*
** Admin Queue interrupt handler
*/
ixlv_msix_adminq(void *arg)

	struct ixlv_sc	*sc = arg;
	struct i40e_hw	*hw = &sc->hw;

	reg = rd32(hw, I40E_VFINT_ICR01);
	mask = rd32(hw, I40E_VFINT_ICR0_ENA1);

	reg = rd32(hw, I40E_VFINT_DYN_CTL01);
	reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
	wr32(hw, I40E_VFINT_DYN_CTL01, reg);

	taskqueue_enqueue(sc->tq, &sc->aq_irq);

ixlv_enable_intr(struct ixl_vsi *vsi)

	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;

	ixlv_enable_adminq_irq(hw);
	for (int i = 0; i < vsi->num_queues; i++, que++)
		ixlv_enable_queue_irq(hw, que->me);

ixlv_disable_intr(struct ixl_vsi *vsi)

	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;

	ixlv_disable_adminq_irq(hw);
	for (int i = 0; i < vsi->num_queues; i++, que++)
		ixlv_disable_queue_irq(hw, que->me);

ixlv_disable_adminq_irq(struct i40e_hw *hw)

	wr32(hw, I40E_VFINT_DYN_CTL01, 0);
	wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
	rd32(hw, I40E_VFGEN_RSTAT);

ixlv_enable_adminq_irq(struct i40e_hw *hw)

	wr32(hw, I40E_VFINT_DYN_CTL01,
	    I40E_VFINT_DYN_CTL01_INTENA_MASK |
	    I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
	rd32(hw, I40E_VFGEN_RSTAT);

ixlv_enable_queue_irq(struct i40e_hw *hw, int id)

	reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
	    I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
	    I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
	wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);

ixlv_disable_queue_irq(struct i40e_hw *hw, int id)

	wr32(hw, I40E_VFINT_DYN_CTLN1(id),
	    I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
	rd32(hw, I40E_VFGEN_RSTAT);
/*
 * Get initial ITR values from tunable values.
 */
ixlv_configure_itr(struct ixlv_sc *sc)

	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;

	vsi->rx_itr_setting = ixlv_rx_itr;
	vsi->tx_itr_setting = ixlv_tx_itr;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring	*txr = &que->txr;
		struct rx_ring	*rxr = &que->rxr;

		wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i),
		    vsi->rx_itr_setting);
		rxr->itr = vsi->rx_itr_setting;
		rxr->latency = IXL_AVE_LATENCY;

		wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i),
		    vsi->tx_itr_setting);
		txr->itr = vsi->tx_itr_setting;
		txr->latency = IXL_AVE_LATENCY;
/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
ixlv_set_queue_rx_itr(struct ixl_queue *que)

	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct rx_ring	*rxr = &que->rxr;

	/* Idle, do nothing */
	if (rxr->bytes == 0)

	if (ixlv_dynamic_rx_itr) {
		rx_bytes = rxr->bytes/rxr->itr;

		/* Adjust latency range */
		switch (rxr->latency) {
		case IXL_LOW_LATENCY:
			if (rx_bytes > 10) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
		case IXL_AVE_LATENCY:
			if (rx_bytes > 20) {
				rx_latency = IXL_BULK_LATENCY;
				rx_itr = IXL_ITR_8K;
			} else if (rx_bytes <= 10) {
				rx_latency = IXL_LOW_LATENCY;
				rx_itr = IXL_ITR_100K;
		case IXL_BULK_LATENCY:
			if (rx_bytes <= 20) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;

		rxr->latency = rx_latency;

		if (rx_itr != rxr->itr) {
			/* do an exponential smoothing */
			rx_itr = (10 * rx_itr * rxr->itr) /
			    ((9 * rx_itr) + rxr->itr);
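			/*
			 * This blends the new target with the previous ITR
			 * (a 9:1 weighted harmonic-style mean). Illustrative
			 * arithmetic: old itr 100, new target 20 gives
			 * (10 * 20 * 100) / (9 * 20 + 100) = 71.
			 */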
			rxr->itr = min(rx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
			    que->me), rxr->itr);
	} else { /* We may have toggled to non-dynamic */
		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->rx_itr_setting = ixlv_rx_itr;
		/* Update the hardware if needed */
		if (rxr->itr != vsi->rx_itr_setting) {
			rxr->itr = vsi->rx_itr_setting;
			wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
			    que->me), rxr->itr);

/*
** Provide an update to the queue TX
** interrupt moderation value.
*/
ixlv_set_queue_tx_itr(struct ixl_queue *que)

	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;

	/* Idle, do nothing */
	if (txr->bytes == 0)

	if (ixlv_dynamic_tx_itr) {
		tx_bytes = txr->bytes/txr->itr;

		switch (txr->latency) {
		case IXL_LOW_LATENCY:
			if (tx_bytes > 10) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
		case IXL_AVE_LATENCY:
			if (tx_bytes > 20) {
				tx_latency = IXL_BULK_LATENCY;
				tx_itr = IXL_ITR_8K;
			} else if (tx_bytes <= 10) {
				tx_latency = IXL_LOW_LATENCY;
				tx_itr = IXL_ITR_100K;
		case IXL_BULK_LATENCY:
			if (tx_bytes <= 20) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;

		txr->latency = tx_latency;

		if (tx_itr != txr->itr) {
			/* do an exponential smoothing */
			tx_itr = (10 * tx_itr * txr->itr) /
			    ((9 * tx_itr) + txr->itr);
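			/* Same 9:1 weighted smoothing as the RX ITR path above. */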
			txr->itr = min(tx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
			    que->me), txr->itr);

	} else { /* We may have toggled to non-dynamic */
		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->tx_itr_setting = ixlv_tx_itr;
		/* Update the hardware if needed */
		if (txr->itr != vsi->tx_itr_setting) {
			txr->itr = vsi->tx_itr_setting;
			wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
			    que->me), txr->itr);

/*
** MSIX Interrupt Handlers and Tasklets
*/
ixlv_handle_que(void *context, int pending)

	struct ixl_queue *que = context;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw  *hw = vsi->hw;
	struct tx_ring  *txr = &que->txr;
	struct ifnet    *ifp = vsi->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);
		mtx_lock(&txr->mtx);
		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		mtx_unlock(&txr->mtx);
			taskqueue_enqueue(que->tq, &que->task);

	/* Reenable this interrupt - hmmm */
	ixlv_enable_queue_irq(hw, que->me);

/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
ixlv_msix_que(void *arg)

	struct ixl_queue	*que = arg;
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	bool		more_tx, more_rx;

	/* Spurious interrupts are ignored */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))

	/* There are drivers which disable auto-masking of interrupts,
	 * which is a global setting for all ports. We have to make sure
	 * to mask it to not lose IRQs */
	ixlv_disable_queue_irq(hw, que->me);

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	mtx_lock(&txr->mtx);
	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
	mtx_unlock(&txr->mtx);

	ixlv_set_queue_rx_itr(que);
	ixlv_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);

	ixlv_enable_queue_irq(hw, que->me);
2334 /*********************************************************************
2336 * Media Ioctl callback
2338 * This routine is called whenever the user queries the status of
2339 * the interface using ifconfig.
2341 **********************************************************************/
2343 ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
2345 struct ixl_vsi *vsi = ifp->if_softc;
2346 struct ixlv_sc *sc = vsi->back;
2348 INIT_DBG_IF(ifp, "begin");
2352 ixlv_update_link_status(sc);
2354 ifmr->ifm_status = IFM_AVALID;
2355 ifmr->ifm_active = IFM_ETHER;
2358 mtx_unlock(&sc->mtx);
2359 INIT_DBG_IF(ifp, "end: link not up");
2363 ifmr->ifm_status |= IFM_ACTIVE;
2364 /* Hardware is always full-duplex */
2365 ifmr->ifm_active |= IFM_FDX;
2367 /* Based on the link speed reported by the PF over the AdminQ, choose a
2368 * PHY type to report. This isn't 100% correct since we don't really
2369 * know the underlying PHY type of the PF, but at least we can report
2370 * a valid link speed...
2372 switch (sc->link_speed) {
2373 case VIRTCHNL_LINK_SPEED_100MB:
2374 ifmr->ifm_active |= IFM_100_TX;
2376 case VIRTCHNL_LINK_SPEED_1GB:
2377 ifmr->ifm_active |= IFM_1000_T;
2379 case VIRTCHNL_LINK_SPEED_10GB:
2380 ifmr->ifm_active |= IFM_10G_SR;
2382 case VIRTCHNL_LINK_SPEED_20GB:
2383 case VIRTCHNL_LINK_SPEED_25GB:
2384 ifmr->ifm_active |= IFM_25G_SR;
2386 case VIRTCHNL_LINK_SPEED_40GB:
2387 ifmr->ifm_active |= IFM_40G_SR4;
2390 ifmr->ifm_active |= IFM_UNKNOWN;
2394 mtx_unlock(&sc->mtx);
2395 INIT_DBG_IF(ifp, "end");
2399 /*********************************************************************
2401 * Media Ioctl callback
2403 * This routine is called when the user changes speed/duplex using
2404 * the media/mediaopt options with ifconfig.
2406 **********************************************************************/
2408 ixlv_media_change(struct ifnet * ifp)
2410 struct ixl_vsi *vsi = ifp->if_softc;
2411 struct ifmedia *ifm = &vsi->media;
2413 INIT_DBG_IF(ifp, "begin");
2415 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2418 if_printf(ifp, "Changing speed is not supported\n");
2420 INIT_DBG_IF(ifp, "end");
2425 /*********************************************************************
2426 * Multicast Initialization
2428 * This routine is called by init to reset to a fresh state.
2430 **********************************************************************/
2433 ixlv_init_multi(struct ixl_vsi *vsi)
2435 struct ixlv_mac_filter *f;
2436 struct ixlv_sc *sc = vsi->back;
2439 IOCTL_DBG_IF(vsi->ifp, "begin");
2441 /* First clear any multicast filters */
2442 SLIST_FOREACH(f, sc->mac_filters, next) {
2443 if ((f->flags & IXL_FILTER_USED)
2444 && (f->flags & IXL_FILTER_MC)) {
2445 f->flags |= IXL_FILTER_DEL;
2450 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2451 IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2454 IOCTL_DBG_IF(vsi->ifp, "end");
2458 ixlv_add_multi(struct ixl_vsi *vsi)
2460 struct ifmultiaddr *ifma;
2461 struct ifnet *ifp = vsi->ifp;
2462 struct ixlv_sc *sc = vsi->back;
2465 IOCTL_DBG_IF(ifp, "begin");
2467 if_maddr_rlock(ifp);
2469 ** Get a count to decide whether to
2470 ** simply use multicast promiscuous mode.
2472 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2473 if (ifma->ifma_addr->sa_family != AF_LINK)
2477 if_maddr_runlock(ifp);
2479 /* TODO: Remove -- cannot set promiscuous mode in a VF */
2480 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
2481 /* delete all multicast filters */
2482 ixlv_init_multi(vsi);
2483 sc->promiscuous_flags |= FLAG_VF_MULTICAST_PROMISC;
2484 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2485 IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
2487 IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
2492 if_maddr_rlock(ifp);
2493 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2494 if (ifma->ifma_addr->sa_family != AF_LINK)
2496 if (!ixlv_add_mac_filter(sc,
2497 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2501 if_maddr_runlock(ifp);
2503 ** Notify AQ task that sw filters need to be added to the hw list.
2507 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2508 IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
2511 IOCTL_DBG_IF(ifp, "end");
2515 ixlv_del_multi(struct ixl_vsi *vsi)
2517 struct ixlv_mac_filter *f;
2518 struct ifmultiaddr *ifma;
2519 struct ifnet *ifp = vsi->ifp;
2520 struct ixlv_sc *sc = vsi->back;
2524 IOCTL_DBG_IF(ifp, "begin");
2526 /* Search for removed multicast addresses */
2527 if_maddr_rlock(ifp);
2528 SLIST_FOREACH(f, sc->mac_filters, next) {
2529 if ((f->flags & IXL_FILTER_USED)
2530 && (f->flags & IXL_FILTER_MC)) {
2531 /* check if mac address in filter is in sc's list */
2533 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2534 if (ifma->ifma_addr->sa_family != AF_LINK)
2537 (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2538 if (cmp_etheraddr(f->macaddr, mc_addr)) {
2543 /* if this filter is not in the sc's list, remove it */
2544 if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
2545 f->flags |= IXL_FILTER_DEL;
2547 IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
2548 MAC_FORMAT_ARGS(f->macaddr));
2550 else if (match == FALSE)
2551 IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
2552 MAC_FORMAT_ARGS(f->macaddr));
2555 if_maddr_runlock(ifp);
2558 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2559 IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2562 IOCTL_DBG_IF(ifp, "end");
2565 /*********************************************************************
2568 * This routine checks for link status, updates statistics,
2569 * and runs the watchdog check.
2571 **********************************************************************/
2574 ixlv_local_timer(void *arg)
2576 struct ixlv_sc *sc = arg;
2577 struct i40e_hw *hw = &sc->hw;
2578 struct ixl_vsi *vsi = &sc->vsi;
2581 IXLV_CORE_LOCK_ASSERT(sc);
2583 /* If Reset is in progress just bail */
2584 if (sc->init_state == IXLV_RESET_PENDING)
2587 /* Check for when PF triggers a VF reset */
2588 val = rd32(hw, I40E_VFGEN_RSTAT) &
2589 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2591 if (val != VIRTCHNL_VFR_VFACTIVE
2592 && val != VIRTCHNL_VFR_COMPLETED) {
2593 DDPRINTF(sc->dev, "reset in progress! (%d)", val);
2597 ixlv_request_stats(sc);
2599 /* clean and process any events */
2600 taskqueue_enqueue(sc->tq, &sc->aq_irq);
2602 /* Increment stat when a queue shows hung */
2603 if (ixl_queue_hang_check(vsi))
2604 sc->watchdog_events++;
2606 callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
2610 ** Note: this routine updates the OS on the link state;
2611 ** the real check of the hardware only happens with
2612 ** a link interrupt.
2615 ixlv_update_link_status(struct ixlv_sc *sc)
2617 struct ixl_vsi *vsi = &sc->vsi;
2618 struct ifnet *ifp = vsi->ifp;
2621 if (vsi->link_active == FALSE) {
2623 if_printf(ifp,"Link is Up, %s\n",
2624 ixlv_vc_speed_to_string(sc->link_speed));
2625 vsi->link_active = TRUE;
2626 if_link_state_change(ifp, LINK_STATE_UP);
2628 } else { /* Link down */
2629 if (vsi->link_active == TRUE) {
2631 if_printf(ifp,"Link is Down\n");
2632 if_link_state_change(ifp, LINK_STATE_DOWN);
2633 vsi->link_active = FALSE;
2640 /*********************************************************************
2642 * This routine disables all traffic on the adapter by issuing a
2643 * global reset on the MAC and deallocates TX/RX buffers.
2645 **********************************************************************/
2648 ixlv_stop(struct ixlv_sc *sc)
2654 INIT_DBG_IF(ifp, "begin");
2656 IXLV_CORE_LOCK_ASSERT(sc);
2658 ixl_vc_flush(&sc->vc_mgr);
2659 ixlv_disable_queues(sc);
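	/*
	 * The interface is not considered stopped until the PF's response
	 * to the disable-queues request arrives over the AdminQ; poll it
	 * briefly (up to hz/10 ticks) below so that response can be
	 * processed.
	 */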
2662 while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
2663 ((ticks - start) < hz/10))
2664 ixlv_do_adminq_locked(sc);
2666 /* Stop the local timer */
2667 callout_stop(&sc->timer);
2669 INIT_DBG_IF(ifp, "end");
2672 /* Free a single queue struct */
2674 ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que)
2676 struct tx_ring *txr = &que->txr;
2677 struct rx_ring *rxr = &que->rxr;
2679 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2683 buf_ring_free(txr->br, M_DEVBUF);
2684 ixl_free_que_tx(que);
2686 i40e_free_dma_mem(&sc->hw, &txr->dma);
2688 IXL_TX_LOCK_DESTROY(txr);
2690 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2693 ixl_free_que_rx(que);
2695 i40e_free_dma_mem(&sc->hw, &rxr->dma);
2697 IXL_RX_LOCK_DESTROY(rxr);
2700 /*********************************************************************
2702 * Free all station queue structs.
2704 **********************************************************************/
2706 ixlv_free_queues(struct ixl_vsi *vsi)
2708 struct ixlv_sc *sc = (struct ixlv_sc *)vsi->back;
2709 struct ixl_queue *que = vsi->queues;
2711 for (int i = 0; i < vsi->num_queues; i++, que++) {
2712 /* First, free the MSI-X resources */
2713 ixlv_free_msix_resources(sc, que);
2714 /* Then free other queue data */
2715 ixlv_free_queue(sc, que);
2718 sysctl_ctx_free(&vsi->sysctl_ctx);
2719 free(vsi->queues, M_DEVBUF);
2723 ixlv_config_rss_reg(struct ixlv_sc *sc)
2725 struct i40e_hw *hw = &sc->hw;
2726 struct ixl_vsi *vsi = &sc->vsi;
2728 u64 set_hena = 0, hena;
2730 u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
2732 u32 rss_hash_config;
2735 /* Don't set up RSS if using a single queue */
2736 if (vsi->num_queues == 1) {
2737 wr32(hw, I40E_VFQF_HENA(0), 0);
2738 wr32(hw, I40E_VFQF_HENA(1), 0);
2744 /* Fetch the configured RSS key */
2745 rss_getkey((uint8_t *) &rss_seed);
2747 ixl_get_default_rss_key(rss_seed);
2750 /* Fill out hash function seed */
2751 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
2752 wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
2754 /* Enable PCTYPES for RSS: */
2756 rss_hash_config = rss_gethashconfig();
2757 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2758 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2759 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2760 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2761 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2762 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2763 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2764 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2765 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2766 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2767 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2768 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2769 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2770 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
2772 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
2774 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
2775 ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
2777 wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
2778 wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
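	/*
	 * HENA is a 64-bit enable mask of packet classifier types, split
	 * across the two 32-bit VFQF_HENA registers written above.
	 */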
2780 /* Populate the LUT across the configured queues in round-robin fashion */
2781 for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
2782 if (j == vsi->num_queues)
2786 * Fetch the RSS bucket id for the given indirection entry.
2787 * Cap it at the number of configured buckets (which is num_queues).
2790 que_id = rss_get_indirection_to_bucket(i);
2791 que_id = que_id % vsi->num_queues;
2795 /* lut = 4-byte sliding window of 4 lut entries */
2796 lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
2797 /* On i = 3, we have 4 entries in lut; write to the register */
2799 wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
2800 DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
2807 ixlv_config_rss_pf(struct ixlv_sc *sc)
2809 ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_key_cmd,
2810 IXLV_FLAG_AQ_CONFIG_RSS_KEY, ixl_init_cmd_complete, sc);
2812 ixl_vc_enqueue(&sc->vc_mgr, &sc->set_rss_hena_cmd,
2813 IXLV_FLAG_AQ_SET_RSS_HENA, ixl_init_cmd_complete, sc);
2815 ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_lut_cmd,
2816 IXLV_FLAG_AQ_CONFIG_RSS_LUT, ixl_init_cmd_complete, sc);
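/*
 * Note: the three commands queued above stage the RSS key, the HENA
 * hash-enable mask, and the lookup table as separate virtchnl requests
 * to the PF; they are sent and completed asynchronously via the AdminQ.
 */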
2820 ** ixlv_config_rss - setup RSS
2822 ** RSS keys and table are cleared on VF reset.
2825 ixlv_config_rss(struct ixlv_sc *sc)
2827 if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) {
2828 DDPRINTF(sc->dev, "Setting up RSS using VF registers...");
2829 ixlv_config_rss_reg(sc);
2830 } else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2831 DDPRINTF(sc->dev, "Setting up RSS using messages to PF...");
2832 ixlv_config_rss_pf(sc);
2834 device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
2838 ** This routine refreshes VLAN filters; called by init,
2839 ** it scans the filter table and then updates the AQ.
2842 ixlv_setup_vlan_filters(struct ixlv_sc *sc)
2844 struct ixl_vsi *vsi = &sc->vsi;
2845 struct ixlv_vlan_filter *f;
2848 if (vsi->num_vlans == 0)
2851 ** Scan the filter table for vlan entries;
2852 ** if any are found, request the AQ update.
2854 SLIST_FOREACH(f, sc->vlan_filters, next)
2855 if (f->flags & IXL_FILTER_ADD)
2858 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
2859 IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
2864 ** This routine adds new MAC filters to the sc's list;
2865 ** these are later added in hardware by sending a virtual channel message.
2869 ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
2871 struct ixlv_mac_filter *f;
2873 /* Does one already exist? */
2874 f = ixlv_find_mac_filter(sc, macaddr);
2876 IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
2877 MAC_FORMAT_ARGS(macaddr));
2881 /* If not, get a new empty filter */
2882 f = ixlv_get_mac_filter(sc);
2884 if_printf(sc->vsi.ifp, "%s: no filters available!!\n",
2889 IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
2890 MAC_FORMAT_ARGS(macaddr));
2892 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2893 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
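	/*
	 * The filter is only staged in the software list at this point;
	 * the IXL_FILTER_ADD flag marks it for the next ADD_MAC_FILTER
	 * request, which actually programs it into hardware.
	 */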
2899 ** Marks a MAC filter for deletion.
2902 ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
2904 struct ixlv_mac_filter *f;
2906 f = ixlv_find_mac_filter(sc, macaddr);
2910 f->flags |= IXL_FILTER_DEL;
2915 ** Tasklet handler for MSIX Adminq interrupts
2916 ** - done outside interrupt context since it might sleep
2919 ixlv_do_adminq(void *context, int pending)
2921 struct ixlv_sc *sc = context;
2924 ixlv_do_adminq_locked(sc);
2925 mtx_unlock(&sc->mtx);
2930 ixlv_do_adminq_locked(struct ixlv_sc *sc)
2932 struct i40e_hw *hw = &sc->hw;
2933 struct i40e_arq_event_info event;
2934 struct virtchnl_msg *v_msg;
2935 device_t dev = sc->dev;
2939 bool aq_error = false;
2941 IXLV_CORE_LOCK_ASSERT(sc);
2943 event.buf_len = IXL_AQ_BUF_SZ;
2944 event.msg_buf = sc->aq_buffer;
2945 v_msg = (struct virtchnl_msg *)&event.desc;
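	/*
	 * The virtchnl opcode and return value are carried in the AQ
	 * descriptor itself, so v_msg simply aliases event.desc; any
	 * message payload is placed in event.msg_buf.
	 */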
2948 ret = i40e_clean_arq_element(hw, &event, &result);
2951 ixlv_vc_completion(sc, v_msg->v_opcode,
2952 v_msg->v_retval, event.msg_buf, event.msg_len);
2954 bzero(event.msg_buf, IXL_AQ_BUF_SZ);
2957 /* check for Admin queue errors */
2958 oldreg = reg = rd32(hw, hw->aq.arq.len);
2959 if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
2960 device_printf(dev, "ARQ VF Error detected\n");
2961 reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
2964 if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
2965 device_printf(dev, "ARQ Overflow Error detected\n");
2966 reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
2969 if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
2970 device_printf(dev, "ARQ Critical Error detected\n");
2971 reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
2975 wr32(hw, hw->aq.arq.len, reg);
2977 oldreg = reg = rd32(hw, hw->aq.asq.len);
2978 if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
2979 device_printf(dev, "ASQ VF Error detected\n");
2980 reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
2983 if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
2984 device_printf(dev, "ASQ Overflow Error detected\n");
2985 reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
2988 if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
2989 device_printf(dev, "ASQ Critical Error detected\n");
2990 reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
2994 wr32(hw, hw->aq.asq.len, reg);
2997 /* Need to reset adapter */
2998 device_printf(dev, "WARNING: Resetting!\n");
2999 sc->init_state = IXLV_RESET_REQUIRED;
3001 ixlv_init_locked(sc);
3003 ixlv_enable_adminq_irq(hw);
3007 ixlv_add_sysctls(struct ixlv_sc *sc)
3009 device_t dev = sc->dev;
3010 struct ixl_vsi *vsi = &sc->vsi;
3012 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3013 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3014 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3016 /* Driver statistics sysctls */
3017 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
3018 CTLFLAG_RD, &sc->watchdog_events,
3019 "Watchdog timeouts");
3020 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
3021 CTLFLAG_RD, &sc->admin_irq,
3022 "Admin Queue IRQ Handled");
3024 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_size",
3025 CTLFLAG_RD, &vsi->num_tx_desc, 0,
3027 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_size",
3028 CTLFLAG_RD, &vsi->num_rx_desc, 0,
3031 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "current_speed",
3032 CTLTYPE_STRING | CTLFLAG_RD,
3033 sc, 0, ixlv_sysctl_current_speed,
3034 "A", "Current Port Speed");
3036 ixl_add_sysctls_eth_stats(ctx, child, &vsi->eth_stats);
3038 /* VSI statistics sysctls */
3039 vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
3040 CTLFLAG_RD, NULL, "VSI-specific statistics");
3041 ixl_vsi_add_queues_stats(vsi);
3045 ixlv_init_filters(struct ixlv_sc *sc)
3047 sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
3048 M_DEVBUF, M_NOWAIT | M_ZERO);
3049 SLIST_INIT(sc->mac_filters);
3050 sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
3051 M_DEVBUF, M_NOWAIT | M_ZERO);
3052 SLIST_INIT(sc->vlan_filters);
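/*
 * Both list heads are heap-allocated here and released again in
 * ixlv_free_filters().
 */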
3057 ixlv_free_filters(struct ixlv_sc *sc)
3059 struct ixlv_mac_filter *f;
3060 struct ixlv_vlan_filter *v;
3062 while (!SLIST_EMPTY(sc->mac_filters)) {
3063 f = SLIST_FIRST(sc->mac_filters);
3064 SLIST_REMOVE_HEAD(sc->mac_filters, next);
3067 free(sc->mac_filters, M_DEVBUF);
3068 while (!SLIST_EMPTY(sc->vlan_filters)) {
3069 v = SLIST_FIRST(sc->vlan_filters);
3070 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
3073 free(sc->vlan_filters, M_DEVBUF);
3078 ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed)
3092 switch (link_speed) {
3093 case VIRTCHNL_LINK_SPEED_100MB:
3096 case VIRTCHNL_LINK_SPEED_1GB:
3099 case VIRTCHNL_LINK_SPEED_10GB:
3102 case VIRTCHNL_LINK_SPEED_40GB:
3105 case VIRTCHNL_LINK_SPEED_20GB:
3108 case VIRTCHNL_LINK_SPEED_25GB:
3111 case VIRTCHNL_LINK_SPEED_UNKNOWN:
3117 return speeds[index];
3121 ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
3123 struct ixlv_sc *sc = (struct ixlv_sc *)arg1;
3126 error = sysctl_handle_string(oidp,
3127 ixlv_vc_speed_to_string(sc->link_speed),
3134 * ixlv_sysctl_qtx_tail_handler
3135 * Retrieves I40E_QTX_TAIL1 value from hardware
3139 ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
3141 struct ixl_queue *que;
3145 que = ((struct ixl_queue *)oidp->oid_arg1);
3148 val = rd32(que->vsi->hw, que->txr.tail);
3149 error = sysctl_handle_int(oidp, &val, 0, req);
3150 if (error || !req->newptr)
3156 * ixlv_sysctl_qrx_tail_handler
3157 * Retrieves I40E_QRX_TAIL1 value from hardware
3161 ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
3163 struct ixl_queue *que;
3167 que = ((struct ixl_queue *)oidp->oid_arg1);
3170 val = rd32(que->vsi->hw, que->rxr.tail);
3171 error = sysctl_handle_int(oidp, &val, 0, req);
3172 if (error || !req->newptr)