1 /******************************************************************************
3 Copyright (c) 2013-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
38 /*********************************************************************
40 *********************************************************************/
41 #define IXLV_DRIVER_VERSION_MAJOR 1
42 #define IXLV_DRIVER_VERSION_MINOR 5
43 #define IXLV_DRIVER_VERSION_BUILD 4
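/*
 * Note: __XSTRING() (from sys/cdefs.h) expands and then stringifies its
 * argument, so the macros above produce the version string "1.5.4-k" below.
 */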
45 char ixlv_driver_version[] = __XSTRING(IXLV_DRIVER_VERSION_MAJOR) "."
46 __XSTRING(IXLV_DRIVER_VERSION_MINOR) "."
47 __XSTRING(IXLV_DRIVER_VERSION_BUILD) "-k";
49 /*********************************************************************
52 * Used by probe to select devices to load on
53 * Last field stores an index into ixlv_strings
54 * Last entry must be all 0s
56 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
57 *********************************************************************/
59 static ixl_vendor_info_t ixlv_vendor_info_array[] =
61 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
62 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0},
63 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, 0, 0, 0},
64 /* required last entry */
68 /*********************************************************************
69 * Table of branding strings
70 *********************************************************************/
72 static char *ixlv_strings[] = {
73 "Intel(R) Ethernet Connection 700 Series VF Driver"
77 /*********************************************************************
79 *********************************************************************/
80 static int ixlv_probe(device_t);
81 static int ixlv_attach(device_t);
82 static int ixlv_detach(device_t);
83 static int ixlv_shutdown(device_t);
84 static void ixlv_init_locked(struct ixlv_sc *);
85 static int ixlv_allocate_pci_resources(struct ixlv_sc *);
86 static void ixlv_free_pci_resources(struct ixlv_sc *);
87 static int ixlv_assign_msix(struct ixlv_sc *);
88 static int ixlv_init_msix(struct ixlv_sc *);
89 static int ixlv_init_taskqueue(struct ixlv_sc *);
90 static int ixlv_setup_queues(struct ixlv_sc *);
91 static void ixlv_config_rss(struct ixlv_sc *);
92 static void ixlv_stop(struct ixlv_sc *);
93 static void ixlv_add_multi(struct ixl_vsi *);
94 static void ixlv_del_multi(struct ixl_vsi *);
95 static void ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que);
96 static void ixlv_free_queues(struct ixl_vsi *);
97 static int ixlv_setup_interface(device_t, struct ixlv_sc *);
98 static int ixlv_teardown_adminq_msix(struct ixlv_sc *);
100 static int ixlv_media_change(struct ifnet *);
101 static void ixlv_media_status(struct ifnet *, struct ifmediareq *);
103 static void ixlv_local_timer(void *);
105 static int ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
106 static int ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
107 static void ixlv_init_filters(struct ixlv_sc *);
108 static void ixlv_free_filters(struct ixlv_sc *);
110 static void ixlv_msix_que(void *);
111 static void ixlv_msix_adminq(void *);
112 static void ixlv_do_adminq(void *, int);
113 static void ixlv_do_adminq_locked(struct ixlv_sc *sc);
114 static void ixlv_handle_que(void *, int);
115 static int ixlv_reset(struct ixlv_sc *);
116 static int ixlv_reset_complete(struct i40e_hw *);
117 static void ixlv_set_queue_rx_itr(struct ixl_queue *);
118 static void ixlv_set_queue_tx_itr(struct ixl_queue *);
119 static void ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
120 enum i40e_status_code);
121 static void ixlv_configure_itr(struct ixlv_sc *);
123 static void ixlv_enable_adminq_irq(struct i40e_hw *);
124 static void ixlv_disable_adminq_irq(struct i40e_hw *);
125 static void ixlv_enable_queue_irq(struct i40e_hw *, int);
126 static void ixlv_disable_queue_irq(struct i40e_hw *, int);
128 static void ixlv_setup_vlan_filters(struct ixlv_sc *);
129 static void ixlv_register_vlan(void *, struct ifnet *, u16);
130 static void ixlv_unregister_vlan(void *, struct ifnet *, u16);
132 static void ixlv_init_hw(struct ixlv_sc *);
133 static int ixlv_setup_vc(struct ixlv_sc *);
134 static int ixlv_vf_config(struct ixlv_sc *);
136 static void ixlv_cap_txcsum_tso(struct ixl_vsi *,
137 struct ifnet *, int);
139 static char *ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed);
140 static int ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
142 static void ixlv_add_sysctls(struct ixlv_sc *);
144 static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
145 static int ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
148 /*********************************************************************
149 * FreeBSD Device Interface Entry Points
150 *********************************************************************/
152 static device_method_t ixlv_methods[] = {
153 /* Device interface */
154 DEVMETHOD(device_probe, ixlv_probe),
155 DEVMETHOD(device_attach, ixlv_attach),
156 DEVMETHOD(device_detach, ixlv_detach),
157 DEVMETHOD(device_shutdown, ixlv_shutdown),
161 static driver_t ixlv_driver = {
162 "ixlv", ixlv_methods, sizeof(struct ixlv_sc),
165 devclass_t ixlv_devclass;
166 DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);
168 MODULE_DEPEND(ixlv, pci, 1, 1, 1);
169 MODULE_DEPEND(ixlv, ether, 1, 1, 1);
172 ** TUNEABLE PARAMETERS:
175 static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
176 "IXLV driver parameters");
179 ** Number of descriptors per ring:
180 ** - TX and RX sizes are independently configurable
182 static int ixlv_tx_ring_size = IXL_DEFAULT_RING;
183 TUNABLE_INT("hw.ixlv.tx_ring_size", &ixlv_tx_ring_size);
184 SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_ring_size, CTLFLAG_RDTUN,
185 &ixlv_tx_ring_size, 0, "TX Descriptor Ring Size");
187 static int ixlv_rx_ring_size = IXL_DEFAULT_RING;
188 TUNABLE_INT("hw.ixlv.rx_ring_size", &ixlv_rx_ring_size);
189 SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_ring_size, CTLFLAG_RDTUN,
190 &ixlv_rx_ring_size, 0, "TX Descriptor Ring Size");
192 /* Set to zero to auto calculate */
193 int ixlv_max_queues = 0;
194 TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
195 SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
196 &ixlv_max_queues, 0, "Number of Queues");
199 ** Number of entries in Tx queue buf_ring.
200 ** Increasing this will reduce the number of
201 ** errors when transmitting fragmented UDP packets.
204 static int ixlv_txbrsz = DEFAULT_TXBRSZ;
205 TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
206 SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
207 &ixlv_txbrsz, 0, "TX Buf Ring Size");
210 * Different method for processing TX descriptor completion.
213 static int ixlv_enable_head_writeback = 0;
214 TUNABLE_INT("hw.ixlv.enable_head_writeback",
215 &ixlv_enable_head_writeback);
216 SYSCTL_INT(_hw_ixlv, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
217 &ixlv_enable_head_writeback, 0,
218 "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");
221 ** Controls for Interrupt Throttling
222 ** - true/false for dynamic adjustment
223 ** - default values for static ITR
225 int ixlv_dynamic_rx_itr = 0;
226 TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
227 SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
228 &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
230 int ixlv_dynamic_tx_itr = 0;
231 TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
232 SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
233 &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
235 int ixlv_rx_itr = IXL_ITR_8K;
236 TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
237 SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
238 &ixlv_rx_itr, 0, "RX Interrupt Rate");
240 int ixlv_tx_itr = IXL_ITR_4K;
241 TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
242 SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
243 &ixlv_tx_itr, 0, "TX Interrupt Rate");
245 /*********************************************************************
246 * Device identification routine
248 * ixlv_probe determines if the driver should be loaded on
249 * the hardware based on PCI vendor/device id of the device.
251 * return BUS_PROBE_DEFAULT on success, positive on failure
252 *********************************************************************/
255 ixlv_probe(device_t dev)
257 ixl_vendor_info_t *ent;
259 u16 pci_vendor_id, pci_device_id;
260 u16 pci_subvendor_id, pci_subdevice_id;
261 char device_name[256];
264 INIT_DEBUGOUT("ixlv_probe: begin");
267 pci_vendor_id = pci_get_vendor(dev);
268 if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
271 pci_device_id = pci_get_device(dev);
272 pci_subvendor_id = pci_get_subvendor(dev);
273 pci_subdevice_id = pci_get_subdevice(dev);
275 ent = ixlv_vendor_info_array;
276 while (ent->vendor_id != 0) {
277 if ((pci_vendor_id == ent->vendor_id) &&
278 (pci_device_id == ent->device_id) &&
280 ((pci_subvendor_id == ent->subvendor_id) ||
281 (ent->subvendor_id == 0)) &&
283 ((pci_subdevice_id == ent->subdevice_id) ||
284 (ent->subdevice_id == 0))) {
285 sprintf(device_name, "%s, Version - %s",
286 ixlv_strings[ent->index],
287 ixlv_driver_version);
288 device_set_desc_copy(dev, device_name);
289 return (BUS_PROBE_DEFAULT);
296 /*********************************************************************
297 * Device initialization routine
299 * The attach entry point is called when the driver is being loaded.
300 * This routine identifies the type of hardware, allocates all resources
301 * and initializes the hardware.
303 * return 0 on success, positive on failure
304 *********************************************************************/
307 ixlv_attach(device_t dev)
314 INIT_DBG_DEV(dev, "begin");
316 /* Allocate, clear, and link in our primary soft structure */
317 sc = device_get_softc(dev);
318 sc->dev = sc->osdep.dev = dev;
323 /* Initialize hw struct */
326 /* Allocate filter lists */
327 ixlv_init_filters(sc);
329 /* Save this tunable */
330 vsi->enable_head_writeback = ixlv_enable_head_writeback;
333 mtx_init(&sc->mtx, device_get_nameunit(dev),
334 "IXL SC Lock", MTX_DEF);
336 /* Set up the timer callout */
337 callout_init_mtx(&sc->timer, &sc->mtx, 0);
339 /* Do PCI setup - map BAR0, etc */
340 if (ixlv_allocate_pci_resources(sc)) {
341 device_printf(dev, "%s: Allocation of PCI resources failed\n",
347 INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");
349 error = i40e_set_mac_type(hw);
351 device_printf(dev, "%s: set_mac_type failed: %d\n",
356 error = ixlv_reset_complete(hw);
358 device_printf(dev, "%s: Device is still being reset\n",
363 INIT_DBG_DEV(dev, "VF Device is ready for configuration");
365 error = ixlv_setup_vc(sc);
367 device_printf(dev, "%s: Error setting up PF comms, %d\n",
372 INIT_DBG_DEV(dev, "PF API version verified");
374 /* Need API version before sending reset message */
375 error = ixlv_reset(sc);
377 device_printf(dev, "VF reset failed; reload the driver\n");
381 INIT_DBG_DEV(dev, "VF reset complete");
383 /* Ask for VF config from PF */
384 error = ixlv_vf_config(sc);
386 device_printf(dev, "Error getting configuration from PF: %d\n",
391 device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n",
392 sc->vf_res->num_vsis,
393 sc->vf_res->num_queue_pairs,
394 sc->vf_res->max_vectors,
395 sc->vf_res->rss_key_size,
396 sc->vf_res->rss_lut_size);
398 device_printf(dev, "Offload flags: 0x%b\n",
399 sc->vf_res->vf_offload_flags, IXLV_PRINTF_VF_OFFLOAD_FLAGS);
402 /* got VF config message back from PF, now we can parse it */
403 for (int i = 0; i < sc->vf_res->num_vsis; i++) {
404 if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
405 sc->vsi_res = &sc->vf_res->vsi_res[i];
408 device_printf(dev, "%s: no LAN VSI found\n", __func__);
413 INIT_DBG_DEV(dev, "Resource Acquisition complete");
415 /* If no mac address was assigned just make a random one */
416 if (!ixlv_check_ether_addr(hw->mac.addr)) {
417 u8 addr[ETHER_ADDR_LEN];
418 arc4rand(&addr, sizeof(addr), 0);
421 bcopy(addr, hw->mac.addr, sizeof(addr));
424 /* Now that the number of queues for this VF is known, set up interrupts */
425 sc->msix = ixlv_init_msix(sc);
426 /* We fail without MSIX support */
432 vsi->id = sc->vsi_res->vsi_id;
433 vsi->back = (void *)sc;
434 vsi->flags |= IXL_FLAGS_IS_VF | IXL_FLAGS_USES_MSIX;
436 ixl_vsi_setup_rings_size(vsi, ixlv_tx_ring_size, ixlv_rx_ring_size);
438 /* This allocates the memory and early settings */
439 if (ixlv_setup_queues(sc) != 0) {
440 device_printf(dev, "%s: setup queues failed!\n",
446 /* Do queue interrupt setup */
447 if (ixlv_assign_msix(sc) != 0) {
448 device_printf(dev, "%s: allocating queue interrupts failed!\n",
454 INIT_DBG_DEV(dev, "Queue memory and interrupts setup");
456 /* Setup the stack interface */
457 if (ixlv_setup_interface(dev, sc) != 0) {
458 device_printf(dev, "%s: setup interface failed!\n",
464 INIT_DBG_DEV(dev, "Interface setup complete");
466 /* Start AdminQ taskqueue */
467 ixlv_init_taskqueue(sc);
469 /* We expect a link state message, so schedule the AdminQ task now */
470 taskqueue_enqueue(sc->tq, &sc->aq_irq);
472 /* Initialize stats */
473 bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
474 ixlv_add_sysctls(sc);
476 /* Register for VLAN events */
477 vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
478 ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
479 vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
480 ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
482 /* We want AQ enabled early */
483 ixlv_enable_adminq_irq(hw);
485 /* Set things up to run init */
486 sc->init_state = IXLV_INIT_READY;
488 ixl_vc_init_mgr(sc, &sc->vc_mgr);
490 INIT_DBG_DEV(dev, "end");
494 ixlv_free_queues(vsi);
495 ixlv_teardown_adminq_msix(sc);
497 free(sc->vf_res, M_DEVBUF);
499 i40e_shutdown_adminq(hw);
501 ixlv_free_pci_resources(sc);
503 mtx_destroy(&sc->mtx);
504 ixlv_free_filters(sc);
505 INIT_DBG_DEV(dev, "end: error %d", error);
509 /*********************************************************************
510 * Device removal routine
512 * The detach entry point is called when the driver is being removed.
513 * This routine stops the adapter and deallocates all the resources
514 * that were allocated for driver operation.
516 * return 0 on success, positive on failure
517 *********************************************************************/
520 ixlv_detach(device_t dev)
522 struct ixlv_sc *sc = device_get_softc(dev);
523 struct ixl_vsi *vsi = &sc->vsi;
524 struct i40e_hw *hw = &sc->hw;
525 enum i40e_status_code status;
527 INIT_DBG_DEV(dev, "begin");
529 /* Make sure VLANS are not using driver */
530 if (vsi->ifp->if_vlantrunk != NULL) {
531 if_printf(vsi->ifp, "Vlan in use, detach first\n");
535 /* Remove all the media and link information */
536 ifmedia_removeall(&sc->media);
539 ether_ifdetach(vsi->ifp);
540 if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
543 mtx_unlock(&sc->mtx);
546 /* Unregister VLAN events */
547 if (vsi->vlan_attach != NULL)
548 EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
549 if (vsi->vlan_detach != NULL)
550 EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
553 callout_drain(&sc->vc_mgr.callout);
555 ixlv_disable_adminq_irq(hw);
556 ixlv_teardown_adminq_msix(sc);
557 /* Drain admin queue taskqueue */
558 taskqueue_free(sc->tq);
559 status = i40e_shutdown_adminq(&sc->hw);
560 if (status != I40E_SUCCESS) {
562 "i40e_shutdown_adminq() failed with status %s\n",
563 i40e_stat_str(hw, status));
567 free(sc->vf_res, M_DEVBUF);
568 ixlv_free_queues(vsi);
569 ixlv_free_pci_resources(sc);
570 ixlv_free_filters(sc);
572 bus_generic_detach(dev);
573 mtx_destroy(&sc->mtx);
574 INIT_DBG_DEV(dev, "end");
578 /*********************************************************************
580 * Shutdown entry point
582 **********************************************************************/
585 ixlv_shutdown(device_t dev)
587 struct ixlv_sc *sc = device_get_softc(dev);
589 INIT_DBG_DEV(dev, "begin");
593 mtx_unlock(&sc->mtx);
595 INIT_DBG_DEV(dev, "end");
600 * Configure TXCSUM(IPV6) and TSO(4/6)
601 * - the hardware handles these together, so we need to tweak them as a pair.
605 ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
607 /* Enable/disable TXCSUM/TSO4 */
608 if (!(ifp->if_capenable & IFCAP_TXCSUM)
609 && !(ifp->if_capenable & IFCAP_TSO4)) {
610 if (mask & IFCAP_TXCSUM) {
611 ifp->if_capenable |= IFCAP_TXCSUM;
612 /* enable TXCSUM, restore TSO if previously enabled */
613 if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
614 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
615 ifp->if_capenable |= IFCAP_TSO4;
618 else if (mask & IFCAP_TSO4) {
619 ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
620 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
622 "TSO4 requires txcsum, enabling both...\n");
624 } else if((ifp->if_capenable & IFCAP_TXCSUM)
625 && !(ifp->if_capenable & IFCAP_TSO4)) {
626 if (mask & IFCAP_TXCSUM)
627 ifp->if_capenable &= ~IFCAP_TXCSUM;
628 else if (mask & IFCAP_TSO4)
629 ifp->if_capenable |= IFCAP_TSO4;
630 } else if((ifp->if_capenable & IFCAP_TXCSUM)
631 && (ifp->if_capenable & IFCAP_TSO4)) {
632 if (mask & IFCAP_TXCSUM) {
633 vsi->flags |= IXL_FLAGS_KEEP_TSO4;
634 ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
636 "TSO4 requires txcsum, disabling both...\n");
637 } else if (mask & IFCAP_TSO4)
638 ifp->if_capenable &= ~IFCAP_TSO4;
641 /* Enable/disable TXCSUM_IPV6/TSO6 */
642 if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
643 && !(ifp->if_capenable & IFCAP_TSO6)) {
644 if (mask & IFCAP_TXCSUM_IPV6) {
645 ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
646 if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
647 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
648 ifp->if_capenable |= IFCAP_TSO6;
650 } else if (mask & IFCAP_TSO6) {
651 ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
652 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
654 "TSO6 requires txcsum6, enabling both...\n");
656 } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
657 && !(ifp->if_capenable & IFCAP_TSO6)) {
658 if (mask & IFCAP_TXCSUM_IPV6)
659 ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
660 else if (mask & IFCAP_TSO6)
661 ifp->if_capenable |= IFCAP_TSO6;
662 } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
663 && (ifp->if_capenable & IFCAP_TSO6)) {
664 if (mask & IFCAP_TXCSUM_IPV6) {
665 vsi->flags |= IXL_FLAGS_KEEP_TSO6;
666 ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
668 "TSO6 requires txcsum6, disabling both...\n");
669 } else if (mask & IFCAP_TSO6)
670 ifp->if_capenable &= ~IFCAP_TSO6;
674 /*********************************************************************
677 * ixlv_ioctl is called when the user wants to configure the interface.
680 * return 0 on success, positive on failure
681 **********************************************************************/
684 ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
686 struct ixl_vsi *vsi = ifp->if_softc;
687 struct ixlv_sc *sc = vsi->back;
688 struct ifreq *ifr = (struct ifreq *)data;
689 #if defined(INET) || defined(INET6)
690 struct ifaddr *ifa = (struct ifaddr *)data;
691 bool avoid_reset = FALSE;
700 if (ifa->ifa_addr->sa_family == AF_INET)
704 if (ifa->ifa_addr->sa_family == AF_INET6)
707 #if defined(INET) || defined(INET6)
709 ** Calling init results in link renegotiation,
710 ** so we avoid doing it when possible.
713 ifp->if_flags |= IFF_UP;
714 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
717 if (!(ifp->if_flags & IFF_NOARP))
718 arp_ifinit(ifp, ifa);
721 error = ether_ioctl(ifp, command, data);
725 IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
727 if (ifr->ifr_mtu > IXL_MAX_FRAME -
728 ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
730 IOCTL_DBG_IF(ifp, "mtu too large");
732 IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", (u_long)ifp->if_mtu, ifr->ifr_mtu);
733 // ERJ: Interestingly enough, these types don't match
734 ifp->if_mtu = (u_long)ifr->ifr_mtu;
735 vsi->max_frame_size =
736 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
737 + ETHER_VLAN_ENCAP_LEN;
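/* e.g. the default 1500-byte MTU gives a 1522-byte max frame:
 * 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag). */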
738 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
739 ixlv_init_locked(sc);
741 mtx_unlock(&sc->mtx);
744 IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
746 if (ifp->if_flags & IFF_UP) {
747 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
748 ixlv_init_locked(sc);
750 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
752 sc->if_flags = ifp->if_flags;
753 mtx_unlock(&sc->mtx);
756 IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
757 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
759 ixlv_disable_intr(vsi);
761 ixlv_enable_intr(vsi);
762 mtx_unlock(&sc->mtx);
766 IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
767 if (sc->init_state == IXLV_RUNNING) {
769 ixlv_disable_intr(vsi);
771 ixlv_enable_intr(vsi);
772 mtx_unlock(&sc->mtx);
777 IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
778 error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
782 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
783 IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");
785 ixlv_cap_txcsum_tso(vsi, ifp, mask);
787 if (mask & IFCAP_RXCSUM)
788 ifp->if_capenable ^= IFCAP_RXCSUM;
789 if (mask & IFCAP_RXCSUM_IPV6)
790 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
791 if (mask & IFCAP_LRO)
792 ifp->if_capenable ^= IFCAP_LRO;
793 if (mask & IFCAP_VLAN_HWTAGGING)
794 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
795 if (mask & IFCAP_VLAN_HWFILTER)
796 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
797 if (mask & IFCAP_VLAN_HWTSO)
798 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
799 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
802 VLAN_CAPABILITIES(ifp);
808 IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
809 error = ether_ioctl(ifp, command, data);
817 ** To do a reinit on the VF is unfortunately more complicated
818 ** than on a physical device: we must have the PF more or less
819 ** completely recreate our memory, so many things that were
820 ** done only once at attach in traditional drivers now must be
821 ** redone at each reinitialization. This function does that
822 ** 'prelude' so we can then call the normal locked init code.
825 ixlv_reinit_locked(struct ixlv_sc *sc)
827 struct i40e_hw *hw = &sc->hw;
828 struct ixl_vsi *vsi = &sc->vsi;
829 struct ifnet *ifp = vsi->ifp;
830 struct ixlv_mac_filter *mf, *mf_temp;
831 struct ixlv_vlan_filter *vf;
834 INIT_DBG_IF(ifp, "begin");
836 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
839 error = ixlv_reset(sc);
841 INIT_DBG_IF(ifp, "VF was reset");
843 /* set the state in case we went thru RESET */
844 sc->init_state = IXLV_RUNNING;
847 ** Resetting the VF drops all filters from hardware;
848 ** we need to mark them to be re-added in init.
850 SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
851 if (mf->flags & IXL_FILTER_DEL) {
852 SLIST_REMOVE(sc->mac_filters, mf,
853 ixlv_mac_filter, next);
856 mf->flags |= IXL_FILTER_ADD;
858 if (vsi->num_vlans != 0)
859 SLIST_FOREACH(vf, sc->vlan_filters, next)
860 vf->flags = IXL_FILTER_ADD;
861 else { /* clean any stale filters */
862 while (!SLIST_EMPTY(sc->vlan_filters)) {
863 vf = SLIST_FIRST(sc->vlan_filters);
864 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
869 ixlv_enable_adminq_irq(hw);
870 ixl_vc_flush(&sc->vc_mgr);
872 INIT_DBG_IF(ifp, "end");
877 ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
878 enum i40e_status_code code)
885 * Ignore "Adapter Stopped" message as that happens if an ifconfig down
886 * happens while a command is in progress, so we don't print an error
889 if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
890 if_printf(sc->vsi.ifp,
891 "Error %s waiting for PF to complete operation %d\n",
892 i40e_stat_str(&sc->hw, code), cmd->request);
897 ixlv_init_locked(struct ixlv_sc *sc)
899 struct i40e_hw *hw = &sc->hw;
900 struct ixl_vsi *vsi = &sc->vsi;
901 struct ixl_queue *que = vsi->queues;
902 struct ifnet *ifp = vsi->ifp;
905 INIT_DBG_IF(ifp, "begin");
907 IXLV_CORE_LOCK_ASSERT(sc);
909 /* Do a reinit first if an init has already been done */
910 if ((sc->init_state == IXLV_RUNNING) ||
911 (sc->init_state == IXLV_RESET_REQUIRED) ||
912 (sc->init_state == IXLV_RESET_PENDING))
913 error = ixlv_reinit_locked(sc);
914 /* Don't bother with init if we failed reinit */
918 /* Remove existing MAC filter if new MAC addr is set */
919 if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
920 error = ixlv_del_mac_filter(sc, hw->mac.addr);
922 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
923 IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
927 /* Check for an LAA mac address... */
928 bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);
930 ifp->if_hwassist = 0;
931 if (ifp->if_capenable & IFCAP_TSO)
932 ifp->if_hwassist |= CSUM_TSO;
933 if (ifp->if_capenable & IFCAP_TXCSUM)
934 ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
935 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
936 ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;
938 /* Add mac filter for this VF to PF */
939 if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
940 error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
941 if (!error || error == EEXIST)
942 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
943 IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
947 /* Setup vlan's if needed */
948 ixlv_setup_vlan_filters(sc);
950 /* Prepare the queues for operation */
951 for (int i = 0; i < vsi->num_queues; i++, que++) {
952 struct rx_ring *rxr = &que->rxr;
954 ixl_init_tx_ring(que);
956 if (vsi->max_frame_size <= MCLBYTES)
957 rxr->mbuf_sz = MCLBYTES;
959 rxr->mbuf_sz = MJUMPAGESIZE;
960 ixl_init_rx_ring(que);
963 /* Set initial ITR values */
964 ixlv_configure_itr(sc);
966 /* Configure queues */
967 ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
968 IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);
974 ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
975 IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);
978 ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
979 IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);
981 /* Start the local timer */
982 callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
984 sc->init_state = IXLV_RUNNING;
987 INIT_DBG_IF(ifp, "end");
992 ** Init entry point for the stack
997 struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
998 struct ixlv_sc *sc = vsi->back;
1001 /* Prevent init from running again while waiting for AQ calls
1002 * made in init_locked() to complete. */
1004 if (sc->init_in_progress) {
1005 mtx_unlock(&sc->mtx);
1008 sc->init_in_progress = true;
1010 ixlv_init_locked(sc);
1011 mtx_unlock(&sc->mtx);
1013 /* Wait for init_locked to finish */
1014 while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
1015 && ++retries < IXLV_MAX_INIT_WAIT) {
1016 i40e_msec_pause(25);
1018 if (retries >= IXLV_MAX_INIT_WAIT) {
1020 "Init failed to complete in allotted time!\n");
1024 sc->init_in_progress = false;
1025 mtx_unlock(&sc->mtx);
1029 * ixlv_attach() helper function; gathers information about
1030 * the (virtual) hardware for use elsewhere in the driver.
1033 ixlv_init_hw(struct ixlv_sc *sc)
1035 struct i40e_hw *hw = &sc->hw;
1036 device_t dev = sc->dev;
1038 /* Save off the information about this board */
1039 hw->vendor_id = pci_get_vendor(dev);
1040 hw->device_id = pci_get_device(dev);
1041 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1042 hw->subsystem_vendor_id =
1043 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1044 hw->subsystem_device_id =
1045 pci_read_config(dev, PCIR_SUBDEV_0, 2);
1047 hw->bus.device = pci_get_slot(dev);
1048 hw->bus.func = pci_get_function(dev);
1052 * ixlv_attach() helper function; initializes the admin queue
1053 * and attempts to establish contact with the PF by
1054 * retrying the initial "API version" message several times
1055 * or until the PF responds.
1058 ixlv_setup_vc(struct ixlv_sc *sc)
1060 struct i40e_hw *hw = &sc->hw;
1061 device_t dev = sc->dev;
1062 int error = 0, ret_error = 0, asq_retries = 0;
1063 bool send_api_ver_retried = false;
1065 /* Need to set these AQ parameters before initializing the AQ */
1066 hw->aq.num_arq_entries = IXL_AQ_LEN;
1067 hw->aq.num_asq_entries = IXL_AQ_LEN;
1068 hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
1069 hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
1071 for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
1072 /* Initialize admin queue */
1073 error = i40e_init_adminq(hw);
1075 device_printf(dev, "%s: init_adminq failed: %d\n",
1081 INIT_DBG_DEV(dev, "Initialized Admin Queue; starting"
1082 " send_api_ver attempt %d", i+1);
1085 /* Send VF's API version */
1086 error = ixlv_send_api_ver(sc);
1088 i40e_shutdown_adminq(hw);
1090 device_printf(dev, "%s: unable to send api"
1091 " version to PF on attempt %d, error %d\n",
1092 __func__, i+1, error);
1096 while (!i40e_asq_done(hw)) {
1097 if (++asq_retries > IXLV_AQ_MAX_ERR) {
1098 i40e_shutdown_adminq(hw);
1099 device_printf(dev, "Admin Queue timeout "
1100 "(waiting for send_api_ver), %d more tries...\n",
1101 IXLV_AQ_MAX_ERR - (i + 1));
1105 i40e_msec_pause(10);
1107 if (asq_retries > IXLV_AQ_MAX_ERR)
1110 INIT_DBG_DEV(dev, "Sent API version message to PF");
1112 /* Verify that the VF accepts the PF's API version */
1113 error = ixlv_verify_api_ver(sc);
1114 if (error == ETIMEDOUT) {
1115 if (!send_api_ver_retried) {
1116 /* Resend message, one more time */
1117 send_api_ver_retried = true;
1119 "%s: Timeout while verifying API version on first"
1120 " try!\n", __func__);
1124 "%s: Timeout while verifying API version on second"
1125 " try!\n", __func__);
1132 "%s: Unable to verify API version,"
1133 " error %s\n", __func__, i40e_stat_str(hw, error));
1140 i40e_shutdown_adminq(hw);
1145 * ixlv_attach() helper function; asks the PF for this VF's
1146 * configuration, and saves the information if it receives it.
1149 ixlv_vf_config(struct ixlv_sc *sc)
1151 struct i40e_hw *hw = &sc->hw;
1152 device_t dev = sc->dev;
1153 int bufsz, error = 0, ret_error = 0;
1154 int asq_retries, retried = 0;
1157 error = ixlv_send_vf_config_msg(sc);
1160 "%s: Unable to send VF config request, attempt %d,"
1161 " error %d\n", __func__, retried + 1, error);
1166 while (!i40e_asq_done(hw)) {
1167 if (++asq_retries > IXLV_AQ_MAX_ERR) {
1168 device_printf(dev, "%s: Admin Queue timeout "
1169 "(waiting for send_vf_config_msg), attempt %d\n",
1170 __func__, retried + 1);
1174 i40e_msec_pause(10);
1177 INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
1181 bufsz = sizeof(struct virtchnl_vf_resource) +
1182 (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
1183 sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
1186 "%s: Unable to allocate memory for VF configuration"
1187 " message from PF on attempt %d\n", __func__, retried + 1);
1193 /* Check for VF config response */
1194 error = ixlv_get_vf_config(sc);
1195 if (error == ETIMEDOUT) {
1196 /* The first time we time out, send the configuration message again */
1202 "%s: ixlv_get_vf_config() timed out waiting for a response\n",
1207 "%s: Unable to get VF configuration from PF after %d tries!\n",
1208 __func__, retried + 1);
1214 free(sc->vf_res, M_DEVBUF);
1220 * Allocate MSI-X vectors and set up the AQ vector early
1223 ixlv_init_msix(struct ixlv_sc *sc)
1225 device_t dev = sc->dev;
1226 int rid, want, vectors, queues, available;
1227 int auto_max_queues;
1229 rid = PCIR_BAR(IXL_MSIX_BAR);
1230 sc->msix_mem = bus_alloc_resource_any(dev,
1231 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1232 if (!sc->msix_mem) {
1233 /* May not be enabled */
1234 device_printf(sc->dev,
1235 "Unable to map MSIX table\n");
1239 available = pci_msix_count(dev);
1240 if (available == 0) { /* system has msix disabled */
1241 bus_release_resource(dev, SYS_RES_MEMORY,
1243 sc->msix_mem = NULL;
1247 /* Clamp queues to number of CPUs and # of MSI-X vectors available */
1248 auto_max_queues = min(mp_ncpus, available - 1);
1249 /* Clamp queues to # assigned to VF by PF */
1250 auto_max_queues = min(auto_max_queues, sc->vf_res->num_queue_pairs);
1252 /* Override with tunable value if tunable is less than autoconfig count */
1253 if ((ixlv_max_queues != 0) && (ixlv_max_queues <= auto_max_queues))
1254 queues = ixlv_max_queues;
1255 /* Use autoconfig amount if that's lower */
1256 else if ((ixlv_max_queues != 0) && (ixlv_max_queues > auto_max_queues)) {
1257 device_printf(dev, "ixlv_max_queues (%d) is too large, using "
1258 "autoconfig amount (%d)...\n",
1259 ixlv_max_queues, auto_max_queues);
1260 queues = auto_max_queues;
1262 /* Limit maximum auto-configured queues to 8 if no user value is set */
1264 queues = min(auto_max_queues, 8);
1267 /* If we're doing RSS, clamp at the number of RSS buckets */
1268 if (queues > rss_getnumbuckets())
1269 queues = rss_getnumbuckets();
1273 ** Want one vector (RX/TX pair) per queue
1274 ** plus an additional for the admin queue.
1277 if (want <= available) /* Have enough */
1280 device_printf(sc->dev,
1281 "MSIX Configuration Problem, "
1282 "%d vectors available but %d wanted!\n",
1289 * If we're doing RSS, the number of queues needs to
1290 * match the number of RSS buckets that are configured.
1292 * + If there's more queues than RSS buckets, we'll end
1293 * up with queues that get no traffic.
1295 * + If there's more RSS buckets than queues, we'll end
1296 * up having multiple RSS buckets map to the same queue,
1297 * so there'll be some contention.
1299 if (queues != rss_getnumbuckets()) {
1301 "%s: queues (%d) != RSS buckets (%d)"
1302 "; performance will be impacted.\n",
1303 __func__, queues, rss_getnumbuckets());
1307 if (pci_alloc_msix(dev, &vectors) == 0) {
1308 device_printf(sc->dev,
1309 "Using MSIX interrupts with %d vectors\n", vectors);
1311 sc->vsi.num_queues = queues;
1314 /* Next we need to setup the vector for the Admin Queue */
1315 rid = 1; /* zero vector + 1 */
1316 sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1317 &rid, RF_SHAREABLE | RF_ACTIVE);
1318 if (sc->res == NULL) {
1319 device_printf(dev, "Unable to allocate"
1320 " bus resource: AQ interrupt \n");
1323 if (bus_setup_intr(dev, sc->res,
1324 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1325 ixlv_msix_adminq, sc, &sc->tag)) {
1327 device_printf(dev, "Failed to register AQ handler");
1330 bus_describe_intr(dev, sc->res, sc->tag, "adminq");
1335 /* The VF driver MUST use MSIX */
1340 ixlv_allocate_pci_resources(struct ixlv_sc *sc)
1343 device_t dev = sc->dev;
1346 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1349 if (!(sc->pci_mem)) {
1350 device_printf(dev, "Unable to allocate bus resource: memory\n");
1354 sc->osdep.mem_bus_space_tag =
1355 rman_get_bustag(sc->pci_mem);
1356 sc->osdep.mem_bus_space_handle =
1357 rman_get_bushandle(sc->pci_mem);
1358 sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
1359 sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
1360 sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
1361 sc->hw.back = &sc->osdep;
1363 ixl_set_busmaster(dev);
1364 ixl_set_msix_enable(dev);
1366 /* Disable adminq interrupts (just in case) */
1367 ixlv_disable_adminq_irq(&sc->hw);
1373 * Free MSI-X related resources for a single queue
1376 ixlv_free_msix_resources(struct ixlv_sc *sc, struct ixl_queue *que)
1378 device_t dev = sc->dev;
1381 ** Release all msix queue resources:
1383 if (que->tag != NULL) {
1384 bus_teardown_intr(dev, que->res, que->tag);
1387 if (que->res != NULL) {
1388 int rid = que->msix + 1;
1389 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1392 if (que->tq != NULL) {
1393 taskqueue_free(que->tq);
1399 ixlv_free_pci_resources(struct ixlv_sc *sc)
1401 device_t dev = sc->dev;
1403 pci_release_msi(dev);
1405 if (sc->msix_mem != NULL)
1406 bus_release_resource(dev, SYS_RES_MEMORY,
1407 PCIR_BAR(IXL_MSIX_BAR), sc->msix_mem);
1409 if (sc->pci_mem != NULL)
1410 bus_release_resource(dev, SYS_RES_MEMORY,
1411 PCIR_BAR(0), sc->pci_mem);
1415 * Create taskqueue and tasklet for Admin Queue interrupts.
1418 ixlv_init_taskqueue(struct ixlv_sc *sc)
1422 TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);
1424 sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1425 taskqueue_thread_enqueue, &sc->tq);
1426 taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
1427 device_get_nameunit(sc->dev));
1432 /*********************************************************************
1434 * Setup MSIX Interrupt resources and handlers for the VSI queues
1436 **********************************************************************/
1438 ixlv_assign_msix(struct ixlv_sc *sc)
1440 device_t dev = sc->dev;
1441 struct ixl_vsi *vsi = &sc->vsi;
1442 struct ixl_queue *que = vsi->queues;
1443 struct tx_ring *txr;
1444 int error, rid, vector = 1;
1449 for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1453 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1454 RF_SHAREABLE | RF_ACTIVE);
1455 if (que->res == NULL) {
1456 device_printf(dev,"Unable to allocate"
1457 " bus resource: que interrupt [%d]\n", vector);
1460 /* Set the handler function */
1461 error = bus_setup_intr(dev, que->res,
1462 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1463 ixlv_msix_que, que, &que->tag);
1466 device_printf(dev, "Failed to register que handler");
1469 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1470 /* Bind the vector to a CPU */
1472 cpu_id = rss_getcpu(i % rss_getnumbuckets());
1474 bus_bind_intr(dev, que->res, cpu_id);
1476 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1477 TASK_INIT(&que->task, 0, ixlv_handle_que, que);
1478 que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
1479 taskqueue_thread_enqueue, &que->tq);
1481 CPU_SETOF(cpu_id, &cpu_mask);
1482 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
1483 &cpu_mask, "%s (bucket %d)",
1484 device_get_nameunit(dev), cpu_id);
1486 taskqueue_start_threads(&que->tq, 1, PI_NET,
1487 "%s que", device_get_nameunit(dev));
1496 ** Requests a VF reset from the PF.
1498 ** Requires the VF's Admin Queue to be initialized.
1501 ixlv_reset(struct ixlv_sc *sc)
1503 struct i40e_hw *hw = &sc->hw;
1504 device_t dev = sc->dev;
1507 /* Ask the PF to reset us if we are initiating */
1508 if (sc->init_state != IXLV_RESET_PENDING)
1509 ixlv_request_reset(sc);
1511 i40e_msec_pause(100);
1512 error = ixlv_reset_complete(hw);
1514 device_printf(dev, "%s: VF reset failed\n",
1519 error = i40e_shutdown_adminq(hw);
1521 device_printf(dev, "%s: shutdown_adminq failed: %d\n",
1526 error = i40e_init_adminq(hw);
1528 device_printf(dev, "%s: init_adminq failed: %d\n",
1537 ixlv_reset_complete(struct i40e_hw *hw)
1541 /* Wait up to ~10 seconds */
1542 for (int i = 0; i < 100; i++) {
1543 reg = rd32(hw, I40E_VFGEN_RSTAT) &
1544 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1546 if ((reg == VIRTCHNL_VFR_VFACTIVE) ||
1547 (reg == VIRTCHNL_VFR_COMPLETED))
1549 i40e_msec_pause(100);
1556 /*********************************************************************
1558 * Setup networking device structure and register an interface.
1560 **********************************************************************/
1562 ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
1565 struct ixl_vsi *vsi = &sc->vsi;
1566 struct ixl_queue *que = vsi->queues;
1568 INIT_DBG_DEV(dev, "begin");
1570 ifp = vsi->ifp = if_alloc(IFT_ETHER);
1572 device_printf(dev, "%s: could not allocate ifnet"
1573 " structure!\n", __func__);
1577 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1579 ifp->if_mtu = ETHERMTU;
1580 #if __FreeBSD_version >= 1100000
1581 ifp->if_baudrate = IF_Gbps(40);
1583 if_initbaudrate(ifp, IF_Gbps(40));
1585 ifp->if_init = ixlv_init;
1586 ifp->if_softc = vsi;
1587 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1588 ifp->if_ioctl = ixlv_ioctl;
1590 #if __FreeBSD_version >= 1100000
1591 if_setgetcounterfn(ifp, ixl_get_counter);
1594 ifp->if_transmit = ixl_mq_start;
1596 ifp->if_qflush = ixl_qflush;
1597 ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2;
1599 ether_ifattach(ifp, sc->hw.mac.addr);
1601 vsi->max_frame_size =
1602 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1603 + ETHER_VLAN_ENCAP_LEN;
1605 ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
1606 ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
1607 ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE;
1610 * Tell the upper layer(s) we support long frames.
1612 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1614 ifp->if_capabilities |= IFCAP_HWCSUM;
1615 ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1616 ifp->if_capabilities |= IFCAP_TSO;
1617 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1619 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1624 ifp->if_capenable = ifp->if_capabilities;
1627 ** Don't turn this on by default: if vlans are
1628 ** created on another pseudo device (e.g. lagg),
1629 ** then vlan events are not passed through, breaking
1630 ** operation, but with HW FILTER off it works. If
1631 ** using vlans directly on the ixl driver you can
1632 ** enable this and get full hardware tag filtering.
1634 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1637 * Specify the media types supported by this adapter and register
1638 * callbacks to update media and link information
1640 ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
1643 /* Media types based on reported link speed over AdminQ */
1644 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1645 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1646 ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1647 ifmedia_add(&sc->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
1648 ifmedia_add(&sc->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
1650 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1651 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1653 INIT_DBG_DEV(dev, "end");
1658 ** Allocate and setup a single queue
1661 ixlv_setup_queue(struct ixlv_sc *sc, struct ixl_queue *que)
1663 device_t dev = sc->dev;
1664 struct tx_ring *txr;
1665 struct rx_ring *rxr;
1667 int error = I40E_SUCCESS;
1671 txr->tail = I40E_QTX_TAIL1(que->me);
1672 /* Initialize the TX lock */
1673 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
1674 device_get_nameunit(dev), que->me);
1675 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
1677 * Create the TX descriptor ring
1679 * In Head Writeback mode, the descriptor ring is one entry larger
1680 * than the number of descriptors, leaving space for the HW to
1681 * write back the index of the last completed descriptor.
1683 if (sc->vsi.enable_head_writeback) {
1684 tsize = roundup2((que->num_tx_desc *
1685 sizeof(struct i40e_tx_desc)) +
1686 sizeof(u32), DBA_ALIGN);
1688 tsize = roundup2((que->num_tx_desc *
1689 sizeof(struct i40e_tx_desc)), DBA_ALIGN);
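/* For example, with 1024 descriptors (16 bytes each) this reserves 16 KB,
 * plus 4 bytes for the write-back index in head write-back mode, rounded
 * up to DBA_ALIGN; the descriptor count here is only illustrative. */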
1691 if (i40e_allocate_dma_mem(&sc->hw,
1692 &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
1694 "Unable to allocate TX Descriptor memory\n");
1696 goto err_destroy_tx_mtx;
1698 txr->base = (struct i40e_tx_desc *)txr->dma.va;
1699 bzero((void *)txr->base, tsize);
1700 /* Now allocate transmit soft structs for the ring */
1701 if (ixl_allocate_tx_data(que)) {
1703 "Critical Failure setting up TX structures\n");
1705 goto err_free_tx_dma;
1707 /* Allocate a buf ring */
1708 txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
1709 M_WAITOK, &txr->mtx);
1710 if (txr->br == NULL) {
1712 "Critical Failure setting up TX buf ring\n");
1714 goto err_free_tx_data;
1718 * Next the RX queues...
1720 rsize = roundup2(que->num_rx_desc *
1721 sizeof(union i40e_rx_desc), DBA_ALIGN);
1724 rxr->tail = I40E_QRX_TAIL1(que->me);
1726 /* Initialize the RX side lock */
1727 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
1728 device_get_nameunit(dev), que->me);
1729 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
1731 if (i40e_allocate_dma_mem(&sc->hw,
1732 &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
1734 "Unable to allocate RX Descriptor memory\n");
1736 goto err_destroy_rx_mtx;
1738 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
1739 bzero((void *)rxr->base, rsize);
1741 /* Allocate receive soft structs for the ring */
1742 if (ixl_allocate_rx_data(que)) {
1744 "Critical Failure setting up receive structs\n");
1746 goto err_free_rx_dma;
1752 i40e_free_dma_mem(&sc->hw, &rxr->dma);
1754 mtx_destroy(&rxr->mtx);
1755 /* err_free_tx_buf_ring */
1756 buf_ring_free(txr->br, M_DEVBUF);
1758 ixl_free_que_tx(que);
1760 i40e_free_dma_mem(&sc->hw, &txr->dma);
1762 mtx_destroy(&txr->mtx);
1768 ** Allocate and setup the interface queues
1771 ixlv_setup_queues(struct ixlv_sc *sc)
1773 device_t dev = sc->dev;
1774 struct ixl_vsi *vsi;
1775 struct ixl_queue *que;
1777 int error = I40E_SUCCESS;
1780 vsi->back = (void *)sc;
1784 /* Get memory for the station queues */
1786 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
1787 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1788 device_printf(dev, "Unable to allocate queue memory\n");
1792 for (i = 0; i < vsi->num_queues; i++) {
1793 que = &vsi->queues[i];
1794 que->num_tx_desc = vsi->num_tx_desc;
1795 que->num_rx_desc = vsi->num_rx_desc;
1799 if (ixlv_setup_queue(sc, que)) {
1801 goto err_free_queues;
1809 ixlv_free_queue(sc, &vsi->queues[i]);
1811 free(vsi->queues, M_DEVBUF);
1817 ** This routine is run via a vlan config EVENT;
1818 ** it enables us to use the HW Filter table since
1819 ** we can get the vlan id. This just creates the
1820 ** entry in the soft version of the VFTA, init will
1821 ** repopulate the real table.
1824 ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1826 struct ixl_vsi *vsi = arg;
1827 struct ixlv_sc *sc = vsi->back;
1828 struct ixlv_vlan_filter *v;
1831 if (ifp->if_softc != arg) /* Not our event */
1834 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1837 /* Sanity check - make sure it doesn't already exist */
1838 SLIST_FOREACH(v, sc->vlan_filters, next) {
1839 if (v->vlan == vtag)
1845 v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
1846 SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
1848 v->flags = IXL_FILTER_ADD;
1849 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
1850 IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
1851 mtx_unlock(&sc->mtx);
1856 ** This routine is run via a vlan
1857 ** unconfig EVENT; it removes our entry
1858 ** from the soft vfta.
1861 ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1863 struct ixl_vsi *vsi = arg;
1864 struct ixlv_sc *sc = vsi->back;
1865 struct ixlv_vlan_filter *v;
1868 if (ifp->if_softc != arg)
1871 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1875 SLIST_FOREACH(v, sc->vlan_filters, next) {
1876 if (v->vlan == vtag) {
1877 v->flags = IXL_FILTER_DEL;
1883 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
1884 IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
1885 mtx_unlock(&sc->mtx);
1890 ** Get a new filter and add it to the mac filter list.
1892 static struct ixlv_mac_filter *
1893 ixlv_get_mac_filter(struct ixlv_sc *sc)
1895 struct ixlv_mac_filter *f;
1897 f = malloc(sizeof(struct ixlv_mac_filter),
1898 M_DEVBUF, M_NOWAIT | M_ZERO);
1900 SLIST_INSERT_HEAD(sc->mac_filters, f, next);
1906 ** Find the filter with matching MAC address
1908 static struct ixlv_mac_filter *
1909 ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
1911 struct ixlv_mac_filter *f;
1914 SLIST_FOREACH(f, sc->mac_filters, next) {
1915 if (cmp_etheraddr(f->macaddr, macaddr)) {
1927 ixlv_teardown_adminq_msix(struct ixlv_sc *sc)
1929 device_t dev = sc->dev;
1932 if (sc->tag != NULL) {
1933 bus_teardown_intr(dev, sc->res, sc->tag);
1935 device_printf(dev, "bus_teardown_intr() for"
1936 " interrupt 0 failed\n");
1941 if (sc->res != NULL) {
1942 bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
1944 device_printf(dev, "bus_release_resource() for"
1945 " interrupt 0 failed\n");
1956 ** Admin Queue interrupt handler
1959 ixlv_msix_adminq(void *arg)
1961 struct ixlv_sc *sc = arg;
1962 struct i40e_hw *hw = &sc->hw;
1965 reg = rd32(hw, I40E_VFINT_ICR01);
1966 mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
1968 reg = rd32(hw, I40E_VFINT_DYN_CTL01);
1969 reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
1970 wr32(hw, I40E_VFINT_DYN_CTL01, reg);
1973 taskqueue_enqueue(sc->tq, &sc->aq_irq);
1978 ixlv_enable_intr(struct ixl_vsi *vsi)
1980 struct i40e_hw *hw = vsi->hw;
1981 struct ixl_queue *que = vsi->queues;
1983 ixlv_enable_adminq_irq(hw);
1984 for (int i = 0; i < vsi->num_queues; i++, que++)
1985 ixlv_enable_queue_irq(hw, que->me);
1989 ixlv_disable_intr(struct ixl_vsi *vsi)
1991 struct i40e_hw *hw = vsi->hw;
1992 struct ixl_queue *que = vsi->queues;
1994 ixlv_disable_adminq_irq(hw);
1995 for (int i = 0; i < vsi->num_queues; i++, que++)
1996 ixlv_disable_queue_irq(hw, que->me);
2001 ixlv_disable_adminq_irq(struct i40e_hw *hw)
2003 wr32(hw, I40E_VFINT_DYN_CTL01, 0);
2004 wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
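/* Read VFGEN_RSTAT to flush the posted register writes above. */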
2006 rd32(hw, I40E_VFGEN_RSTAT);
2011 ixlv_enable_adminq_irq(struct i40e_hw *hw)
2013 wr32(hw, I40E_VFINT_DYN_CTL01,
2014 I40E_VFINT_DYN_CTL01_INTENA_MASK |
2015 I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
2016 wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
2018 rd32(hw, I40E_VFGEN_RSTAT);
2023 ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
2027 reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
2028 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
2029 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
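/* Setting the ITR_INDX field to all ones selects the "no ITR update"
 * index, so re-enabling the interrupt should leave the current
 * throttling interval unchanged. */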
2030 wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
2034 ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
2036 wr32(hw, I40E_VFINT_DYN_CTLN1(id),
2037 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
2038 rd32(hw, I40E_VFGEN_RSTAT);
2043 * Get initial ITR values from tunable values.
2046 ixlv_configure_itr(struct ixlv_sc *sc)
2048 struct i40e_hw *hw = &sc->hw;
2049 struct ixl_vsi *vsi = &sc->vsi;
2050 struct ixl_queue *que = vsi->queues;
2052 vsi->rx_itr_setting = ixlv_rx_itr;
2053 vsi->tx_itr_setting = ixlv_tx_itr;
2055 for (int i = 0; i < vsi->num_queues; i++, que++) {
2056 struct tx_ring *txr = &que->txr;
2057 struct rx_ring *rxr = &que->rxr;
2059 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i),
2060 vsi->rx_itr_setting);
2061 rxr->itr = vsi->rx_itr_setting;
2062 rxr->latency = IXL_AVE_LATENCY;
2064 wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i),
2065 vsi->tx_itr_setting);
2066 txr->itr = vsi->tx_itr_setting;
2067 txr->latency = IXL_AVE_LATENCY;
2072 ** Provide an update to the queue RX
2073 ** interrupt moderation value.
2076 ixlv_set_queue_rx_itr(struct ixl_queue *que)
2078 struct ixl_vsi *vsi = que->vsi;
2079 struct i40e_hw *hw = vsi->hw;
2080 struct rx_ring *rxr = &que->rxr;
2086 /* Idle, do nothing */
2087 if (rxr->bytes == 0)
2090 if (ixlv_dynamic_rx_itr) {
2091 rx_bytes = rxr->bytes/rxr->itr;
2094 /* Adjust latency range */
2095 switch (rxr->latency) {
2096 case IXL_LOW_LATENCY:
2097 if (rx_bytes > 10) {
2098 rx_latency = IXL_AVE_LATENCY;
2099 rx_itr = IXL_ITR_20K;
2102 case IXL_AVE_LATENCY:
2103 if (rx_bytes > 20) {
2104 rx_latency = IXL_BULK_LATENCY;
2105 rx_itr = IXL_ITR_8K;
2106 } else if (rx_bytes <= 10) {
2107 rx_latency = IXL_LOW_LATENCY;
2108 rx_itr = IXL_ITR_100K;
2111 case IXL_BULK_LATENCY:
2112 if (rx_bytes <= 20) {
2113 rx_latency = IXL_AVE_LATENCY;
2114 rx_itr = IXL_ITR_20K;
2119 rxr->latency = rx_latency;
2121 if (rx_itr != rxr->itr) {
2122 /* do an exponential smoothing */
2123 rx_itr = (10 * rx_itr * rxr->itr) /
2124 ((9 * rx_itr) + rxr->itr);
2125 rxr->itr = min(rx_itr, IXL_MAX_ITR);
2126 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
2127 que->me), rxr->itr);
2129 } else { /* We may have toggled to non-dynamic */
2130 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2131 vsi->rx_itr_setting = ixlv_rx_itr;
2132 /* Update the hardware if needed */
2133 if (rxr->itr != vsi->rx_itr_setting) {
2134 rxr->itr = vsi->rx_itr_setting;
2135 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
2136 que->me), rxr->itr);
2146 ** Provide an update to the queue TX
2147 ** interrupt moderation value.
2150 ixlv_set_queue_tx_itr(struct ixl_queue *que)
2152 struct ixl_vsi *vsi = que->vsi;
2153 struct i40e_hw *hw = vsi->hw;
2154 struct tx_ring *txr = &que->txr;
2160 /* Idle, do nothing */
2161 if (txr->bytes == 0)
2164 if (ixlv_dynamic_tx_itr) {
2165 tx_bytes = txr->bytes/txr->itr;
2168 switch (txr->latency) {
2169 case IXL_LOW_LATENCY:
2170 if (tx_bytes > 10) {
2171 tx_latency = IXL_AVE_LATENCY;
2172 tx_itr = IXL_ITR_20K;
2175 case IXL_AVE_LATENCY:
2176 if (tx_bytes > 20) {
2177 tx_latency = IXL_BULK_LATENCY;
2178 tx_itr = IXL_ITR_8K;
2179 } else if (tx_bytes <= 10) {
2180 tx_latency = IXL_LOW_LATENCY;
2181 tx_itr = IXL_ITR_100K;
2184 case IXL_BULK_LATENCY:
2185 if (tx_bytes <= 20) {
2186 tx_latency = IXL_AVE_LATENCY;
2187 tx_itr = IXL_ITR_20K;
2192 txr->latency = tx_latency;
2194 if (tx_itr != txr->itr) {
2195 /* do an exponential smoothing */
2196 tx_itr = (10 * tx_itr * txr->itr) /
2197 ((9 * tx_itr) + txr->itr);
2198 txr->itr = min(tx_itr, IXL_MAX_ITR);
2199 wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2200 que->me), txr->itr);
2203 } else { /* We may have toggled to non-dynamic */
2204 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2205 vsi->tx_itr_setting = ixlv_tx_itr;
2206 /* Update the hardware if needed */
2207 if (txr->itr != vsi->tx_itr_setting) {
2208 txr->itr = vsi->tx_itr_setting;
2209 wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2210 que->me), txr->itr);
2221 ** MSIX Interrupt Handlers and Tasklets
2225 ixlv_handle_que(void *context, int pending)
2227 struct ixl_queue *que = context;
2228 struct ixl_vsi *vsi = que->vsi;
2229 struct i40e_hw *hw = vsi->hw;
2230 struct tx_ring *txr = &que->txr;
2231 struct ifnet *ifp = vsi->ifp;
2234 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2235 more = ixl_rxeof(que, IXL_RX_LIMIT);
2236 mtx_lock(&txr->mtx);
2238 if (!drbr_empty(ifp, txr->br))
2239 ixl_mq_start_locked(ifp, txr);
2240 mtx_unlock(&txr->mtx);
2242 taskqueue_enqueue(que->tq, &que->task);
2247 /* Re-enable this queue interrupt */
2248 ixlv_enable_queue_irq(hw, que->me);
2253 /*********************************************************************
2255 * MSIX Queue Interrupt Service routine
2257 **********************************************************************/
2259 ixlv_msix_que(void *arg)
2261 struct ixl_queue *que = arg;
2262 struct ixl_vsi *vsi = que->vsi;
2263 struct i40e_hw *hw = vsi->hw;
2264 struct tx_ring *txr = &que->txr;
2265 bool more_tx, more_rx;
2267 /* Spurious interrupts are ignored */
2268 if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
2273 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
2275 mtx_lock(&txr->mtx);
2276 more_tx = ixl_txeof(que);
2278 ** Make certain that if the stack
2279 ** has anything queued the task gets
2280 ** scheduled to handle it.
2282 if (!drbr_empty(vsi->ifp, txr->br))
2284 mtx_unlock(&txr->mtx);
2286 ixlv_set_queue_rx_itr(que);
2287 ixlv_set_queue_tx_itr(que);
2289 if (more_tx || more_rx)
2290 taskqueue_enqueue(que->tq, &que->task);
2292 ixlv_enable_queue_irq(hw, que->me);
2298 /*********************************************************************
2300 * Media Ioctl callback
2302 * This routine is called whenever the user queries the status of
2303 * the interface using ifconfig.
2305 **********************************************************************/
2307 ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
2309 struct ixl_vsi *vsi = ifp->if_softc;
2310 struct ixlv_sc *sc = vsi->back;
2312 INIT_DBG_IF(ifp, "begin");
2316 ixlv_update_link_status(sc);
2318 ifmr->ifm_status = IFM_AVALID;
2319 ifmr->ifm_active = IFM_ETHER;
2322 mtx_unlock(&sc->mtx);
2323 INIT_DBG_IF(ifp, "end: link not up");
2327 ifmr->ifm_status |= IFM_ACTIVE;
2328 /* Hardware is always full-duplex */
2329 ifmr->ifm_active |= IFM_FDX;
2331 /* Based on the link speed reported by the PF over the AdminQ, choose a
2332 * PHY type to report. This isn't 100% correct since we don't really
2333 * know the underlying PHY type of the PF, but at least we can report
2334 * a valid link speed...
2336 switch (sc->link_speed) {
2337 case VIRTCHNL_LINK_SPEED_100MB:
2338 ifmr->ifm_active |= IFM_100_TX;
2339 break;
2340 case VIRTCHNL_LINK_SPEED_1GB:
2341 ifmr->ifm_active |= IFM_1000_T;
2342 break;
2343 case VIRTCHNL_LINK_SPEED_10GB:
2344 ifmr->ifm_active |= IFM_10G_SR;
2345 break;
2346 case VIRTCHNL_LINK_SPEED_20GB:
2347 case VIRTCHNL_LINK_SPEED_25GB:
2348 ifmr->ifm_active |= IFM_25G_SR;
2349 break;
2350 case VIRTCHNL_LINK_SPEED_40GB:
2351 ifmr->ifm_active |= IFM_40G_SR4;
2352 break;
2353 default:
2354 ifmr->ifm_active |= IFM_UNKNOWN;
2358 mtx_unlock(&sc->mtx);
2359 INIT_DBG_IF(ifp, "end");
2363 /*********************************************************************
2365 * Media Ioctl callback
2367 * This routine is called when the user changes speed/duplex using
2368 * media/mediaopt option with ifconfig.
2370 **********************************************************************/
2372 ixlv_media_change(struct ifnet * ifp)
2374 struct ixl_vsi *vsi = ifp->if_softc;
2375 struct ifmedia *ifm = &vsi->media;
2377 INIT_DBG_IF(ifp, "begin");
2379 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2382 if_printf(ifp, "Changing speed is not supported\n");
2384 INIT_DBG_IF(ifp, "end");
2389 /*********************************************************************
2390 * Multicast Initialization
2392 * This routine is called by init to reset multicast filtering to a fresh state.
2394 **********************************************************************/
2397 ixlv_init_multi(struct ixl_vsi *vsi)
2399 struct ixlv_mac_filter *f;
2400 struct ixlv_sc *sc = vsi->back;
2403 IOCTL_DBG_IF(vsi->ifp, "begin");
2405 /* First clear any multicast filters */
2406 SLIST_FOREACH(f, sc->mac_filters, next) {
2407 if ((f->flags & IXL_FILTER_USED)
2408 && (f->flags & IXL_FILTER_MC)) {
2409 f->flags |= IXL_FILTER_DEL;
2414 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2415 IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2418 IOCTL_DBG_IF(vsi->ifp, "end");
2422 ixlv_add_multi(struct ixl_vsi *vsi)
2424 struct ifmultiaddr *ifma;
2425 struct ifnet *ifp = vsi->ifp;
2426 struct ixlv_sc *sc = vsi->back;
2429 IOCTL_DBG_IF(ifp, "begin");
2431 if_maddr_rlock(ifp);
2433 ** Get a count, to decide if we
2434 ** simply use multicast promiscuous.
2436 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2437 if (ifma->ifma_addr->sa_family != AF_LINK)
2441 if_maddr_runlock(ifp);
2443 /* TODO: Remove -- cannot set promiscuous mode in a VF */
2444 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
2445 /* delete all multicast filters */
2446 ixlv_init_multi(vsi);
2447 sc->promiscuous_flags |= FLAG_VF_MULTICAST_PROMISC;
2448 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2449 IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
2451 IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
2456 if_maddr_rlock(ifp);
2457 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2458 if (ifma->ifma_addr->sa_family != AF_LINK)
2460 if (!ixlv_add_mac_filter(sc,
2461 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2465 if_maddr_runlock(ifp);
2467 ** Notify AQ task that sw filters need to be
2468 ** added to hw list
2469 */
2471 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2472 IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
2475 IOCTL_DBG_IF(ifp, "end");
2479 ixlv_del_multi(struct ixl_vsi *vsi)
2481 struct ixlv_mac_filter *f;
2482 struct ifmultiaddr *ifma;
2483 struct ifnet *ifp = vsi->ifp;
2484 struct ixlv_sc *sc = vsi->back;
2488 IOCTL_DBG_IF(ifp, "begin");
2490 /* Search for removed multicast addresses */
2491 if_maddr_rlock(ifp);
2492 SLIST_FOREACH(f, sc->mac_filters, next) {
2493 if ((f->flags & IXL_FILTER_USED)
2494 && (f->flags & IXL_FILTER_MC)) {
2495 /* check if mac address in filter is in sc's list */
2497 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2498 if (ifma->ifma_addr->sa_family != AF_LINK)
2501 (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2502 if (cmp_etheraddr(f->macaddr, mc_addr)) {
2507 /* if this filter is not in the sc's list, remove it */
2508 if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
2509 f->flags |= IXL_FILTER_DEL;
2511 IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
2512 MAC_FORMAT_ARGS(f->macaddr));
2514 else if (match == FALSE)
2515 IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
2516 MAC_FORMAT_ARGS(f->macaddr));
2519 if_maddr_runlock(ifp);
2522 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2523 IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2526 IOCTL_DBG_IF(ifp, "end");
2529 /*********************************************************************
2532 * This routine checks for link status, updates statistics,
2533 * and runs the watchdog check.
2535 **********************************************************************/
2538 ixlv_local_timer(void *arg)
2540 struct ixlv_sc *sc = arg;
2541 struct i40e_hw *hw = &sc->hw;
2542 struct ixl_vsi *vsi = &sc->vsi;
2545 IXLV_CORE_LOCK_ASSERT(sc);
2547 /* If Reset is in progress just bail */
2548 if (sc->init_state == IXLV_RESET_PENDING)
2551 /* Check for when PF triggers a VF reset */
2552 val = rd32(hw, I40E_VFGEN_RSTAT) &
2553 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2555 if (val != VIRTCHNL_VFR_VFACTIVE
2556 && val != VIRTCHNL_VFR_COMPLETED) {
2557 DDPRINTF(sc->dev, "reset in progress! (%d)", val);
2561 ixlv_request_stats(sc);
2563 /* clean and process any events */
2564 taskqueue_enqueue(sc->tq, &sc->aq_irq);
2566 /* Increment stat when a queue shows hung */
2567 if (ixl_queue_hang_check(vsi))
2568 sc->watchdog_events++;
2570 callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
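/*
 * The callout_reset() above re-arms the timer, so this routine runs
 * roughly once per second (every hz ticks): it requests updated stats
 * from the PF, kicks the admin queue task, and counts hung-queue
 * events for the watchdog statistic.
 */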
2574 ** Note: this routine updates the OS on the link state;
2575 ** the real check of the hardware only happens with
2576 ** a link interrupt.
2579 ixlv_update_link_status(struct ixlv_sc *sc)
2581 struct ixl_vsi *vsi = &sc->vsi;
2582 struct ifnet *ifp = vsi->ifp;
2585 if (vsi->link_active == FALSE) {
2587 if_printf(ifp,"Link is Up, %s\n",
2588 ixlv_vc_speed_to_string(sc->link_speed));
2589 vsi->link_active = TRUE;
2590 if_link_state_change(ifp, LINK_STATE_UP);
2592 } else { /* Link down */
2593 if (vsi->link_active == TRUE) {
2595 if_printf(ifp,"Link is Down\n");
2596 if_link_state_change(ifp, LINK_STATE_DOWN);
2597 vsi->link_active = FALSE;
2604 /*********************************************************************
2606 * This routine stops all traffic on the adapter by flushing pending
2607 * virtual channel commands and disabling the VF's queues.
2609 **********************************************************************/
2612 ixlv_stop(struct ixlv_sc *sc)
2618 INIT_DBG_IF(ifp, "begin");
2620 IXLV_CORE_LOCK_ASSERT(sc);
2622 ixl_vc_flush(&sc->vc_mgr);
2623 ixlv_disable_queues(sc);
2626 while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
2627 ((ticks - start) < hz/10))
2628 ixlv_do_adminq_locked(sc);
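/*
 * The loop above keeps servicing the admin queue for up to hz/10
 * ticks (about a tenth of a second) so the PF's response to the
 * queue-disable request can be processed; the RUNNING flag should be
 * cleared by that completion handling, ending the wait early.
 */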
2630 /* Stop the local timer */
2631 callout_stop(&sc->timer);
2633 INIT_DBG_IF(ifp, "end");
2636 /* Free a single queue struct */
2638 ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que)
2640 struct tx_ring *txr = &que->txr;
2641 struct rx_ring *rxr = &que->rxr;
2643 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2647 buf_ring_free(txr->br, M_DEVBUF);
2648 ixl_free_que_tx(que);
2650 i40e_free_dma_mem(&sc->hw, &txr->dma);
2652 IXL_TX_LOCK_DESTROY(txr);
2654 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2657 ixl_free_que_rx(que);
2659 i40e_free_dma_mem(&sc->hw, &rxr->dma);
2661 IXL_RX_LOCK_DESTROY(rxr);
2664 /*********************************************************************
2666 * Free all of the VSI's queue structs.
2668 **********************************************************************/
2670 ixlv_free_queues(struct ixl_vsi *vsi)
2672 struct ixlv_sc *sc = (struct ixlv_sc *)vsi->back;
2673 struct ixl_queue *que = vsi->queues;
2675 for (int i = 0; i < vsi->num_queues; i++, que++) {
2676 /* First, free the MSI-X resources */
2677 ixlv_free_msix_resources(sc, que);
2678 /* Then free other queue data */
2679 ixlv_free_queue(sc, que);
2682 free(vsi->queues, M_DEVBUF);
2686 ixlv_config_rss_reg(struct ixlv_sc *sc)
2688 struct i40e_hw *hw = &sc->hw;
2689 struct ixl_vsi *vsi = &sc->vsi;
2691 u64 set_hena = 0, hena;
2693 u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
2695 u32 rss_hash_config;
2698 /* Don't set up RSS if using a single queue */
2699 if (vsi->num_queues == 1) {
2700 wr32(hw, I40E_VFQF_HENA(0), 0);
2701 wr32(hw, I40E_VFQF_HENA(1), 0);
2706 #ifdef RSS
2707 /* Fetch the configured RSS key */
2708 rss_getkey((uint8_t *) &rss_seed);
2709 #else
2710 ixl_get_default_rss_key(rss_seed);
2711 #endif
2713 /* Fill out hash function seed */
2714 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
2715 wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
2717 /* Enable PCTYPES for RSS: */
2718 #ifdef RSS
2719 rss_hash_config = rss_gethashconfig();
2720 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2721 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2722 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2723 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2724 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2725 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2726 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2727 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2728 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2729 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2730 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2731 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2732 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2733 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
2734 #else
2735 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
2736 #endif
2737 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
2738 ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
2740 wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
2741 wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
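/*
 * The 64-bit hash-enable mask lives in two 32-bit registers: the low
 * word in I40E_VFQF_HENA(0) and the high word in I40E_VFQF_HENA(1).
 * The current value is read back, the newly selected PCTYPE bits
 * (set_hena) are merged in, and the result is written to both halves.
 */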
2743 /* Populate the LUT with the queue indices in round-robin fashion */
2744 for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
2745 if (j == vsi->num_queues)
2749 * Fetch the RSS bucket id for the given indirection entry.
2750 * Cap it at the number of configured buckets (which is
2751 * num_queues).
2752 */
2753 que_id = rss_get_indirection_to_bucket(i);
2754 que_id = que_id % vsi->num_queues;
2758 /* lut = 4-byte sliding window of 4 lut entries */
2759 lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
2760 /* Every fourth index, 4 entries are packed in lut; write the register */
2761 if ((i & 3) == 3)
2762 wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
2763 DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
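/*
 * Example of the packing above (illustrative values): with four
 * queues and bucket ids 0,1,2,3 for indices 0..3, lut becomes
 * 0x00010203 and is written to I40E_VFQF_HLUT(0); each 32-bit HLUT
 * register therefore holds four one-byte LUT entries.
 */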
2770 ixlv_config_rss_pf(struct ixlv_sc *sc)
2772 ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_key_cmd,
2773 IXLV_FLAG_AQ_CONFIG_RSS_KEY, ixl_init_cmd_complete, sc);
2775 ixl_vc_enqueue(&sc->vc_mgr, &sc->set_rss_hena_cmd,
2776 IXLV_FLAG_AQ_SET_RSS_HENA, ixl_init_cmd_complete, sc);
2778 ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_lut_cmd,
2779 IXLV_FLAG_AQ_CONFIG_RSS_LUT, ixl_init_cmd_complete, sc);
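/*
 * When the PF owns RSS configuration, the key, the hash-enable set
 * (hena), and the lookup table are each sent to the PF as separate
 * queued virtual channel commands instead of being written to VF
 * registers.
 */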
2783 ** ixlv_config_rss - setup RSS
2785 ** RSS keys and table are cleared on VF reset.
2788 ixlv_config_rss(struct ixlv_sc *sc)
2790 if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) {
2791 DDPRINTF(sc->dev, "Setting up RSS using VF registers...");
2792 ixlv_config_rss_reg(sc);
2793 } else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2794 DDPRINTF(sc->dev, "Setting up RSS using messages to PF...");
2795 ixlv_config_rss_pf(sc);
2797 device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
2801 ** This routine refreshes the VLAN filters; called by init,
2802 ** it scans the filter table and then updates the AQ
2805 ixlv_setup_vlan_filters(struct ixlv_sc *sc)
2807 struct ixl_vsi *vsi = &sc->vsi;
2808 struct ixlv_vlan_filter *f;
2811 if (vsi->num_vlans == 0)
2814 ** Scan the filter table for vlan entries,
2815 ** and if found call for the AQ update.
2817 SLIST_FOREACH(f, sc->vlan_filters, next)
2818 if (f->flags & IXL_FILTER_ADD)
2821 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
2822 IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
2827 ** This routine adds new MAC filters to the sc's list;
2828 ** these are later added in hardware by sending a
2829 ** virtual channel message.
2830 */
2832 ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
2834 struct ixlv_mac_filter *f;
2836 /* Does one already exist? */
2837 f = ixlv_find_mac_filter(sc, macaddr);
2839 IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
2840 MAC_FORMAT_ARGS(macaddr));
2844 /* If not, get a new empty filter */
2845 f = ixlv_get_mac_filter(sc);
2847 if_printf(sc->vsi.ifp, "%s: no filters available!!\n",
2852 IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
2853 MAC_FORMAT_ARGS(macaddr));
2855 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2856 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
2862 ** Marks a MAC filter for deletion.
2865 ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
2867 struct ixlv_mac_filter *f;
2869 f = ixlv_find_mac_filter(sc, macaddr);
2873 f->flags |= IXL_FILTER_DEL;
2878 ** Tasklet handler for MSIX Adminq interrupts
2879 ** - done outside interrupt context since it might sleep
2882 ixlv_do_adminq(void *context, int pending)
2884 struct ixlv_sc *sc = context;
2887 ixlv_do_adminq_locked(sc);
2888 mtx_unlock(&sc->mtx);
2893 ixlv_do_adminq_locked(struct ixlv_sc *sc)
2895 struct i40e_hw *hw = &sc->hw;
2896 struct i40e_arq_event_info event;
2897 struct virtchnl_msg *v_msg;
2898 device_t dev = sc->dev;
2902 bool aq_error = false;
2904 IXLV_CORE_LOCK_ASSERT(sc);
2906 event.buf_len = IXL_AQ_BUF_SZ;
2907 event.msg_buf = sc->aq_buffer;
2908 v_msg = (struct virtchnl_msg *)&event.desc;
2911 ret = i40e_clean_arq_element(hw, &event, &result);
2914 ixlv_vc_completion(sc, v_msg->v_opcode,
2915 v_msg->v_retval, event.msg_buf, event.msg_len);
2917 bzero(event.msg_buf, IXL_AQ_BUF_SZ);
2920 /* check for Admin queue errors */
2921 oldreg = reg = rd32(hw, hw->aq.arq.len);
2922 if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
2923 device_printf(dev, "ARQ VF Error detected\n");
2924 reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
2927 if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
2928 device_printf(dev, "ARQ Overflow Error detected\n");
2929 reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
2932 if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
2933 device_printf(dev, "ARQ Critical Error detected\n");
2934 reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
2938 wr32(hw, hw->aq.arq.len, reg);
2940 oldreg = reg = rd32(hw, hw->aq.asq.len);
2941 if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
2942 device_printf(dev, "ASQ VF Error detected\n");
2943 reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
2946 if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
2947 device_printf(dev, "ASQ Overflow Error detected\n");
2948 reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
2951 if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
2952 device_printf(dev, "ASQ Critical Error detected\n");
2953 reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
2957 wr32(hw, hw->aq.asq.len, reg);
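/*
 * The ARQ/ASQ error bits above are masked out of the handler's copy
 * of each length register and written back to clear them; if any were
 * seen, aq_error is set and the adapter is reinitialized below.
 */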
2959 if (aq_error) {
2960 /* Need to reset adapter */
2961 device_printf(dev, "WARNING: Resetting!\n");
2962 sc->init_state = IXLV_RESET_REQUIRED;
2964 ixlv_init_locked(sc);
2966 ixlv_enable_adminq_irq(hw);
2970 ixlv_add_sysctls(struct ixlv_sc *sc)
2972 device_t dev = sc->dev;
2973 struct ixl_vsi *vsi = &sc->vsi;
2974 struct i40e_eth_stats *es = &vsi->eth_stats;
2976 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2977 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2978 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2980 struct sysctl_oid *vsi_node, *queue_node;
2981 struct sysctl_oid_list *vsi_list, *queue_list;
2983 #define QUEUE_NAME_LEN 32
2984 char queue_namebuf[QUEUE_NAME_LEN];
2986 struct ixl_queue *queues = vsi->queues;
2987 struct tx_ring *txr;
2988 struct rx_ring *rxr;
2990 /* Driver statistics sysctls */
2991 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
2992 CTLFLAG_RD, &sc->watchdog_events,
2993 "Watchdog timeouts");
2994 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
2995 CTLFLAG_RD, &sc->admin_irq,
2996 "Admin Queue IRQ Handled");
2998 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_size",
2999 CTLFLAG_RD, &vsi->num_tx_desc, 0,
3001 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_size",
3002 CTLFLAG_RD, &vsi->num_rx_desc, 0,
3005 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "current_speed",
3006 CTLTYPE_STRING | CTLFLAG_RD,
3007 sc, 0, ixlv_sysctl_current_speed,
3008 "A", "Current Port Speed");
3010 /* VSI statistics sysctls */
3011 vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
3012 CTLFLAG_RD, NULL, "VSI-specific statistics");
3013 vsi_list = SYSCTL_CHILDREN(vsi_node);
3015 struct ixl_sysctl_info ctls[] =
3017 {&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3018 {&es->rx_unicast, "ucast_pkts_rcvd",
3019 "Unicast Packets Received"},
3020 {&es->rx_multicast, "mcast_pkts_rcvd",
3021 "Multicast Packets Received"},
3022 {&es->rx_broadcast, "bcast_pkts_rcvd",
3023 "Broadcast Packets Received"},
3024 {&es->rx_discards, "rx_discards", "Discarded RX packets"},
3025 {&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"},
3026 {&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3027 {&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3028 {&es->tx_multicast, "mcast_pkts_txd",
3029 "Multicast Packets Transmitted"},
3030 {&es->tx_broadcast, "bcast_pkts_txd",
3031 "Broadcast Packets Transmitted"},
3032 {&es->tx_errors, "tx_errors", "TX packet errors"},
3036 struct ixl_sysctl_info *entry = ctls;
3037 while (entry->stat != NULL)
3039 SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name,
3040 CTLFLAG_RD, entry->stat,
3041 entry->description);
3046 for (int q = 0; q < vsi->num_queues; q++) {
3047 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3048 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
3049 CTLFLAG_RD, NULL, "Queue Name");
3050 queue_list = SYSCTL_CHILDREN(queue_node);
3052 txr = &(queues[q].txr);
3053 rxr = &(queues[q].rxr);
3055 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3056 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3057 "m_defrag() failed");
3058 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
3059 CTLFLAG_RD, &(queues[q].dropped_pkts),
3060 "Driver dropped packets");
3061 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
3062 CTLFLAG_RD, &(queues[q].irqs),
3063 "irqs on this queue");
3064 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3065 CTLFLAG_RD, &(queues[q].tso),
3067 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
3068 CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
3069 "Driver tx dma failure in xmit");
3070 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3071 CTLFLAG_RD, &(txr->no_desc),
3072 "Queue No Descriptor Available");
3073 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3074 CTLFLAG_RD, &(txr->total_packets),
3075 "Queue Packets Transmitted");
3076 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3077 CTLFLAG_RD, &(txr->tx_bytes),
3078 "Queue Bytes Transmitted");
3079 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3080 CTLFLAG_RD, &(rxr->rx_packets),
3081 "Queue Packets Received");
3082 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3083 CTLFLAG_RD, &(rxr->rx_bytes),
3084 "Queue Bytes Received");
3085 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
3086 CTLFLAG_RD, &(rxr->itr), 0,
3087 "Queue Rx ITR Interval");
3088 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
3089 CTLFLAG_RD, &(txr->itr), 0,
3090 "Queue Tx ITR Interval");
3093 /* Examine queue state */
3094 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head",
3095 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
3096 sizeof(struct ixl_queue),
3097 ixlv_sysctl_qtx_tail_handler, "IU",
3098 "Queue Transmit Descriptor Tail");
3099 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_head",
3100 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
3101 sizeof(struct ixl_queue),
3102 ixlv_sysctl_qrx_tail_handler, "IU",
3103 "Queue Receive Descriptor Tail");
3104 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "watchdog_timer",
3105 CTLFLAG_RD, &(txr->watchdog_timer), 0,
3106 "Ticks before watchdog event is triggered");
3112 ixlv_init_filters(struct ixlv_sc *sc)
3114 sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
3115 M_DEVBUF, M_NOWAIT | M_ZERO);
3116 SLIST_INIT(sc->mac_filters);
3117 sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
3118 M_DEVBUF, M_NOWAIT | M_ZERO);
3119 SLIST_INIT(sc->vlan_filters);
3124 ixlv_free_filters(struct ixlv_sc *sc)
3126 struct ixlv_mac_filter *f;
3127 struct ixlv_vlan_filter *v;
3129 while (!SLIST_EMPTY(sc->mac_filters)) {
3130 f = SLIST_FIRST(sc->mac_filters);
3131 SLIST_REMOVE_HEAD(sc->mac_filters, next);
3134 free(sc->mac_filters, M_DEVBUF);
3135 while (!SLIST_EMPTY(sc->vlan_filters)) {
3136 v = SLIST_FIRST(sc->vlan_filters);
3137 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
3140 free(sc->vlan_filters, M_DEVBUF);
3145 ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed)
3159 switch (link_speed) {
3160 case VIRTCHNL_LINK_SPEED_100MB:
3163 case VIRTCHNL_LINK_SPEED_1GB:
3166 case VIRTCHNL_LINK_SPEED_10GB:
3169 case VIRTCHNL_LINK_SPEED_40GB:
3172 case VIRTCHNL_LINK_SPEED_20GB:
3175 case VIRTCHNL_LINK_SPEED_25GB:
3178 case VIRTCHNL_LINK_SPEED_UNKNOWN:
3184 return speeds[index];
3188 ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
3190 struct ixlv_sc *sc = (struct ixlv_sc *)arg1;
3193 error = sysctl_handle_string(oidp,
3194 ixlv_vc_speed_to_string(sc->link_speed),
3201 * ixlv_sysctl_qtx_tail_handler
3202 * Retrieves I40E_QTX_TAIL1 value from hardware
3206 ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
3208 struct ixl_queue *que;
3212 que = ((struct ixl_queue *)oidp->oid_arg1);
3215 val = rd32(que->vsi->hw, que->txr.tail);
3216 error = sysctl_handle_int(oidp, &val, 0, req);
3217 if (error || !req->newptr)
3223 * ixlv_sysctl_qrx_tail_handler
3224 * Retrieves I40E_QRX_TAIL1 value from hardware
3228 ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
3230 struct ixl_queue *que;
3234 que = ((struct ixl_queue *)oidp->oid_arg1);
3237 val = rd32(que->vsi->hw, que->rxr.tail);
3238 error = sysctl_handle_int(oidp, &val, 0, req);
3239 if (error || !req->newptr)