/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixlv_driver_version[] = "1.4.12-k";
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixlv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/
static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixlv_strings[] = {
	"Intel(R) Ethernet Connection XL710/X722 VF Driver"
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixlv_probe(device_t);
static int	ixlv_attach(device_t);
static int	ixlv_detach(device_t);
static int	ixlv_shutdown(device_t);
static void	ixlv_init_locked(struct ixlv_sc *);
static int	ixlv_allocate_pci_resources(struct ixlv_sc *);
static void	ixlv_free_pci_resources(struct ixlv_sc *);
static int	ixlv_assign_msix(struct ixlv_sc *);
static int	ixlv_init_msix(struct ixlv_sc *);
static int	ixlv_init_taskqueue(struct ixlv_sc *);
static int	ixlv_setup_queues(struct ixlv_sc *);
static void	ixlv_config_rss(struct ixlv_sc *);
static void	ixlv_stop(struct ixlv_sc *);
static void	ixlv_add_multi(struct ixl_vsi *);
static void	ixlv_del_multi(struct ixl_vsi *);
static void	ixlv_free_queues(struct ixl_vsi *);
static int	ixlv_setup_interface(device_t, struct ixlv_sc *);
static int	ixlv_teardown_adminq_msix(struct ixlv_sc *);

static int	ixlv_media_change(struct ifnet *);
static void	ixlv_media_status(struct ifnet *, struct ifmediareq *);

static void	ixlv_local_timer(void *);

static int	ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
static int	ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void	ixlv_init_filters(struct ixlv_sc *);
static void	ixlv_free_filters(struct ixlv_sc *);

static void	ixlv_msix_que(void *);
static void	ixlv_msix_adminq(void *);
static void	ixlv_do_adminq(void *, int);
static void	ixlv_do_adminq_locked(struct ixlv_sc *sc);
static void	ixlv_handle_que(void *, int);
static int	ixlv_reset(struct ixlv_sc *);
static int	ixlv_reset_complete(struct i40e_hw *);
static void	ixlv_set_queue_rx_itr(struct ixl_queue *);
static void	ixlv_set_queue_tx_itr(struct ixl_queue *);
static void	ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
		    enum i40e_status_code);
static void	ixlv_configure_itr(struct ixlv_sc *);

static void	ixlv_enable_adminq_irq(struct i40e_hw *);
static void	ixlv_disable_adminq_irq(struct i40e_hw *);
static void	ixlv_enable_queue_irq(struct i40e_hw *, int);
static void	ixlv_disable_queue_irq(struct i40e_hw *, int);

static void	ixlv_setup_vlan_filters(struct ixlv_sc *);
static void	ixlv_register_vlan(void *, struct ifnet *, u16);
static void	ixlv_unregister_vlan(void *, struct ifnet *, u16);

static void	ixlv_init_hw(struct ixlv_sc *);
static int	ixlv_setup_vc(struct ixlv_sc *);
static int	ixlv_vf_config(struct ixlv_sc *);

static void	ixlv_cap_txcsum_tso(struct ixl_vsi *,
		    struct ifnet *, int);

static void	ixlv_add_sysctls(struct ixlv_sc *);

static int	ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixlv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixlv_probe),
	DEVMETHOD(device_attach, ixlv_attach),
	DEVMETHOD(device_detach, ixlv_detach),
	DEVMETHOD(device_shutdown, ixlv_shutdown),
	DEVMETHOD_END
};

static driver_t ixlv_driver = {
	"ixlv", ixlv_methods, sizeof(struct ixlv_sc),
};

devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);

MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);
/*
** TUNABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
    "IXLV driver parameters");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixlv_ringsz = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixlv_ringsz, 0, "Descriptor Ring Size");

/* Set to zero to auto calculate */
int ixlv_max_queues = 0;
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixlv_max_queues, 0, "Number of Queues");

/*
** Number of entries in Tx queue buf_ring.
** Increasing this will reduce the number of
** errors when transmitting fragmented UDP
** datagrams.
*/
static int ixlv_txbrsz = DEFAULT_TXBRSZ;
TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
    &ixlv_txbrsz, 0, "TX Buf Ring Size");

/*
** Controls for Interrupt Throttling
**   - true/false for dynamic adjustment
**   - default values for static ITR
*/
int ixlv_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixlv_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixlv_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixlv_rx_itr, 0, "RX Interrupt Rate");

int ixlv_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixlv_tx_itr, 0, "TX Interrupt Rate");
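
/*
 * Usage sketch: these tunables are read from the kernel environment at
 * module load time, typically via /boot/loader.conf. The values below
 * are arbitrary examples, not recommendations:
 *
 *   hw.ixlv.ringsz=2048
 *   hw.ixlv.max_queues=4
 *   hw.ixlv.dynamic_rx_itr=1
 */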
220 /*********************************************************************
221 * Device identification routine
223 * ixlv_probe determines if the driver should be loaded on
224 * the hardware based on PCI vendor/device id of the device.
226 * return BUS_PROBE_DEFAULT on success, positive on failure
227 *********************************************************************/
230 ixlv_probe(device_t dev)
232 ixl_vendor_info_t *ent;
234 u16 pci_vendor_id, pci_device_id;
235 u16 pci_subvendor_id, pci_subdevice_id;
236 char device_name[256];
239 INIT_DEBUGOUT("ixlv_probe: begin");
242 pci_vendor_id = pci_get_vendor(dev);
243 if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
246 pci_device_id = pci_get_device(dev);
247 pci_subvendor_id = pci_get_subvendor(dev);
248 pci_subdevice_id = pci_get_subdevice(dev);
250 ent = ixlv_vendor_info_array;
251 while (ent->vendor_id != 0) {
252 if ((pci_vendor_id == ent->vendor_id) &&
253 (pci_device_id == ent->device_id) &&
255 ((pci_subvendor_id == ent->subvendor_id) ||
256 (ent->subvendor_id == 0)) &&
258 ((pci_subdevice_id == ent->subdevice_id) ||
259 (ent->subdevice_id == 0))) {
260 sprintf(device_name, "%s, Version - %s",
261 ixlv_strings[ent->index],
262 ixlv_driver_version);
263 device_set_desc_copy(dev, device_name);
264 return (BUS_PROBE_DEFAULT);
271 /*********************************************************************
272 * Device initialization routine
274 * The attach entry point is called when the driver is being loaded.
275 * This routine identifies the type of hardware, allocates all resources
276 * and initializes the hardware.
278 * return 0 on success, positive on failure
279 *********************************************************************/
282 ixlv_attach(device_t dev)
289 INIT_DBG_DEV(dev, "begin");
291 /* Allocate, clear, and link in our primary soft structure */
292 sc = device_get_softc(dev);
293 sc->dev = sc->osdep.dev = dev;
298 /* Initialize hw struct */
301 /* Allocate filter lists */
302 ixlv_init_filters(sc);
305 mtx_init(&sc->mtx, device_get_nameunit(dev),
306 "IXL SC Lock", MTX_DEF);
308 /* Set up the timer callout */
309 callout_init_mtx(&sc->timer, &sc->mtx, 0);
311 /* Do PCI setup - map BAR0, etc */
312 if (ixlv_allocate_pci_resources(sc)) {
313 device_printf(dev, "%s: Allocation of PCI resources failed\n",
319 INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");
321 error = i40e_set_mac_type(hw);
323 device_printf(dev, "%s: set_mac_type failed: %d\n",
328 error = ixlv_reset_complete(hw);
330 device_printf(dev, "%s: Device is still being reset\n",
335 INIT_DBG_DEV(dev, "VF Device is ready for configuration");
337 error = ixlv_setup_vc(sc);
339 device_printf(dev, "%s: Error setting up PF comms, %d\n",
344 INIT_DBG_DEV(dev, "PF API version verified");
346 /* Need API version before sending reset message */
347 error = ixlv_reset(sc);
349 device_printf(dev, "VF reset failed; reload the driver\n");
353 INIT_DBG_DEV(dev, "VF reset complete");
355 /* Ask for VF config from PF */
356 error = ixlv_vf_config(sc);
358 device_printf(dev, "Error getting configuration from PF: %d\n",
363 device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n",
364 sc->vf_res->num_vsis,
365 sc->vf_res->num_queue_pairs,
366 sc->vf_res->max_vectors,
367 sc->vf_res->rss_key_size,
368 sc->vf_res->rss_lut_size);
370 device_printf(dev, "Offload flags: 0x%b\n",
371 sc->vf_res->vf_offload_flags, IXLV_PRINTF_VF_OFFLOAD_FLAGS);
374 /* got VF config message back from PF, now we can parse it */
375 for (int i = 0; i < sc->vf_res->num_vsis; i++) {
376 if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
377 sc->vsi_res = &sc->vf_res->vsi_res[i];
380 device_printf(dev, "%s: no LAN VSI found\n", __func__);
385 INIT_DBG_DEV(dev, "Resource Acquisition complete");
387 /* If no mac address was assigned just make a random one */
388 if (!ixlv_check_ether_addr(hw->mac.addr)) {
389 u8 addr[ETHER_ADDR_LEN];
390 arc4rand(&addr, sizeof(addr), 0);
393 bcopy(addr, hw->mac.addr, sizeof(addr));
396 /* Now that the number of queues for this VF is known, set up interrupts */
397 sc->msix = ixlv_init_msix(sc);
398 /* We fail without MSIX support */
404 vsi->id = sc->vsi_res->vsi_id;
405 vsi->back = (void *)sc;
408 /* This allocates the memory and early settings */
409 if (ixlv_setup_queues(sc) != 0) {
410 device_printf(dev, "%s: setup queues failed!\n",
416 /* Setup the stack interface */
417 if (ixlv_setup_interface(dev, sc) != 0) {
418 device_printf(dev, "%s: setup interface failed!\n",
424 INIT_DBG_DEV(dev, "Queue memory and interface setup");
426 /* Do queue interrupt setup */
427 if (ixlv_assign_msix(sc) != 0) {
428 device_printf(dev, "%s: allocating queue interrupts failed!\n",
434 /* Start AdminQ taskqueue */
435 ixlv_init_taskqueue(sc);
437 /* Initialize stats */
438 bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
439 ixlv_add_sysctls(sc);
441 /* Register for VLAN events */
442 vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
443 ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
444 vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
445 ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
447 /* We want AQ enabled early */
448 ixlv_enable_adminq_irq(hw);
450 /* Set things up to run init */
451 sc->init_state = IXLV_INIT_READY;
453 ixl_vc_init_mgr(sc, &sc->vc_mgr);
455 INIT_DBG_DEV(dev, "end");
459 ixlv_free_queues(vsi);
461 free(sc->vf_res, M_DEVBUF);
463 i40e_shutdown_adminq(hw);
465 ixlv_free_pci_resources(sc);
467 mtx_destroy(&sc->mtx);
468 ixlv_free_filters(sc);
469 INIT_DBG_DEV(dev, "end: error %d", error);
473 /*********************************************************************
474 * Device removal routine
476 * The detach entry point is called when the driver is being removed.
477 * This routine stops the adapter and deallocates all the resources
478 * that were allocated for driver operation.
480 * return 0 on success, positive on failure
481 *********************************************************************/
484 ixlv_detach(device_t dev)
486 struct ixlv_sc *sc = device_get_softc(dev);
487 struct ixl_vsi *vsi = &sc->vsi;
488 struct i40e_hw *hw = &sc->hw;
489 enum i40e_status_code status;
491 INIT_DBG_DEV(dev, "begin");
493 /* Make sure VLANS are not using driver */
494 if (vsi->ifp->if_vlantrunk != NULL) {
495 if_printf(vsi->ifp, "Vlan in use, detach first\n");
500 ether_ifdetach(vsi->ifp);
501 if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
504 mtx_unlock(&sc->mtx);
507 /* Unregister VLAN events */
508 if (vsi->vlan_attach != NULL)
509 EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
510 if (vsi->vlan_detach != NULL)
511 EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
514 callout_drain(&sc->vc_mgr.callout);
516 ixlv_disable_adminq_irq(hw);
517 ixlv_teardown_adminq_msix(sc);
518 /* Drain admin queue taskqueue */
519 taskqueue_free(sc->tq);
520 status = i40e_shutdown_adminq(&sc->hw);
521 if (status != I40E_SUCCESS) {
523 "i40e_shutdown_adminq() failed with status %s\n",
524 i40e_stat_str(hw, status));
528 free(sc->vf_res, M_DEVBUF);
529 ixlv_free_pci_resources(sc);
530 ixlv_free_queues(vsi);
531 ixlv_free_filters(sc);
533 bus_generic_detach(dev);
534 mtx_destroy(&sc->mtx);
535 INIT_DBG_DEV(dev, "end");
539 /*********************************************************************
541 * Shutdown entry point
543 **********************************************************************/
546 ixlv_shutdown(device_t dev)
548 struct ixlv_sc *sc = device_get_softc(dev);
550 INIT_DBG_DEV(dev, "begin");
554 mtx_unlock(&sc->mtx);
556 INIT_DBG_DEV(dev, "end");
/*
 * Configure TXCSUM(IPV6) and TSO(4/6)
 *	- the hardware handles these together so we
 *	  need to tweak them in tandem
 */
static void
ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
568 /* Enable/disable TXCSUM/TSO4 */
569 if (!(ifp->if_capenable & IFCAP_TXCSUM)
570 && !(ifp->if_capenable & IFCAP_TSO4)) {
571 if (mask & IFCAP_TXCSUM) {
572 ifp->if_capenable |= IFCAP_TXCSUM;
573 /* enable TXCSUM, restore TSO if previously enabled */
574 if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
575 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
576 ifp->if_capenable |= IFCAP_TSO4;
579 else if (mask & IFCAP_TSO4) {
580 ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
581 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
583 "TSO4 requires txcsum, enabling both...\n");
585 } else if((ifp->if_capenable & IFCAP_TXCSUM)
586 && !(ifp->if_capenable & IFCAP_TSO4)) {
587 if (mask & IFCAP_TXCSUM)
588 ifp->if_capenable &= ~IFCAP_TXCSUM;
589 else if (mask & IFCAP_TSO4)
590 ifp->if_capenable |= IFCAP_TSO4;
591 } else if((ifp->if_capenable & IFCAP_TXCSUM)
592 && (ifp->if_capenable & IFCAP_TSO4)) {
593 if (mask & IFCAP_TXCSUM) {
594 vsi->flags |= IXL_FLAGS_KEEP_TSO4;
595 ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
597 "TSO4 requires txcsum, disabling both...\n");
598 } else if (mask & IFCAP_TSO4)
599 ifp->if_capenable &= ~IFCAP_TSO4;
602 /* Enable/disable TXCSUM_IPV6/TSO6 */
603 if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
604 && !(ifp->if_capenable & IFCAP_TSO6)) {
605 if (mask & IFCAP_TXCSUM_IPV6) {
606 ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
607 if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
608 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
609 ifp->if_capenable |= IFCAP_TSO6;
611 } else if (mask & IFCAP_TSO6) {
612 ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
613 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
615 "TSO6 requires txcsum6, enabling both...\n");
617 } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
618 && !(ifp->if_capenable & IFCAP_TSO6)) {
619 if (mask & IFCAP_TXCSUM_IPV6)
620 ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
621 else if (mask & IFCAP_TSO6)
622 ifp->if_capenable |= IFCAP_TSO6;
623 } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
624 && (ifp->if_capenable & IFCAP_TSO6)) {
625 if (mask & IFCAP_TXCSUM_IPV6) {
626 vsi->flags |= IXL_FLAGS_KEEP_TSO6;
627 ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
629 "TSO6 requires txcsum6, disabling both...\n");
630 } else if (mask & IFCAP_TSO6)
631 ifp->if_capenable &= ~IFCAP_TSO6;
635 /*********************************************************************
638 * ixlv_ioctl is called when the user wants to configure the
641 * return 0 on success, positive on failure
642 **********************************************************************/
645 ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
647 struct ixl_vsi *vsi = ifp->if_softc;
648 struct ixlv_sc *sc = vsi->back;
649 struct ifreq *ifr = (struct ifreq *)data;
650 #if defined(INET) || defined(INET6)
651 struct ifaddr *ifa = (struct ifaddr *)data;
652 bool avoid_reset = FALSE;
661 if (ifa->ifa_addr->sa_family == AF_INET)
665 if (ifa->ifa_addr->sa_family == AF_INET6)
668 #if defined(INET) || defined(INET6)
670 ** Calling init results in link renegotiation,
671 ** so we avoid doing it when possible.
674 ifp->if_flags |= IFF_UP;
675 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
678 if (!(ifp->if_flags & IFF_NOARP))
679 arp_ifinit(ifp, ifa);
682 error = ether_ioctl(ifp, command, data);
686 IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
688 if (ifr->ifr_mtu > IXL_MAX_FRAME -
689 ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
691 IOCTL_DBG_IF(ifp, "mtu too large");
693 IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", (u_long)ifp->if_mtu, ifr->ifr_mtu);
694 // ERJ: Interestingly enough, these types don't match
695 ifp->if_mtu = (u_long)ifr->ifr_mtu;
696 vsi->max_frame_size =
697 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
698 + ETHER_VLAN_ENCAP_LEN;
699 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
700 ixlv_init_locked(sc);
702 mtx_unlock(&sc->mtx);
705 IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
707 if (ifp->if_flags & IFF_UP) {
708 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
709 ixlv_init_locked(sc);
711 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
713 sc->if_flags = ifp->if_flags;
714 mtx_unlock(&sc->mtx);
717 IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
718 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
720 ixlv_disable_intr(vsi);
722 ixlv_enable_intr(vsi);
723 mtx_unlock(&sc->mtx);
727 IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
728 if (sc->init_state == IXLV_RUNNING) {
730 ixlv_disable_intr(vsi);
732 ixlv_enable_intr(vsi);
733 mtx_unlock(&sc->mtx);
738 IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
739 error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
743 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
744 IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");
746 ixlv_cap_txcsum_tso(vsi, ifp, mask);
748 if (mask & IFCAP_RXCSUM)
749 ifp->if_capenable ^= IFCAP_RXCSUM;
750 if (mask & IFCAP_RXCSUM_IPV6)
751 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
752 if (mask & IFCAP_LRO)
753 ifp->if_capenable ^= IFCAP_LRO;
754 if (mask & IFCAP_VLAN_HWTAGGING)
755 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
756 if (mask & IFCAP_VLAN_HWFILTER)
757 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
758 if (mask & IFCAP_VLAN_HWTSO)
759 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
760 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
763 VLAN_CAPABILITIES(ifp);
769 IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
770 error = ether_ioctl(ifp, command, data);
/*
** Reinitializing the VF is unfortunately more complicated than it is
** on a physical device: the PF must more or less completely recreate
** our memory, so many things that traditional drivers do only once at
** attach must be redone here at every reinitialization. This function
** does that 'prelude' so we can then call the normal locked init code.
*/
static int
ixlv_reinit_locked(struct ixlv_sc *sc)
{
788 struct i40e_hw *hw = &sc->hw;
789 struct ixl_vsi *vsi = &sc->vsi;
790 struct ifnet *ifp = vsi->ifp;
791 struct ixlv_mac_filter *mf, *mf_temp;
792 struct ixlv_vlan_filter *vf;
795 INIT_DBG_IF(ifp, "begin");
797 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
800 error = ixlv_reset(sc);
802 INIT_DBG_IF(ifp, "VF was reset");
804 /* set the state in case we went thru RESET */
805 sc->init_state = IXLV_RUNNING;
808 ** Resetting the VF drops all filters from hardware;
809 ** we need to mark them to be re-added in init.
811 SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
812 if (mf->flags & IXL_FILTER_DEL) {
813 SLIST_REMOVE(sc->mac_filters, mf,
814 ixlv_mac_filter, next);
817 mf->flags |= IXL_FILTER_ADD;
819 if (vsi->num_vlans != 0)
820 SLIST_FOREACH(vf, sc->vlan_filters, next)
821 vf->flags = IXL_FILTER_ADD;
822 else { /* clean any stale filters */
823 while (!SLIST_EMPTY(sc->vlan_filters)) {
824 vf = SLIST_FIRST(sc->vlan_filters);
825 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
830 ixlv_enable_adminq_irq(hw);
831 ixl_vc_flush(&sc->vc_mgr);
833 INIT_DBG_IF(ifp, "end");
838 ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
839 enum i40e_status_code code)
846 * Ignore "Adapter Stopped" message as that happens if an ifconfig down
847 * happens while a command is in progress, so we don't print an error
850 if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
851 if_printf(sc->vsi.ifp,
852 "Error %s waiting for PF to complete operation %d\n",
853 i40e_stat_str(&sc->hw, code), cmd->request);
858 ixlv_init_locked(struct ixlv_sc *sc)
860 struct i40e_hw *hw = &sc->hw;
861 struct ixl_vsi *vsi = &sc->vsi;
862 struct ixl_queue *que = vsi->queues;
863 struct ifnet *ifp = vsi->ifp;
866 INIT_DBG_IF(ifp, "begin");
868 IXLV_CORE_LOCK_ASSERT(sc);
870 /* Do a reinit first if an init has already been done */
871 if ((sc->init_state == IXLV_RUNNING) ||
872 (sc->init_state == IXLV_RESET_REQUIRED) ||
873 (sc->init_state == IXLV_RESET_PENDING))
874 error = ixlv_reinit_locked(sc);
875 /* Don't bother with init if we failed reinit */
879 /* Remove existing MAC filter if new MAC addr is set */
880 if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
881 error = ixlv_del_mac_filter(sc, hw->mac.addr);
883 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
884 IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
888 /* Check for an LAA mac address... */
889 bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);
891 ifp->if_hwassist = 0;
892 if (ifp->if_capenable & IFCAP_TSO)
893 ifp->if_hwassist |= CSUM_TSO;
894 if (ifp->if_capenable & IFCAP_TXCSUM)
895 ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
896 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
897 ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;
899 /* Add mac filter for this VF to PF */
900 if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
901 error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
902 if (!error || error == EEXIST)
903 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
904 IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
908 /* Setup vlan's if needed */
909 ixlv_setup_vlan_filters(sc);
911 /* Prepare the queues for operation */
912 for (int i = 0; i < vsi->num_queues; i++, que++) {
913 struct rx_ring *rxr = &que->rxr;
915 ixl_init_tx_ring(que);
917 if (vsi->max_frame_size <= MCLBYTES)
918 rxr->mbuf_sz = MCLBYTES;
920 rxr->mbuf_sz = MJUMPAGESIZE;
921 ixl_init_rx_ring(que);
924 /* Set initial ITR values */
925 ixlv_configure_itr(sc);
927 /* Configure queues */
928 ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
929 IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);
935 ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
936 IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);
939 ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
940 IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);
942 /* Start the local timer */
943 callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
945 sc->init_state = IXLV_RUNNING;
948 INIT_DBG_IF(ifp, "end");
953 ** Init entry point for the stack
958 struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
959 struct ixlv_sc *sc = vsi->back;
962 /* Prevent init from running again while waiting for AQ calls
963 * made in init_locked() to complete. */
965 if (sc->init_in_progress) {
966 mtx_unlock(&sc->mtx);
969 sc->init_in_progress = true;
971 ixlv_init_locked(sc);
972 mtx_unlock(&sc->mtx);
974 /* Wait for init_locked to finish */
975 while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
976 && ++retries < IXLV_MAX_INIT_WAIT) {
979 if (retries >= IXLV_MAX_INIT_WAIT) {
981 "Init failed to complete in allotted time!\n");
985 sc->init_in_progress = false;
986 mtx_unlock(&sc->mtx);
990 * ixlv_attach() helper function; gathers information about
991 * the (virtual) hardware for use elsewhere in the driver.
994 ixlv_init_hw(struct ixlv_sc *sc)
996 struct i40e_hw *hw = &sc->hw;
997 device_t dev = sc->dev;
999 /* Save off the information about this board */
1000 hw->vendor_id = pci_get_vendor(dev);
1001 hw->device_id = pci_get_device(dev);
1002 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1003 hw->subsystem_vendor_id =
1004 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1005 hw->subsystem_device_id =
1006 pci_read_config(dev, PCIR_SUBDEV_0, 2);
1008 hw->bus.device = pci_get_slot(dev);
1009 hw->bus.func = pci_get_function(dev);
/*
 * ixlv_attach() helper function; initializes the admin queue
 * and attempts to establish contact with the PF by retrying
 * the initial "API version" message several times, or until
 * the PF responds.
 */
static int
ixlv_setup_vc(struct ixlv_sc *sc)
{
1021 struct i40e_hw *hw = &sc->hw;
1022 device_t dev = sc->dev;
1023 int error = 0, ret_error = 0, asq_retries = 0;
1024 bool send_api_ver_retried = 0;
	/* Need to set these AQ parameters before initializing AQ */
1027 hw->aq.num_arq_entries = IXL_AQ_LEN;
1028 hw->aq.num_asq_entries = IXL_AQ_LEN;
1029 hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
1030 hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
1032 for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
1033 /* Initialize admin queue */
1034 error = i40e_init_adminq(hw);
1036 device_printf(dev, "%s: init_adminq failed: %d\n",
1042 INIT_DBG_DEV(dev, "Initialized Admin Queue; starting"
1043 " send_api_ver attempt %d", i+1);
1046 /* Send VF's API version */
1047 error = ixlv_send_api_ver(sc);
1049 i40e_shutdown_adminq(hw);
1051 device_printf(dev, "%s: unable to send api"
1052 " version to PF on attempt %d, error %d\n",
1053 __func__, i+1, error);
1057 while (!i40e_asq_done(hw)) {
1058 if (++asq_retries > IXLV_AQ_MAX_ERR) {
1059 i40e_shutdown_adminq(hw);
1060 device_printf(dev, "Admin Queue timeout "
1061 "(waiting for send_api_ver), %d more tries...\n",
1062 IXLV_AQ_MAX_ERR - (i + 1));
1066 i40e_msec_pause(10);
1068 if (asq_retries > IXLV_AQ_MAX_ERR)
1071 INIT_DBG_DEV(dev, "Sent API version message to PF");
1073 /* Verify that the VF accepts the PF's API version */
1074 error = ixlv_verify_api_ver(sc);
1075 if (error == ETIMEDOUT) {
1076 if (!send_api_ver_retried) {
1077 /* Resend message, one more time */
1078 send_api_ver_retried++;
1080 "%s: Timeout while verifying API version on first"
1081 " try!\n", __func__);
1085 "%s: Timeout while verifying API version on second"
1086 " try!\n", __func__);
1093 "%s: Unable to verify API version,"
1094 " error %s\n", __func__, i40e_stat_str(hw, error));
1101 i40e_shutdown_adminq(hw);
1106 * ixlv_attach() helper function; asks the PF for this VF's
1107 * configuration, and saves the information if it receives it.
1110 ixlv_vf_config(struct ixlv_sc *sc)
1112 struct i40e_hw *hw = &sc->hw;
1113 device_t dev = sc->dev;
1114 int bufsz, error = 0, ret_error = 0;
1115 int asq_retries, retried = 0;
1118 error = ixlv_send_vf_config_msg(sc);
1121 "%s: Unable to send VF config request, attempt %d,"
1122 " error %d\n", __func__, retried + 1, error);
1127 while (!i40e_asq_done(hw)) {
1128 if (++asq_retries > IXLV_AQ_MAX_ERR) {
1129 device_printf(dev, "%s: Admin Queue timeout "
1130 "(waiting for send_vf_config_msg), attempt %d\n",
1131 __func__, retried + 1);
1135 i40e_msec_pause(10);
1138 INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
1142 bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
1143 (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
1144 sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
1147 "%s: Unable to allocate memory for VF configuration"
1148 " message from PF on attempt %d\n", __func__, retried + 1);
1154 /* Check for VF config response */
1155 error = ixlv_get_vf_config(sc);
1156 if (error == ETIMEDOUT) {
		/* The first time we time out, send the configuration message again */
1163 "%s: ixlv_get_vf_config() timed out waiting for a response\n",
1168 "%s: Unable to get VF configuration from PF after %d tries!\n",
1169 __func__, retried + 1);
1175 free(sc->vf_res, M_DEVBUF);
1181 * Allocate MSI/X vectors, setup the AQ vector early
1184 ixlv_init_msix(struct ixlv_sc *sc)
1186 device_t dev = sc->dev;
1187 int rid, want, vectors, queues, available;
1188 int auto_max_queues;
1190 rid = PCIR_BAR(IXL_MSIX_BAR);
1191 sc->msix_mem = bus_alloc_resource_any(dev,
1192 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1193 if (!sc->msix_mem) {
1194 /* May not be enabled */
1195 device_printf(sc->dev,
1196 "Unable to map MSIX table\n");
1200 available = pci_msix_count(dev);
1201 if (available == 0) { /* system has msix disabled */
1202 bus_release_resource(dev, SYS_RES_MEMORY,
1204 sc->msix_mem = NULL;
1208 /* Clamp queues to number of CPUs and # of MSI-X vectors available */
1209 auto_max_queues = min(mp_ncpus, available - 1);
1210 /* Clamp queues to # assigned to VF by PF */
1211 auto_max_queues = min(auto_max_queues, sc->vf_res->num_queue_pairs);
1213 /* Override with tunable value if tunable is less than autoconfig count */
1214 if ((ixlv_max_queues != 0) && (ixlv_max_queues <= auto_max_queues))
1215 queues = ixlv_max_queues;
1216 /* Use autoconfig amount if that's lower */
1217 else if ((ixlv_max_queues != 0) && (ixlv_max_queues > auto_max_queues)) {
1218 device_printf(dev, "ixlv_max_queues (%d) is too large, using "
1219 "autoconfig amount (%d)...\n",
1220 ixlv_max_queues, auto_max_queues);
1221 queues = auto_max_queues;
1223 /* Limit maximum auto-configured queues to 8 if no user value is set */
1225 queues = min(auto_max_queues, 8);
1228 /* If we're doing RSS, clamp at the number of RSS buckets */
1229 if (queues > rss_getnumbuckets())
1230 queues = rss_getnumbuckets();
1234 ** Want one vector (RX/TX pair) per queue
1235 ** plus an additional for the admin queue.
1238 if (want <= available) /* Have enough */
1241 device_printf(sc->dev,
1242 "MSIX Configuration Problem, "
1243 "%d vectors available but %d wanted!\n",
1250 * If we're doing RSS, the number of queues needs to
1251 * match the number of RSS buckets that are configured.
1253 * + If there's more queues than RSS buckets, we'll end
1254 * up with queues that get no traffic.
1256 * + If there's more RSS buckets than queues, we'll end
1257 * up having multiple RSS buckets map to the same queue,
1258 * so there'll be some contention.
1260 if (queues != rss_getnumbuckets()) {
1262 "%s: queues (%d) != RSS buckets (%d)"
1263 "; performance will be impacted.\n",
1264 __func__, queues, rss_getnumbuckets());
1268 if (pci_alloc_msix(dev, &vectors) == 0) {
1269 device_printf(sc->dev,
1270 "Using MSIX interrupts with %d vectors\n", vectors);
1272 sc->vsi.num_queues = queues;
1275 /* Next we need to setup the vector for the Admin Queue */
1276 rid = 1; /* zero vector + 1 */
1277 sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1278 &rid, RF_SHAREABLE | RF_ACTIVE);
1279 if (sc->res == NULL) {
1280 device_printf(dev, "Unable to allocate"
1281 " bus resource: AQ interrupt \n");
1284 if (bus_setup_intr(dev, sc->res,
1285 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1286 ixlv_msix_adminq, sc, &sc->tag)) {
1288 device_printf(dev, "Failed to register AQ handler");
1291 bus_describe_intr(dev, sc->res, sc->tag, "adminq");
1296 /* The VF driver MUST use MSIX */
1301 ixlv_allocate_pci_resources(struct ixlv_sc *sc)
1304 device_t dev = sc->dev;
1307 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1310 if (!(sc->pci_mem)) {
1311 device_printf(dev, "Unable to allocate bus resource: memory\n");
1315 sc->osdep.mem_bus_space_tag =
1316 rman_get_bustag(sc->pci_mem);
1317 sc->osdep.mem_bus_space_handle =
1318 rman_get_bushandle(sc->pci_mem);
1319 sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
1320 sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
1321 sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
1323 sc->hw.back = &sc->osdep;
	/*
	** Explicitly set the guest PCI BUSMASTER capability, and rewrite
	** the ENABLE bit in the MSIX control register again at this
	** point, so that the host successfully initializes us.
	**
	** This must be done before accessing any registers.
	*/
1336 pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1337 pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
1338 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1339 pci_find_cap(dev, PCIY_MSIX, &rid);
1340 rid += PCIR_MSIX_CTRL;
1341 msix_ctrl = pci_read_config(dev, rid, 2);
1342 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1343 pci_write_config(dev, rid, msix_ctrl, 2);
1346 /* Disable adminq interrupts (just in case) */
1347 ixlv_disable_adminq_irq(&sc->hw);
1353 ixlv_free_pci_resources(struct ixlv_sc *sc)
1355 struct ixl_vsi *vsi = &sc->vsi;
1356 struct ixl_queue *que = vsi->queues;
1357 device_t dev = sc->dev;
1359 /* We may get here before stations are setup */
1364 ** Release all msix queue resources:
1366 for (int i = 0; i < vsi->num_queues; i++, que++) {
1367 int rid = que->msix + 1;
1368 if (que->tag != NULL) {
1369 bus_teardown_intr(dev, que->res, que->tag);
1372 if (que->res != NULL) {
1373 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1379 pci_release_msi(dev);
1381 if (sc->msix_mem != NULL)
1382 bus_release_resource(dev, SYS_RES_MEMORY,
1383 PCIR_BAR(IXL_MSIX_BAR), sc->msix_mem);
1385 if (sc->pci_mem != NULL)
1386 bus_release_resource(dev, SYS_RES_MEMORY,
1387 PCIR_BAR(0), sc->pci_mem);
1391 * Create taskqueue and tasklet for Admin Queue interrupts.
1394 ixlv_init_taskqueue(struct ixlv_sc *sc)
1398 TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);
1400 sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1401 taskqueue_thread_enqueue, &sc->tq);
1402 taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
1403 device_get_nameunit(sc->dev));
1408 /*********************************************************************
1410 * Setup MSIX Interrupt resources and handlers for the VSI queues
1412 **********************************************************************/
1414 ixlv_assign_msix(struct ixlv_sc *sc)
1416 device_t dev = sc->dev;
1417 struct ixl_vsi *vsi = &sc->vsi;
1418 struct ixl_queue *que = vsi->queues;
1419 struct tx_ring *txr;
1420 int error, rid, vector = 1;
1425 for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1429 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1430 RF_SHAREABLE | RF_ACTIVE);
1431 if (que->res == NULL) {
1432 device_printf(dev,"Unable to allocate"
1433 " bus resource: que interrupt [%d]\n", vector);
1436 /* Set the handler function */
1437 error = bus_setup_intr(dev, que->res,
1438 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1439 ixlv_msix_que, que, &que->tag);
1442 device_printf(dev, "Failed to register que handler");
1445 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1446 /* Bind the vector to a CPU */
1448 cpu_id = rss_getcpu(i % rss_getnumbuckets());
1450 bus_bind_intr(dev, que->res, cpu_id);
1452 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1453 TASK_INIT(&que->task, 0, ixlv_handle_que, que);
1454 que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
1455 taskqueue_thread_enqueue, &que->tq);
1457 CPU_SETOF(cpu_id, &cpu_mask);
1458 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
1459 &cpu_mask, "%s (bucket %d)",
1460 device_get_nameunit(dev), cpu_id);
1462 taskqueue_start_threads(&que->tq, 1, PI_NET,
1463 "%s que", device_get_nameunit(dev));
1472 ** Requests a VF reset from the PF.
1474 ** Requires the VF's Admin Queue to be initialized.
1477 ixlv_reset(struct ixlv_sc *sc)
1479 struct i40e_hw *hw = &sc->hw;
1480 device_t dev = sc->dev;
1483 /* Ask the PF to reset us if we are initiating */
1484 if (sc->init_state != IXLV_RESET_PENDING)
1485 ixlv_request_reset(sc);
1487 i40e_msec_pause(100);
1488 error = ixlv_reset_complete(hw);
1490 device_printf(dev, "%s: VF reset failed\n",
1495 error = i40e_shutdown_adminq(hw);
1497 device_printf(dev, "%s: shutdown_adminq failed: %d\n",
1502 error = i40e_init_adminq(hw);
1504 device_printf(dev, "%s: init_adminq failed: %d\n",
1513 ixlv_reset_complete(struct i40e_hw *hw)
1517 /* Wait up to ~10 seconds */
1518 for (int i = 0; i < 100; i++) {
1519 reg = rd32(hw, I40E_VFGEN_RSTAT) &
1520 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1522 if ((reg == I40E_VFR_VFACTIVE) ||
1523 (reg == I40E_VFR_COMPLETED))
1525 i40e_msec_pause(100);
1532 /*********************************************************************
1534 * Setup networking device structure and register an interface.
1536 **********************************************************************/
1538 ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
1541 struct ixl_vsi *vsi = &sc->vsi;
1542 struct ixl_queue *que = vsi->queues;
1544 INIT_DBG_DEV(dev, "begin");
1546 ifp = vsi->ifp = if_alloc(IFT_ETHER);
1548 device_printf(dev, "%s: could not allocate ifnet"
1549 " structure!\n", __func__);
1553 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1555 ifp->if_mtu = ETHERMTU;
1556 ifp->if_baudrate = IF_Gbps(40);
1557 ifp->if_init = ixlv_init;
1558 ifp->if_softc = vsi;
1559 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1560 ifp->if_ioctl = ixlv_ioctl;
1562 #if __FreeBSD_version >= 1100000
1563 if_setgetcounterfn(ifp, ixl_get_counter);
1566 ifp->if_transmit = ixl_mq_start;
1568 ifp->if_qflush = ixl_qflush;
1569 ifp->if_snd.ifq_maxlen = que->num_desc - 2;
1571 ether_ifattach(ifp, sc->hw.mac.addr);
1573 vsi->max_frame_size =
1574 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1575 + ETHER_VLAN_ENCAP_LEN;
1578 * Tell the upper layer(s) we support long frames.
1580 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1582 ifp->if_capabilities |= IFCAP_HWCSUM;
1583 ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1584 ifp->if_capabilities |= IFCAP_TSO;
1585 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1587 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1592 ifp->if_capenable = ifp->if_capabilities;
	/*
	** Don't turn this on by default: if vlans are created on another
	** pseudo device (e.g. lagg), vlan events are not passed through,
	** breaking operation, but with HW FILTER off it works. If you are
	** using vlans directly on the ixl driver you can enable this and
	** get full hardware tag filtering.
	*/
1602 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1605 * Specify the media types supported by this adapter and register
1606 * callbacks to update media and link information
1608 ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
1611 // JFV Add media types later?
1613 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1614 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1616 INIT_DBG_DEV(dev, "end");
1621 ** Allocate and setup the interface queues
1624 ixlv_setup_queues(struct ixlv_sc *sc)
1626 device_t dev = sc->dev;
1627 struct ixl_vsi *vsi;
1628 struct ixl_queue *que;
1629 struct tx_ring *txr;
1630 struct rx_ring *rxr;
1632 int error = I40E_SUCCESS;
1635 vsi->back = (void *)sc;
1639 /* Get memory for the station queues */
1641 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
1642 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1643 device_printf(dev, "Unable to allocate queue memory\n");
1648 for (int i = 0; i < vsi->num_queues; i++) {
1649 que = &vsi->queues[i];
1650 que->num_desc = ixlv_ringsz;
1656 txr->tail = I40E_QTX_TAIL1(que->me);
1657 /* Initialize the TX lock */
1658 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
1659 device_get_nameunit(dev), que->me);
1660 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
1662 ** Create the TX descriptor ring, the extra int is
1663 ** added as the location for HEAD WB.
1665 tsize = roundup2((que->num_desc *
1666 sizeof(struct i40e_tx_desc)) +
1667 sizeof(u32), DBA_ALIGN);
1668 if (i40e_allocate_dma_mem(&sc->hw,
1669 &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
1671 "Unable to allocate TX Descriptor memory\n");
1675 txr->base = (struct i40e_tx_desc *)txr->dma.va;
1676 bzero((void *)txr->base, tsize);
1677 /* Now allocate transmit soft structs for the ring */
1678 if (ixl_allocate_tx_data(que)) {
1680 "Critical Failure setting up TX structures\n");
1684 /* Allocate a buf ring */
1685 txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
1686 M_WAITOK, &txr->mtx);
1687 if (txr->br == NULL) {
1689 "Critical Failure setting up TX buf ring\n");
1695 * Next the RX queues...
1697 rsize = roundup2(que->num_desc *
1698 sizeof(union i40e_rx_desc), DBA_ALIGN);
1701 rxr->tail = I40E_QRX_TAIL1(que->me);
1703 /* Initialize the RX side lock */
1704 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
1705 device_get_nameunit(dev), que->me);
1706 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
1708 if (i40e_allocate_dma_mem(&sc->hw,
1709 &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
1711 "Unable to allocate RX Descriptor memory\n");
1715 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
1716 bzero((void *)rxr->base, rsize);
1718 /* Allocate receive soft structs for the ring */
1719 if (ixl_allocate_rx_data(que)) {
1721 "Critical Failure setting up receive structs\n");
1730 for (int i = 0; i < vsi->num_queues; i++) {
1731 que = &vsi->queues[i];
1735 i40e_free_dma_mem(&sc->hw, &rxr->dma);
1737 i40e_free_dma_mem(&sc->hw, &txr->dma);
1739 free(vsi->queues, M_DEVBUF);
/*
** This routine is run via a vlan config EVENT; it lets us use the
** HW filter table since we can get the vlan id. This just creates
** the entry in the soft version of the VFTA; init will repopulate
** the real table.
*/
static void
ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
1755 struct ixl_vsi *vsi = arg;
1756 struct ixlv_sc *sc = vsi->back;
1757 struct ixlv_vlan_filter *v;
1760 if (ifp->if_softc != arg) /* Not our event */
1763 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1766 /* Sanity check - make sure it doesn't already exist */
1767 SLIST_FOREACH(v, sc->vlan_filters, next) {
1768 if (v->vlan == vtag)
1774 v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
1775 SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
1777 v->flags = IXL_FILTER_ADD;
1778 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
1779 IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
1780 mtx_unlock(&sc->mtx);
/*
** This routine is run via a vlan unconfig EVENT; it removes our
** entry from the soft VFTA.
*/
static void
ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
1792 struct ixl_vsi *vsi = arg;
1793 struct ixlv_sc *sc = vsi->back;
1794 struct ixlv_vlan_filter *v;
1797 if (ifp->if_softc != arg)
1800 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1804 SLIST_FOREACH(v, sc->vlan_filters, next) {
1805 if (v->vlan == vtag) {
1806 v->flags = IXL_FILTER_DEL;
1812 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
1813 IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
1814 mtx_unlock(&sc->mtx);
1819 ** Get a new filter and add it to the mac filter list.
1821 static struct ixlv_mac_filter *
1822 ixlv_get_mac_filter(struct ixlv_sc *sc)
1824 struct ixlv_mac_filter *f;
1826 f = malloc(sizeof(struct ixlv_mac_filter),
1827 M_DEVBUF, M_NOWAIT | M_ZERO);
1829 SLIST_INSERT_HEAD(sc->mac_filters, f, next);
1835 ** Find the filter with matching MAC address
1837 static struct ixlv_mac_filter *
1838 ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
1840 struct ixlv_mac_filter *f;
1843 SLIST_FOREACH(f, sc->mac_filters, next) {
1844 if (cmp_etheraddr(f->macaddr, macaddr)) {
1856 ixlv_teardown_adminq_msix(struct ixlv_sc *sc)
1858 device_t dev = sc->dev;
1861 if (sc->tag != NULL) {
1862 bus_teardown_intr(dev, sc->res, sc->tag);
1864 device_printf(dev, "bus_teardown_intr() for"
1865 " interrupt 0 failed\n");
1870 if (sc->res != NULL) {
1871 bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
1873 device_printf(dev, "bus_release_resource() for"
1874 " interrupt 0 failed\n");
1885 ** Admin Queue interrupt handler
1888 ixlv_msix_adminq(void *arg)
1890 struct ixlv_sc *sc = arg;
1891 struct i40e_hw *hw = &sc->hw;
1894 reg = rd32(hw, I40E_VFINT_ICR01);
1895 mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
1897 reg = rd32(hw, I40E_VFINT_DYN_CTL01);
1898 reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
1899 wr32(hw, I40E_VFINT_DYN_CTL01, reg);
1902 taskqueue_enqueue(sc->tq, &sc->aq_irq);
1907 ixlv_enable_intr(struct ixl_vsi *vsi)
1909 struct i40e_hw *hw = vsi->hw;
1910 struct ixl_queue *que = vsi->queues;
1912 ixlv_enable_adminq_irq(hw);
1913 for (int i = 0; i < vsi->num_queues; i++, que++)
1914 ixlv_enable_queue_irq(hw, que->me);
1918 ixlv_disable_intr(struct ixl_vsi *vsi)
1920 struct i40e_hw *hw = vsi->hw;
1921 struct ixl_queue *que = vsi->queues;
1923 ixlv_disable_adminq_irq(hw);
1924 for (int i = 0; i < vsi->num_queues; i++, que++)
1925 ixlv_disable_queue_irq(hw, que->me);
1930 ixlv_disable_adminq_irq(struct i40e_hw *hw)
1932 wr32(hw, I40E_VFINT_DYN_CTL01, 0);
1933 wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
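	/* flush the register writes */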
1935 rd32(hw, I40E_VFGEN_RSTAT);
1940 ixlv_enable_adminq_irq(struct i40e_hw *hw)
1942 wr32(hw, I40E_VFINT_DYN_CTL01,
1943 I40E_VFINT_DYN_CTL01_INTENA_MASK |
1944 I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1945 wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
1947 rd32(hw, I40E_VFGEN_RSTAT);
1952 ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
1956 reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1957 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
1958 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
1959 wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
1963 ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
1965 wr32(hw, I40E_VFINT_DYN_CTLN1(id),
1966 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
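	/* a read flushes the preceding posted write */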
1967 rd32(hw, I40E_VFGEN_RSTAT);
1972 * Get initial ITR values from tunable values.
1975 ixlv_configure_itr(struct ixlv_sc *sc)
1977 struct i40e_hw *hw = &sc->hw;
1978 struct ixl_vsi *vsi = &sc->vsi;
1979 struct ixl_queue *que = vsi->queues;
1981 vsi->rx_itr_setting = ixlv_rx_itr;
1982 vsi->tx_itr_setting = ixlv_tx_itr;
1984 for (int i = 0; i < vsi->num_queues; i++, que++) {
1985 struct tx_ring *txr = &que->txr;
1986 struct rx_ring *rxr = &que->rxr;
1988 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i),
1989 vsi->rx_itr_setting);
1990 rxr->itr = vsi->rx_itr_setting;
1991 rxr->latency = IXL_AVE_LATENCY;
1993 wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i),
1994 vsi->tx_itr_setting);
1995 txr->itr = vsi->tx_itr_setting;
1996 txr->latency = IXL_AVE_LATENCY;
/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
static void
ixlv_set_queue_rx_itr(struct ixl_queue *que)
{
2007 struct ixl_vsi *vsi = que->vsi;
2008 struct i40e_hw *hw = vsi->hw;
2009 struct rx_ring *rxr = &que->rxr;
2015 /* Idle, do nothing */
2016 if (rxr->bytes == 0)
2019 if (ixlv_dynamic_rx_itr) {
2020 rx_bytes = rxr->bytes/rxr->itr;
2023 /* Adjust latency range */
2024 switch (rxr->latency) {
2025 case IXL_LOW_LATENCY:
2026 if (rx_bytes > 10) {
2027 rx_latency = IXL_AVE_LATENCY;
2028 rx_itr = IXL_ITR_20K;
2031 case IXL_AVE_LATENCY:
2032 if (rx_bytes > 20) {
2033 rx_latency = IXL_BULK_LATENCY;
2034 rx_itr = IXL_ITR_8K;
2035 } else if (rx_bytes <= 10) {
2036 rx_latency = IXL_LOW_LATENCY;
2037 rx_itr = IXL_ITR_100K;
2040 case IXL_BULK_LATENCY:
2041 if (rx_bytes <= 20) {
2042 rx_latency = IXL_AVE_LATENCY;
2043 rx_itr = IXL_ITR_20K;
2048 rxr->latency = rx_latency;
2050 if (rx_itr != rxr->itr) {
2051 /* do an exponential smoothing */
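			/* weighted harmonic mean: 9 parts current ITR, 1 part new value */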
2052 rx_itr = (10 * rx_itr * rxr->itr) /
2053 ((9 * rx_itr) + rxr->itr);
2054 rxr->itr = min(rx_itr, IXL_MAX_ITR);
2055 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
2056 que->me), rxr->itr);
	} else { /* We may have toggled to non-dynamic */
2059 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2060 vsi->rx_itr_setting = ixlv_rx_itr;
2061 /* Update the hardware if needed */
2062 if (rxr->itr != vsi->rx_itr_setting) {
2063 rxr->itr = vsi->rx_itr_setting;
2064 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
2065 que->me), rxr->itr);
/*
** Provide an update to the queue TX
** interrupt moderation value.
*/
static void
ixlv_set_queue_tx_itr(struct ixl_queue *que)
{
2081 struct ixl_vsi *vsi = que->vsi;
2082 struct i40e_hw *hw = vsi->hw;
2083 struct tx_ring *txr = &que->txr;
2089 /* Idle, do nothing */
2090 if (txr->bytes == 0)
2093 if (ixlv_dynamic_tx_itr) {
2094 tx_bytes = txr->bytes/txr->itr;
2097 switch (txr->latency) {
2098 case IXL_LOW_LATENCY:
2099 if (tx_bytes > 10) {
2100 tx_latency = IXL_AVE_LATENCY;
2101 tx_itr = IXL_ITR_20K;
2104 case IXL_AVE_LATENCY:
2105 if (tx_bytes > 20) {
2106 tx_latency = IXL_BULK_LATENCY;
2107 tx_itr = IXL_ITR_8K;
2108 } else if (tx_bytes <= 10) {
2109 tx_latency = IXL_LOW_LATENCY;
2110 tx_itr = IXL_ITR_100K;
2113 case IXL_BULK_LATENCY:
2114 if (tx_bytes <= 20) {
2115 tx_latency = IXL_AVE_LATENCY;
2116 tx_itr = IXL_ITR_20K;
2121 txr->latency = tx_latency;
2123 if (tx_itr != txr->itr) {
2124 /* do an exponential smoothing */
2125 tx_itr = (10 * tx_itr * txr->itr) /
2126 ((9 * tx_itr) + txr->itr);
2127 txr->itr = min(tx_itr, IXL_MAX_ITR);
2128 wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2129 que->me), txr->itr);
	} else { /* We may have toggled to non-dynamic */
2133 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2134 vsi->tx_itr_setting = ixlv_tx_itr;
2135 /* Update the hardware if needed */
2136 if (txr->itr != vsi->tx_itr_setting) {
2137 txr->itr = vsi->tx_itr_setting;
2138 wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2139 que->me), txr->itr);
2150 ** MSIX Interrupt Handlers and Tasklets
2154 ixlv_handle_que(void *context, int pending)
2156 struct ixl_queue *que = context;
2157 struct ixl_vsi *vsi = que->vsi;
2158 struct i40e_hw *hw = vsi->hw;
2159 struct tx_ring *txr = &que->txr;
2160 struct ifnet *ifp = vsi->ifp;
2163 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2164 more = ixl_rxeof(que, IXL_RX_LIMIT);
2165 mtx_lock(&txr->mtx);
2167 if (!drbr_empty(ifp, txr->br))
2168 ixl_mq_start_locked(ifp, txr);
2169 mtx_unlock(&txr->mtx);
2171 taskqueue_enqueue(que->tq, &que->task);
2176 /* Reenable this interrupt - hmmm */
2177 ixlv_enable_queue_irq(hw, que->me);
2182 /*********************************************************************
2184 * MSIX Queue Interrupt Service routine
2186 **********************************************************************/
2188 ixlv_msix_que(void *arg)
2190 struct ixl_queue *que = arg;
2191 struct ixl_vsi *vsi = que->vsi;
2192 struct i40e_hw *hw = vsi->hw;
2193 struct tx_ring *txr = &que->txr;
2194 bool more_tx, more_rx;
2196 /* Spurious interrupts are ignored */
2197 if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
2202 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
2204 mtx_lock(&txr->mtx);
2205 more_tx = ixl_txeof(que);
2207 ** Make certain that if the stack
2208 ** has anything queued the task gets
2209 ** scheduled to handle it.
2211 if (!drbr_empty(vsi->ifp, txr->br))
2213 mtx_unlock(&txr->mtx);
2215 ixlv_set_queue_rx_itr(que);
2216 ixlv_set_queue_tx_itr(que);
2218 if (more_tx || more_rx)
2219 taskqueue_enqueue(que->tq, &que->task);
2221 ixlv_enable_queue_irq(hw, que->me);
2227 /*********************************************************************
2229 * Media Ioctl callback
2231 * This routine is called whenever the user queries the status of
2232 * the interface using ifconfig.
2234 **********************************************************************/
2236 ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
2238 struct ixl_vsi *vsi = ifp->if_softc;
2239 struct ixlv_sc *sc = vsi->back;
2241 INIT_DBG_IF(ifp, "begin");
2245 ixlv_update_link_status(sc);
2247 ifmr->ifm_status = IFM_AVALID;
2248 ifmr->ifm_active = IFM_ETHER;
2251 mtx_unlock(&sc->mtx);
2252 INIT_DBG_IF(ifp, "end: link not up");
2256 ifmr->ifm_status |= IFM_ACTIVE;
2257 /* Hardware is always full-duplex */
2258 ifmr->ifm_active |= IFM_FDX;
2259 mtx_unlock(&sc->mtx);
2260 INIT_DBG_IF(ifp, "end");
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixlv_media_change(struct ifnet * ifp)
{
2275 struct ixl_vsi *vsi = ifp->if_softc;
2276 struct ifmedia *ifm = &vsi->media;
2278 INIT_DBG_IF(ifp, "begin");
2280 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2283 INIT_DBG_IF(ifp, "end");
2288 /*********************************************************************
2289 * Multicast Initialization
 *  This routine is called by init to establish a fresh state.
2293 **********************************************************************/
2296 ixlv_init_multi(struct ixl_vsi *vsi)
2298 struct ixlv_mac_filter *f;
2299 struct ixlv_sc *sc = vsi->back;
2302 IOCTL_DBG_IF(vsi->ifp, "begin");
2304 /* First clear any multicast filters */
2305 SLIST_FOREACH(f, sc->mac_filters, next) {
2306 if ((f->flags & IXL_FILTER_USED)
2307 && (f->flags & IXL_FILTER_MC)) {
2308 f->flags |= IXL_FILTER_DEL;
2313 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2314 IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2317 IOCTL_DBG_IF(vsi->ifp, "end");
2321 ixlv_add_multi(struct ixl_vsi *vsi)
2323 struct ifmultiaddr *ifma;
2324 struct ifnet *ifp = vsi->ifp;
2325 struct ixlv_sc *sc = vsi->back;
2328 IOCTL_DBG_IF(ifp, "begin");
2330 if_maddr_rlock(ifp);
2332 ** Get a count, to decide if we
2333 ** simply use multicast promiscuous.
2335 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2336 if (ifma->ifma_addr->sa_family != AF_LINK)
2340 if_maddr_runlock(ifp);
2342 /* TODO: Remove -- cannot set promiscuous mode in a VF */
2343 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
2344 /* delete all multicast filters */
2345 ixlv_init_multi(vsi);
2346 sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
2347 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2348 IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
2350 IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
2355 if_maddr_rlock(ifp);
2356 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2357 if (ifma->ifma_addr->sa_family != AF_LINK)
2359 if (!ixlv_add_mac_filter(sc,
2360 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2364 if_maddr_runlock(ifp);
	/*
	** Notify the AQ task that sw filters need to be
	** added to the hw filter list.
	*/
2370 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2371 IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
2374 IOCTL_DBG_IF(ifp, "end");
2378 ixlv_del_multi(struct ixl_vsi *vsi)
2380 struct ixlv_mac_filter *f;
2381 struct ifmultiaddr *ifma;
2382 struct ifnet *ifp = vsi->ifp;
2383 struct ixlv_sc *sc = vsi->back;
2387 IOCTL_DBG_IF(ifp, "begin");
2389 /* Search for removed multicast addresses */
2390 if_maddr_rlock(ifp);
2391 SLIST_FOREACH(f, sc->mac_filters, next) {
2392 if ((f->flags & IXL_FILTER_USED)
2393 && (f->flags & IXL_FILTER_MC)) {
2394 /* check if mac address in filter is in sc's list */
2396 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2397 if (ifma->ifma_addr->sa_family != AF_LINK)
2400 (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2401 if (cmp_etheraddr(f->macaddr, mc_addr)) {
2406 			/* if this filter's address is no longer on the interface, mark it for removal */
2407 if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
2408 f->flags |= IXL_FILTER_DEL;
2410 IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
2411 MAC_FORMAT_ARGS(f->macaddr));
2413 else if (match == FALSE)
2414 IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
2415 MAC_FORMAT_ARGS(f->macaddr));
2418 if_maddr_runlock(ifp);
2421 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2422 IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2425 IOCTL_DBG_IF(ifp, "end");
2428 /*********************************************************************
2431  This routine checks for link status, updates statistics,
2432  and runs the watchdog check.
2434 **********************************************************************/
2437 ixlv_local_timer(void *arg)
2439 struct ixlv_sc *sc = arg;
2440 struct i40e_hw *hw = &sc->hw;
2441 struct ixl_vsi *vsi = &sc->vsi;
2442 struct ixl_queue *que = vsi->queues;
2443 device_t dev = sc->dev;
2444 struct tx_ring *txr;
2447 s32 timer, new_timer;
2449 IXLV_CORE_LOCK_ASSERT(sc);
2451 /* If Reset is in progress just bail */
2452 if (sc->init_state == IXLV_RESET_PENDING)
2455 /* Check for when PF triggers a VF reset */
2456 val = rd32(hw, I40E_VFGEN_RSTAT) &
2457 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2459 if (val != I40E_VFR_VFACTIVE
2460 && val != I40E_VFR_COMPLETED) {
2461 DDPRINTF(dev, "reset in progress! (%d)", val);
2465 ixlv_request_stats(sc);
2467 /* clean and process any events */
2468 taskqueue_enqueue(sc->tq, &sc->aq_irq);
2471 ** Check status on the queues for a hang
2473 mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
2474 I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
2475 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
2477 for (int i = 0; i < vsi->num_queues; i++, que++) {
2479 timer = atomic_load_acq_32(&txr->watchdog_timer);
2481 new_timer = timer - hz;
2482 if (new_timer <= 0) {
2483 atomic_store_rel_32(&txr->watchdog_timer, -1);
2484 device_printf(dev, "WARNING: queue %d "
2485 "appears to be hung!\n", que->me);
2489 		 * If this fails, it means something in the TX path has updated
2490 		 * the watchdog since we read it, so the TX path is still working and
2491 		 * the watchdog doesn't need to count down.
2493 atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer);
2494 /* Any queues with outstanding work get a sw irq */
2495 wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
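		/*
		 * Writing the mask built above (interrupt enable, software
		 * interrupt trigger and ITR index) re-arms the vector and
		 * forces a software interrupt so the queue handler runs and
		 * can clean up any outstanding TX work.
		 */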
2499 /* Reset when a queue shows hung */
2503 callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
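	/*
	 * A hung queue was detected above: log it, mark the softc as
	 * needing a reset, count the watchdog event and reinitialize
	 * the interface.
	 */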
2507 device_printf(dev, "WARNING: Resetting!\n");
2508 sc->init_state = IXLV_RESET_REQUIRED;
2509 sc->watchdog_events++;
2511 ixlv_init_locked(sc);
2515 ** Note: this routine updates the OS on the link state;
2516 ** the real check of the hardware only happens with
2517 ** a link interrupt.
2520 ixlv_update_link_status(struct ixlv_sc *sc)
2522 struct ixl_vsi *vsi = &sc->vsi;
2523 struct ifnet *ifp = vsi->ifp;
2526 if (vsi->link_active == FALSE) {
2528 			if_printf(ifp, "Link is Up, %d Gbps\n",
2529 (sc->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
2530 vsi->link_active = TRUE;
2531 if_link_state_change(ifp, LINK_STATE_UP);
2533 } else { /* Link down */
2534 if (vsi->link_active == TRUE) {
2536 			if_printf(ifp, "Link is Down\n");
2537 if_link_state_change(ifp, LINK_STATE_DOWN);
2538 vsi->link_active = FALSE;
2545 /*********************************************************************
2547  *  This routine disables all traffic on the adapter: it flushes any
2548  *  pending virtual channel commands, disables the queues, and stops the local timer.
2550 **********************************************************************/
2553 ixlv_stop(struct ixlv_sc *sc)
2559 INIT_DBG_IF(ifp, "begin");
2561 IXLV_CORE_LOCK_ASSERT(sc);
2563 ixl_vc_flush(&sc->vc_mgr);
2564 ixlv_disable_queues(sc);
2567 while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
2568 ((ticks - start) < hz/10))
2569 ixlv_do_adminq_locked(sc);
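	/*
	 * The loop above polls the admin queue for up to hz/10 ticks
	 * (roughly 100 ms), waiting for IFF_DRV_RUNNING to clear --
	 * presumably once the PF acknowledges the queue-disable request.
	 */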
2571 /* Stop the local timer */
2572 callout_stop(&sc->timer);
2574 INIT_DBG_IF(ifp, "end");
2578 /*********************************************************************
2580  *  Free all of the VSI's queue structs.
2582 **********************************************************************/
2584 ixlv_free_queues(struct ixl_vsi *vsi)
2586 struct ixlv_sc *sc = (struct ixlv_sc *)vsi->back;
2587 struct ixl_queue *que = vsi->queues;
2589 for (int i = 0; i < vsi->num_queues; i++, que++) {
2590 struct tx_ring *txr = &que->txr;
2591 struct rx_ring *rxr = &que->rxr;
2593 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2596 ixl_free_que_tx(que);
2598 i40e_free_dma_mem(&sc->hw, &txr->dma);
2600 IXL_TX_LOCK_DESTROY(txr);
2602 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2605 ixl_free_que_rx(que);
2607 i40e_free_dma_mem(&sc->hw, &rxr->dma);
2609 IXL_RX_LOCK_DESTROY(rxr);
2612 free(vsi->queues, M_DEVBUF);
2616 ixlv_config_rss_reg(struct ixlv_sc *sc)
2618 struct i40e_hw *hw = &sc->hw;
2619 struct ixl_vsi *vsi = &sc->vsi;
2621 u64 set_hena = 0, hena;
2623 u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
2625 u32 rss_hash_config;
2628 /* Don't set up RSS if using a single queue */
2629 if (vsi->num_queues == 1) {
2630 wr32(hw, I40E_VFQF_HENA(0), 0);
2631 wr32(hw, I40E_VFQF_HENA(1), 0);
2637 /* Fetch the configured RSS key */
2638 rss_getkey((uint8_t *) &rss_seed);
2640 ixl_get_default_rss_key(rss_seed);
2643 /* Fill out hash function seed */
2644 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
2645 wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
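	/*
	 * Note: the seed written above comes either from the kernel RSS
	 * layer (rss_getkey()) or from the driver default
	 * (ixl_get_default_rss_key()); the choice between the two is
	 * likely made by an RSS build-time option.
	 */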
2647 /* Enable PCTYPES for RSS: */
2649 rss_hash_config = rss_gethashconfig();
2650 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2651 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2652 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2653 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2654 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2655 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2656 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2657 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2658 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2659 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2660 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2661 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2662 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2663 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
2665 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
2667 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
2668 ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
2670 wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
2671 wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
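	/*
	 * HENA is a 64-bit bitmask of enabled packet classifier types;
	 * it is split across the two 32-bit VFQF_HENA registers, low
	 * word in HENA(0) and high word in HENA(1).
	 */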
2673 	/* Populate the LUT with the max number of queues in round-robin fashion */
2674 for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
2675 if (j == vsi->num_queues)
2679 * Fetch the RSS bucket id for the given indirection entry.
2680 		 * Cap it at the number of configured buckets (which is num_queues).
2683 que_id = rss_get_indirection_to_bucket(i);
2684 que_id = que_id % vsi->num_queues;
2688 /* lut = 4-byte sliding window of 4 lut entries */
2689 lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
2690 		/* Every fourth iteration lut holds 4 entries; write them to the register */
2692 wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
2693 DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
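		/*
		 * Illustrative example (not from the original source),
		 * assuming lut starts at zero: with four queues, if the
		 * first four bucket ids come out as 0, 1, 2 and 3, the
		 * sliding window packs them as lut = 0x00010203, which is
		 * written to HLUT(0); the next four entries go to HLUT(1),
		 * and so on.
		 */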
2700 ixlv_config_rss_pf(struct ixlv_sc *sc)
2702 ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_key_cmd,
2703 IXLV_FLAG_AQ_CONFIG_RSS_KEY, ixl_init_cmd_complete, sc);
2705 ixl_vc_enqueue(&sc->vc_mgr, &sc->set_rss_hena_cmd,
2706 IXLV_FLAG_AQ_SET_RSS_HENA, ixl_init_cmd_complete, sc);
2708 ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_lut_cmd,
2709 IXLV_FLAG_AQ_CONFIG_RSS_LUT, ixl_init_cmd_complete, sc);
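	/*
	 * When RSS is configured through the PF, the key, the hash-enable
	 * (HENA) setting and the lookup table are each sent as separate
	 * virtual channel commands and completed asynchronously by the
	 * command manager.
	 */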
2713 ** ixlv_config_rss - setup RSS
2715 ** The RSS key and lookup table are cleared on VF reset, so they must be reprogrammed here.
2718 ixlv_config_rss(struct ixlv_sc *sc)
2720 if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG) {
2721 DDPRINTF(sc->dev, "Setting up RSS using VF registers...");
2722 ixlv_config_rss_reg(sc);
2723 } else if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2724 DDPRINTF(sc->dev, "Setting up RSS using messages to PF...");
2725 ixlv_config_rss_pf(sc);
2727 device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
2731 ** This routine refreshes the VLAN filters; called by init,
2732 ** it scans the filter table and then updates the AQ.
2735 ixlv_setup_vlan_filters(struct ixlv_sc *sc)
2737 struct ixl_vsi *vsi = &sc->vsi;
2738 struct ixlv_vlan_filter *f;
2741 if (vsi->num_vlans == 0)
2744 	** Scan the filter table for VLAN entries,
2745 	** and if any are found, request the AQ update.
2747 SLIST_FOREACH(f, sc->vlan_filters, next)
2748 if (f->flags & IXL_FILTER_ADD)
2751 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
2752 IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
2757 ** This routine adds new MAC filters to the sc's list;
2758 ** these are later added in hardware by sending a virtual channel message.
2762 ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
2764 struct ixlv_mac_filter *f;
2766 /* Does one already exist? */
2767 f = ixlv_find_mac_filter(sc, macaddr);
2769 IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
2770 MAC_FORMAT_ARGS(macaddr));
2774 /* If not, get a new empty filter */
2775 f = ixlv_get_mac_filter(sc);
2777 if_printf(sc->vsi.ifp, "%s: no filters available!!\n",
2782 IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
2783 MAC_FORMAT_ARGS(macaddr));
2785 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2786 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
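	/*
	 * The filter is only marked ADD/USED here; the actual hardware
	 * update happens later, when the queued virtual channel command
	 * that adds MAC filters is processed.
	 */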
2792 ** Marks a MAC filter for deletion.
2795 ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
2797 struct ixlv_mac_filter *f;
2799 f = ixlv_find_mac_filter(sc, macaddr);
2803 f->flags |= IXL_FILTER_DEL;
2808 ** Task handler for MSI-X Adminq interrupts
2809 ** - done outside interrupt context since it might sleep
2812 ixlv_do_adminq(void *context, int pending)
2814 struct ixlv_sc *sc = context;
2817 ixlv_do_adminq_locked(sc);
2818 mtx_unlock(&sc->mtx);
2823 ixlv_do_adminq_locked(struct ixlv_sc *sc)
2825 struct i40e_hw *hw = &sc->hw;
2826 struct i40e_arq_event_info event;
2827 struct i40e_virtchnl_msg *v_msg;
2828 device_t dev = sc->dev;
2832 bool aq_error = false;
2834 IXLV_CORE_LOCK_ASSERT(sc);
2836 event.buf_len = IXL_AQ_BUF_SZ;
2837 event.msg_buf = sc->aq_buffer;
2838 v_msg = (struct i40e_virtchnl_msg *)&event.desc;
2841 ret = i40e_clean_arq_element(hw, &event, &result);
2844 ixlv_vc_completion(sc, v_msg->v_opcode,
2845 v_msg->v_retval, event.msg_buf, event.msg_len);
2847 bzero(event.msg_buf, IXL_AQ_BUF_SZ);
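		/*
		 * Each ARQ element cleaned above carries a virtchnl message;
		 * its opcode and return value are dispatched to
		 * ixlv_vc_completion(), and the message buffer is zeroed
		 * before being reused for the next element.
		 */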
2850 /* check for Admin queue errors */
2851 oldreg = reg = rd32(hw, hw->aq.arq.len);
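	/*
	 * The admin queue length registers also carry error status bits:
	 * any VF, overflow or critical errors found below are reported,
	 * the bits are cleared from the shadow copy before it is written
	 * back, and a flagged error can lead to the reset at the end of
	 * this routine.
	 */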
2852 if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
2853 device_printf(dev, "ARQ VF Error detected\n");
2854 reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
2857 if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
2858 device_printf(dev, "ARQ Overflow Error detected\n");
2859 reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
2862 if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
2863 device_printf(dev, "ARQ Critical Error detected\n");
2864 reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
2868 wr32(hw, hw->aq.arq.len, reg);
2870 oldreg = reg = rd32(hw, hw->aq.asq.len);
2871 if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
2872 device_printf(dev, "ASQ VF Error detected\n");
2873 reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
2876 if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
2877 device_printf(dev, "ASQ Overflow Error detected\n");
2878 reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
2881 if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
2882 device_printf(dev, "ASQ Critical Error detected\n");
2883 reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
2887 wr32(hw, hw->aq.asq.len, reg);
2890 /* Need to reset adapter */
2891 device_printf(dev, "WARNING: Resetting!\n");
2892 sc->init_state = IXLV_RESET_REQUIRED;
2894 ixlv_init_locked(sc);
2896 ixlv_enable_adminq_irq(hw);
2900 ixlv_add_sysctls(struct ixlv_sc *sc)
2902 device_t dev = sc->dev;
2903 struct ixl_vsi *vsi = &sc->vsi;
2904 struct i40e_eth_stats *es = &vsi->eth_stats;
2906 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2907 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2908 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2910 struct sysctl_oid *vsi_node, *queue_node;
2911 struct sysctl_oid_list *vsi_list, *queue_list;
2913 #define QUEUE_NAME_LEN 32
2914 char queue_namebuf[QUEUE_NAME_LEN];
2916 struct ixl_queue *queues = vsi->queues;
2917 struct tx_ring *txr;
2918 struct rx_ring *rxr;
2920 /* Driver statistics sysctls */
2921 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2922 CTLFLAG_RD, &sc->watchdog_events,
2923 "Watchdog timeouts");
2924 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2925 CTLFLAG_RD, &sc->admin_irq,
2926 "Admin Queue IRQ Handled");
2928 /* VSI statistics sysctls */
2929 vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
2930 CTLFLAG_RD, NULL, "VSI-specific statistics");
2931 vsi_list = SYSCTL_CHILDREN(vsi_node);
2933 struct ixl_sysctl_info ctls[] =
2935 {&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2936 {&es->rx_unicast, "ucast_pkts_rcvd",
2937 "Unicast Packets Received"},
2938 {&es->rx_multicast, "mcast_pkts_rcvd",
2939 "Multicast Packets Received"},
2940 {&es->rx_broadcast, "bcast_pkts_rcvd",
2941 "Broadcast Packets Received"},
2942 {&es->rx_discards, "rx_discards", "Discarded RX packets"},
2943 {&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"},
2944 {&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2945 {&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2946 {&es->tx_multicast, "mcast_pkts_txd",
2947 "Multicast Packets Transmitted"},
2948 {&es->tx_broadcast, "bcast_pkts_txd",
2949 "Broadcast Packets Transmitted"},
2950 {&es->tx_errors, "tx_errors", "TX packet errors"},
2954 struct ixl_sysctl_info *entry = ctls;
2955 while (entry->stat != NULL)
2957 SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name,
2958 CTLFLAG_RD, entry->stat,
2959 entry->description);
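	/*
	 * The loop above walks the ctls[] table, creating one read-only
	 * 64-bit sysctl per VSI statistic; the table is expected to end
	 * with an all-zero sentinel entry so entry->stat eventually
	 * becomes NULL.
	 */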
2964 for (int q = 0; q < vsi->num_queues; q++) {
2965 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2966 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
2967 CTLFLAG_RD, NULL, "Queue Name");
2968 queue_list = SYSCTL_CHILDREN(queue_node);
2970 txr = &(queues[q].txr);
2971 rxr = &(queues[q].rxr);
2973 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2974 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2975 "m_defrag() failed");
2976 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
2977 CTLFLAG_RD, &(queues[q].dropped_pkts),
2978 "Driver dropped packets");
2979 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
2980 CTLFLAG_RD, &(queues[q].irqs),
2981 "irqs on this queue");
2982 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2983 CTLFLAG_RD, &(queues[q].tso),
2985 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
2986 CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
2987 "Driver tx dma failure in xmit");
2988 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2989 CTLFLAG_RD, &(txr->no_desc),
2990 "Queue No Descriptor Available");
2991 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2992 CTLFLAG_RD, &(txr->total_packets),
2993 "Queue Packets Transmitted");
2994 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2995 CTLFLAG_RD, &(txr->tx_bytes),
2996 "Queue Bytes Transmitted");
2997 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2998 CTLFLAG_RD, &(rxr->rx_packets),
2999 "Queue Packets Received");
3000 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3001 CTLFLAG_RD, &(rxr->rx_bytes),
3002 "Queue Bytes Received");
3003 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
3004 CTLFLAG_RD, &(rxr->itr), 0,
3005 "Queue Rx ITR Interval");
3006 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
3007 CTLFLAG_RD, &(txr->itr), 0,
3008 "Queue Tx ITR Interval");
3011 /* Examine queue state */
3012 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head",
3013 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
3014 sizeof(struct ixl_queue),
3015 ixlv_sysctl_qtx_tail_handler, "IU",
3016 "Queue Transmit Descriptor Tail");
3017 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_head",
3018 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
3019 sizeof(struct ixl_queue),
3020 ixlv_sysctl_qrx_tail_handler, "IU",
3021 "Queue Receive Descriptor Tail");
3022 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "watchdog_timer",
3023 				CTLFLAG_RD, &(txr->watchdog_timer), 0,
3024 "Ticks before watchdog event is triggered");
3030 ixlv_init_filters(struct ixlv_sc *sc)
3032 sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
3033 M_DEVBUF, M_NOWAIT | M_ZERO);
3034 SLIST_INIT(sc->mac_filters);
3035 sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
3036 M_DEVBUF, M_NOWAIT | M_ZERO);
3037 SLIST_INIT(sc->vlan_filters);
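	/*
	 * Note: both allocations above use M_NOWAIT and may return NULL;
	 * the result is not checked before SLIST_INIT, so callers
	 * implicitly assume the list heads were successfully allocated.
	 */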
3042 ixlv_free_filters(struct ixlv_sc *sc)
3044 struct ixlv_mac_filter *f;
3045 struct ixlv_vlan_filter *v;
3047 while (!SLIST_EMPTY(sc->mac_filters)) {
3048 f = SLIST_FIRST(sc->mac_filters);
3049 SLIST_REMOVE_HEAD(sc->mac_filters, next);
3052 while (!SLIST_EMPTY(sc->vlan_filters)) {
3053 v = SLIST_FIRST(sc->vlan_filters);
3054 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
3062 * ixlv_sysctl_qtx_tail_handler
3063 * Retrieves I40E_QTX_TAIL1 value from hardware
3067 ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
3069 struct ixl_queue *que;
3073 que = ((struct ixl_queue *)oidp->oid_arg1);
3076 val = rd32(que->vsi->hw, que->txr.tail);
3077 error = sysctl_handle_int(oidp, &val, 0, req);
3078 if (error || !req->newptr)
3084 * ixlv_sysctl_qrx_tail_handler
3085 * Retrieves I40E_QRX_TAIL1 value from hardware
3089 ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
3091 struct ixl_queue *que;
3095 que = ((struct ixl_queue *)oidp->oid_arg1);
3098 val = rd32(que->vsi->hw, que->rxr.tail);
3099 error = sysctl_handle_int(oidp, &val, 0, req);
3100 if (error || !req->newptr)