/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixl.h"
#include "ixlv.h"
/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixlv_driver_version[] = "1.4.12-k";
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixlv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/
static char *ixlv_strings[] = {
	"Intel(R) Ethernet Connection XL710/X722 VF Driver"
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixlv_probe(device_t);
static int	ixlv_attach(device_t);
static int	ixlv_detach(device_t);
static int	ixlv_shutdown(device_t);
static void	ixlv_init_locked(struct ixlv_sc *);
static int	ixlv_allocate_pci_resources(struct ixlv_sc *);
static void	ixlv_free_pci_resources(struct ixlv_sc *);
static int	ixlv_assign_msix(struct ixlv_sc *);
static int	ixlv_init_msix(struct ixlv_sc *);
static int	ixlv_init_taskqueue(struct ixlv_sc *);
static int	ixlv_setup_queues(struct ixlv_sc *);
static void	ixlv_config_rss(struct ixlv_sc *);
static void	ixlv_stop(struct ixlv_sc *);
static void	ixlv_add_multi(struct ixl_vsi *);
static void	ixlv_del_multi(struct ixl_vsi *);
static void	ixlv_free_queues(struct ixl_vsi *);
static int	ixlv_setup_interface(device_t, struct ixlv_sc *);
static int	ixlv_teardown_adminq_msix(struct ixlv_sc *);

static int	ixlv_media_change(struct ifnet *);
static void	ixlv_media_status(struct ifnet *, struct ifmediareq *);

static void	ixlv_local_timer(void *);

static int	ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
static int	ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void	ixlv_init_filters(struct ixlv_sc *);
static void	ixlv_free_filters(struct ixlv_sc *);

static void	ixlv_msix_que(void *);
static void	ixlv_msix_adminq(void *);
static void	ixlv_do_adminq(void *, int);
static void	ixlv_do_adminq_locked(struct ixlv_sc *sc);
static void	ixlv_handle_que(void *, int);
static int	ixlv_reset(struct ixlv_sc *);
static int	ixlv_reset_complete(struct i40e_hw *);
static void	ixlv_set_queue_rx_itr(struct ixl_queue *);
static void	ixlv_set_queue_tx_itr(struct ixl_queue *);
static void	ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
		    enum i40e_status_code);
static void	ixlv_configure_itr(struct ixlv_sc *);

static void	ixlv_enable_adminq_irq(struct i40e_hw *);
static void	ixlv_disable_adminq_irq(struct i40e_hw *);
static void	ixlv_enable_queue_irq(struct i40e_hw *, int);
static void	ixlv_disable_queue_irq(struct i40e_hw *, int);

static void	ixlv_setup_vlan_filters(struct ixlv_sc *);
static void	ixlv_register_vlan(void *, struct ifnet *, u16);
static void	ixlv_unregister_vlan(void *, struct ifnet *, u16);

static void	ixlv_init_hw(struct ixlv_sc *);
static int	ixlv_setup_vc(struct ixlv_sc *);
static int	ixlv_vf_config(struct ixlv_sc *);

static void	ixlv_cap_txcsum_tso(struct ixl_vsi *,
		    struct ifnet *, int);

static void	ixlv_add_sysctls(struct ixlv_sc *);

static int	ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixlv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixlv_probe),
	DEVMETHOD(device_attach, ixlv_attach),
	DEVMETHOD(device_detach, ixlv_detach),
	DEVMETHOD(device_shutdown, ixlv_shutdown),
	{0, 0}
};

static driver_t ixlv_driver = {
	"ixlv", ixlv_methods, sizeof(struct ixlv_sc),
};

devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);

MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);
/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
    "IXLV driver parameters");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixlv_ringsz = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixlv_ringsz, 0, "Descriptor Ring Size");

/* Set to zero to auto calculate */
int ixlv_max_queues = 0;
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixlv_max_queues, 0, "Number of Queues");

/*
** Number of entries in Tx queue buf_ring.
** Increasing this will reduce the number of
** errors when transmitting fragmented UDP
** packets.
*/
static int ixlv_txbrsz = DEFAULT_TXBRSZ;
TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
    &ixlv_txbrsz, 0, "TX Buf Ring Size");

/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
**	- default values for static ITR
*/
int ixlv_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixlv_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixlv_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixlv_rx_itr, 0, "RX Interrupt Rate");

int ixlv_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixlv_tx_itr, 0, "TX Interrupt Rate");
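
/*
** Example: all of the above are boot-time tunables (standard FreeBSD
** tunable mechanics; the values shown are illustrative only), so they
** can be set from /boot/loader.conf before the module loads:
**
**	hw.ixlv.ringsz="2048"
**	hw.ixlv.max_queues="4"
**	hw.ixlv.dynamic_rx_itr="1"
**
** Being CTLFLAG_RDTUN, they are read-only via sysctl(8) at runtime.
*/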
/*********************************************************************
 *  Device identification routine
 *
 *  ixlv_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixlv_probe(device_t dev)
{
	ixl_vendor_info_t *ent;

	u16	pci_vendor_id, pci_device_id;
	u16	pci_subvendor_id, pci_subdevice_id;
	char	device_name[256];

	INIT_DEBUGOUT("ixlv_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixlv_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&
		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&
		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(device_name, "%s, Version - %s",
			    ixlv_strings[ent->index],
			    ixlv_driver_version);
			device_set_desc_copy(dev, device_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_attach(device_t dev)
{
	struct ixlv_sc	*sc;
	struct i40e_hw	*hw;
	struct ixl_vsi	*vsi;
	int		error = 0;

	INIT_DBG_DEV(dev, "begin");

	/* Allocate, clear, and link in our primary soft structure */
	sc = device_get_softc(dev);
	sc->dev = sc->osdep.dev = dev;
	hw = &sc->hw;
	vsi = &sc->vsi;
	vsi->dev = dev;

	/* Initialize hw struct */
	ixlv_init_hw(sc);

	/* Allocate filter lists */
	ixlv_init_filters(sc);

	/* Core Lock Init */
	mtx_init(&sc->mtx, device_get_nameunit(dev),
	    "IXL SC Lock", MTX_DEF);

	/* Set up the timer callout */
	callout_init_mtx(&sc->timer, &sc->mtx, 0);

	/* Do PCI setup - map BAR0, etc */
	if (ixlv_allocate_pci_resources(sc)) {
		device_printf(dev, "%s: Allocation of PCI resources failed\n",
		    __func__);
		error = ENXIO;
		goto err_early;
	}

	INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");

	error = i40e_set_mac_type(hw);
	if (error) {
		device_printf(dev, "%s: set_mac_type failed: %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	error = ixlv_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: Device is still being reset\n",
		    __func__);
		goto err_pci_res;
	}

	INIT_DBG_DEV(dev, "VF Device is ready for configuration");

	error = ixlv_setup_vc(sc);
	if (error) {
		device_printf(dev, "%s: Error setting up PF comms, %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	INIT_DBG_DEV(dev, "PF API version verified");

	/* Need API version before sending reset message */
	error = ixlv_reset(sc);
	if (error) {
		device_printf(dev, "VF reset failed; reload the driver\n");
		goto err_aq;
	}

	INIT_DBG_DEV(dev, "VF reset complete");

	/* Ask for VF config from PF */
	error = ixlv_vf_config(sc);
	if (error) {
		device_printf(dev, "Error getting configuration from PF: %d\n",
		    error);
		goto err_aq;
	}

	device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n",
	    sc->vf_res->num_vsis,
	    sc->vf_res->num_queue_pairs,
	    sc->vf_res->max_vectors,
	    sc->vf_res->rss_key_size,
	    sc->vf_res->rss_lut_size);

	device_printf(dev, "Offload flags: 0x%b\n",
	    sc->vf_res->vf_offload_flags, IXLV_PRINTF_VF_OFFLOAD_FLAGS);
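	/*
	 * %b prints the flag word with the symbolic bit names supplied by
	 * IXLV_PRINTF_VF_OFFLOAD_FLAGS, e.g. "0x1<L2>" (output shown is
	 * illustrative only).
	 */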
	/* got VF config message back from PF, now we can parse it */
	for (int i = 0; i < sc->vf_res->num_vsis; i++) {
		if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
			sc->vsi_res = &sc->vf_res->vsi_res[i];
	}
	if (!sc->vsi_res) {
		device_printf(dev, "%s: no LAN VSI found\n", __func__);
		error = EIO;
		goto err_res_buf;
	}

	INIT_DBG_DEV(dev, "Resource Acquisition complete");

	/* If no mac address was assigned just make a random one */
	if (!ixlv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		addr[0] &= 0xFE;	/* clear the multicast bit */
		addr[0] |= 0x02;	/* set the locally administered bit */
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Now that the number of queues for this VF is known, set up interrupts */
	sc->msix = ixlv_init_msix(sc);
	/* We fail without MSIX support */
	if (sc->msix == 0) {
		error = ENXIO;
		goto err_res_buf;
	}

	vsi->id = sc->vsi_res->vsi_id;
	vsi->back = (void *)sc;
	sc->link_up = TRUE;

	/* This allocates the memory and early settings */
	if (ixlv_setup_queues(sc) != 0) {
		device_printf(dev, "%s: setup queues failed!\n",
		    __func__);
		error = EIO;
		goto out;
	}

	/* Setup the stack interface */
	if (ixlv_setup_interface(dev, sc) != 0) {
		device_printf(dev, "%s: setup interface failed!\n",
		    __func__);
		error = EIO;
		goto out;
	}

	INIT_DBG_DEV(dev, "Queue memory and interface setup");

	/* Do queue interrupt setup */
	if (ixlv_assign_msix(sc) != 0) {
		device_printf(dev, "%s: allocating queue interrupts failed!\n",
		    __func__);
		error = ENXIO;
		goto out;
	}

	/* Start AdminQ taskqueue */
	ixlv_init_taskqueue(sc);

	/* Initialize stats */
	bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
	ixlv_add_sysctls(sc);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

	/* We want AQ enabled early */
	ixlv_enable_adminq_irq(hw);

	/* Set things up to run init */
	sc->init_state = IXLV_INIT_READY;

	ixl_vc_init_mgr(sc, &sc->vc_mgr);

	INIT_DBG_DEV(dev, "end");
	return (error);

out:
	ixlv_free_queues(vsi);
err_res_buf:
	free(sc->vf_res, M_DEVBUF);
err_aq:
	i40e_shutdown_adminq(hw);
err_pci_res:
	ixlv_free_pci_resources(sc);
err_early:
	mtx_destroy(&sc->mtx);
	ixlv_free_filters(sc);
	INIT_DBG_DEV(dev, "end: error %d", error);
	return (error);
}
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_detach(device_t dev)
{
	struct ixlv_sc	*sc = device_get_softc(dev);
	struct ixl_vsi	*vsi = &sc->vsi;
	struct i40e_hw	*hw = &sc->hw;
	enum i40e_status_code status;

	INIT_DBG_DEV(dev, "begin");

	/* Make sure VLANS are not using driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		if_printf(vsi->ifp, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	/* Stop driver */
	ether_ifdetach(vsi->ifp);
	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		mtx_lock(&sc->mtx);
		ixlv_stop(sc);
		mtx_unlock(&sc->mtx);
	}

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	/* Drain the virtchnl command manager */
	callout_drain(&sc->vc_mgr.callout);

	ixlv_disable_adminq_irq(hw);
	ixlv_teardown_adminq_msix(sc);
	/* Drain admin queue taskqueue */
	taskqueue_free(sc->tq);
	status = i40e_shutdown_adminq(&sc->hw);
	if (status != I40E_SUCCESS) {
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));
	}

	if_free(vsi->ifp);
	free(sc->vf_res, M_DEVBUF);
	ixlv_free_pci_resources(sc);
	ixlv_free_queues(vsi);
	ixlv_free_filters(sc);

	bus_generic_detach(dev);
	mtx_destroy(&sc->mtx);
	INIT_DBG_DEV(dev, "end");
	return (0);
}
/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixlv_shutdown(device_t dev)
{
	struct ixlv_sc	*sc = device_get_softc(dev);

	INIT_DBG_DEV(dev, "begin");

	mtx_lock(&sc->mtx);
	ixlv_stop(sc);
	mtx_unlock(&sc->mtx);

	INIT_DBG_DEV(dev, "end");
	return (0);
}
/*
 * Configure TXCSUM(IPV6) and TSO(4/6)
 *	- the hardware handles these together so we
 *	  need to tweak them
 */
static void
ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			if_printf(ifp,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			if_printf(ifp,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			if_printf(ifp,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			if_printf(ifp,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}
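
/*
 * Summary of the coupling enforced above (illustrative): TSO4 may only be
 * enabled while TXCSUM is enabled, so the four IFCAP states move as
 * follows on a toggle request:
 *
 *	neither set:	a TXCSUM request sets TXCSUM (and restores TSO4 if
 *			it was parked via IXL_FLAGS_KEEP_TSO4); a TSO4
 *			request enables both.
 *	TXCSUM only:	each request toggles its own bit.
 *	both set:	a TXCSUM request clears both and parks TSO4; a
 *			TSO4 request clears TSO4 alone.
 *
 * The second half of the function applies the same rules to
 * TXCSUM_IPV6/TSO6 with IXL_FLAGS_KEEP_TSO6.
 */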
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixlv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixlv_sc	*sc = vsi->back;
	struct ifreq	*ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int		error = 0;

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixlv_init(vsi);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
		mtx_lock(&sc->mtx);
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			error = EINVAL;
			IOCTL_DBG_IF(ifp, "mtu too large");
		} else {
			IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", (u_long)ifp->if_mtu, ifr->ifr_mtu);
			// ERJ: Interestingly enough, these types don't match
			ifp->if_mtu = (u_long)ifr->ifr_mtu;
			vsi->max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			    + ETHER_VLAN_ENCAP_LEN;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixlv_init_locked(sc);
		}
		mtx_unlock(&sc->mtx);
		break;
	case SIOCSIFFLAGS:
		IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
		mtx_lock(&sc->mtx);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixlv_init_locked(sc);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixlv_stop(sc);
		sc->if_flags = ifp->if_flags;
		mtx_unlock(&sc->mtx);
		break;
	case SIOCADDMULTI:
		IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			mtx_lock(&sc->mtx);
			ixlv_disable_intr(vsi);
			ixlv_add_multi(vsi);
			ixlv_enable_intr(vsi);
			mtx_unlock(&sc->mtx);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
		if (sc->init_state == IXLV_RUNNING) {
			mtx_lock(&sc->mtx);
			ixlv_disable_intr(vsi);
			ixlv_del_multi(vsi);
			ixlv_enable_intr(vsi);
			mtx_unlock(&sc->mtx);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");

		ixlv_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ixlv_init(vsi);
		}
		VLAN_CAPABILITIES(ifp);

		break;
	}
	default:
		IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*
** To do a reinit on the VF is unfortunately more complicated
** than a physical device; we must have the PF more or less
** completely recreate our memory, so many things that were
** done only once at attach in traditional drivers now must be
** redone at each reinitialization. This function does that
** 'prelude' so we can then call the normal locked init code.
*/
int
ixlv_reinit_locked(struct ixlv_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ifnet		*ifp = vsi->ifp;
	struct ixlv_mac_filter	*mf, *mf_temp;
	struct ixlv_vlan_filter	*vf;
	int			error = 0;

	INIT_DBG_IF(ifp, "begin");

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ixlv_stop(sc);

	error = ixlv_reset(sc);

	INIT_DBG_IF(ifp, "VF was reset");

	/* set the state in case we went thru RESET */
	sc->init_state = IXLV_RUNNING;

	/*
	** Resetting the VF drops all filters from hardware;
	** we need to mark them to be re-added in init.
	*/
	SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
		if (mf->flags & IXL_FILTER_DEL) {
			SLIST_REMOVE(sc->mac_filters, mf,
			    ixlv_mac_filter, next);
			free(mf, M_DEVBUF);
		} else
			mf->flags |= IXL_FILTER_ADD;
	}
	if (vsi->num_vlans != 0)
		SLIST_FOREACH(vf, sc->vlan_filters, next)
			vf->flags = IXL_FILTER_ADD;
	else { /* clean any stale filters */
		while (!SLIST_EMPTY(sc->vlan_filters)) {
			vf = SLIST_FIRST(sc->vlan_filters);
			SLIST_REMOVE_HEAD(sc->vlan_filters, next);
			free(vf, M_DEVBUF);
		}
	}

	ixlv_enable_adminq_irq(hw);
	ixl_vc_flush(&sc->vc_mgr);

	INIT_DBG_IF(ifp, "end");
	return (error);
}
static void
ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
	enum i40e_status_code code)
{
	struct ixlv_sc *sc;

	sc = arg;

	/*
	 * Ignore "Adapter Stopped" message as that happens if an ifconfig down
	 * happens while a command is in progress, so we don't print an error
	 * in that case.
	 */
	if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
		if_printf(sc->vsi.ifp,
		    "Error %s waiting for PF to complete operation %d\n",
		    i40e_stat_str(&sc->hw, code), cmd->request);
	}
}
static void
ixlv_init_locked(struct ixlv_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	int			error = 0;

	INIT_DBG_IF(ifp, "begin");

	IXLV_CORE_LOCK_ASSERT(sc);

	/* Do a reinit first if an init has already been done */
	if ((sc->init_state == IXLV_RUNNING) ||
	    (sc->init_state == IXLV_RESET_REQUIRED) ||
	    (sc->init_state == IXLV_RESET_PENDING))
		error = ixlv_reinit_locked(sc);
	/* Don't bother with init if we failed reinit */
	if (error)
		goto init_done;

	/* Remove existing MAC filter if new MAC addr is set */
	if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
		error = ixlv_del_mac_filter(sc, hw->mac.addr);
		if (error == 0)
			ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
			    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
			    sc);
	}

	/* Check for an LAA mac address... */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;

	/* Add mac filter for this VF to PF */
	if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
		error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
		if (!error || error == EEXIST)
			ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
			    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
			    sc);
	}

	/* Setup vlan's if needed */
	ixlv_setup_vlan_filters(sc);

	/* Prepare the queues for operation */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		ixl_init_tx_ring(que);

		if (vsi->max_frame_size <= MCLBYTES)
			rxr->mbuf_sz = MCLBYTES;
		else
			rxr->mbuf_sz = MJUMPAGESIZE;
		ixl_init_rx_ring(que);
	}

	/* Set initial ITR values */
	ixlv_configure_itr(sc);

	/* Configure queues */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
	    IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);

	/* Set up RSS */
	ixlv_config_rss(sc);

	/* Map vectors */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
	    IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);

	/* Enable queues */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
	    IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);

	/* Start the local timer */
	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);

	sc->init_state = IXLV_RUNNING;

init_done:
	INIT_DBG_IF(ifp, "end");
	return;
}
/*
** Init entry point for the stack
*/
void
ixlv_init(void *arg)
{
	struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
	struct ixlv_sc *sc = vsi->back;
	int retries = 0;

	/* Prevent init from running again while waiting for AQ calls
	 * made in init_locked() to complete. */
	mtx_lock(&sc->mtx);
	if (sc->init_in_progress) {
		mtx_unlock(&sc->mtx);
		return;
	} else
		sc->init_in_progress = true;

	ixlv_init_locked(sc);
	mtx_unlock(&sc->mtx);

	/* Wait for init_locked to finish */
	while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
	    && ++retries < IXLV_MAX_INIT_WAIT) {
		i40e_msec_pause(25);
	}
	if (retries >= IXLV_MAX_INIT_WAIT) {
		if_printf(vsi->ifp,
		    "Init failed to complete in allotted time!\n");
	}

	mtx_lock(&sc->mtx);
	sc->init_in_progress = false;
	mtx_unlock(&sc->mtx);
}
/*
 * ixlv_attach() helper function; gathers information about
 * the (virtual) hardware for use elsewhere in the driver.
 */
static void
ixlv_init_hw(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);
}
/*
 * ixlv_attach() helper function; initializes the admin queue
 * and attempts to establish contact with the PF by
 * retrying the initial "API version" message several times
 * or until the PF responds.
 */
static int
ixlv_setup_vc(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int error = 0, ret_error = 0, asq_retries = 0;
	bool send_api_ver_retried = 0;

	/* Need to set these AQ parameters before initializing AQ */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
		/* Initialize admin queue */
		error = i40e_init_adminq(hw);
		if (error) {
			device_printf(dev, "%s: init_adminq failed: %d\n",
			    __func__, error);
			ret_error = 1;
			continue;
		}

		INIT_DBG_DEV(dev, "Initialized Admin Queue; starting"
		    " send_api_ver attempt %d", i+1);

retry_send:
		/* Send VF's API version */
		error = ixlv_send_api_ver(sc);
		if (error) {
			i40e_shutdown_adminq(hw);
			ret_error = 2;
			device_printf(dev, "%s: unable to send api"
			    " version to PF on attempt %d, error %d\n",
			    __func__, i+1, error);
		}

		asq_retries = 0;
		while (!i40e_asq_done(hw)) {
			if (++asq_retries > IXLV_AQ_MAX_ERR) {
				i40e_shutdown_adminq(hw);
				device_printf(dev, "Admin Queue timeout "
				    "(waiting for send_api_ver), %d more tries...\n",
				    IXLV_AQ_MAX_ERR - (i + 1));
				ret_error = 3;
				break;
			}
			i40e_msec_pause(10);
		}
		if (asq_retries > IXLV_AQ_MAX_ERR)
			continue;

		INIT_DBG_DEV(dev, "Sent API version message to PF");

		/* Verify that the VF accepts the PF's API version */
		error = ixlv_verify_api_ver(sc);
		if (error == ETIMEDOUT) {
			if (!send_api_ver_retried) {
				/* Resend message, one more time */
				send_api_ver_retried = true;
				device_printf(dev,
				    "%s: Timeout while verifying API version on first"
				    " try!\n", __func__);
				goto retry_send;
			} else {
				device_printf(dev,
				    "%s: Timeout while verifying API version on second"
				    " try!\n", __func__);
				ret_error = 4;
				break;
			}
		}
		if (error) {
			device_printf(dev,
			    "%s: Unable to verify API version,"
			    " error %s\n", __func__, i40e_stat_str(hw, error));
			ret_error = 5;
		}
		break;
	}

	if (ret_error >= 4)
		i40e_shutdown_adminq(hw);
	return (ret_error);
}
/*
 * ixlv_attach() helper function; asks the PF for this VF's
 * configuration, and saves the information if it receives it.
 */
static int
ixlv_vf_config(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int bufsz, error = 0, ret_error = 0;
	int asq_retries, retried = 0;

retry_config:
	error = ixlv_send_vf_config_msg(sc);
	if (error) {
		device_printf(dev,
		    "%s: Unable to send VF config request, attempt %d,"
		    " error %d\n", __func__, retried + 1, error);
		ret_error = 2;
	}

	asq_retries = 0;
	while (!i40e_asq_done(hw)) {
		if (++asq_retries > IXLV_AQ_MAX_ERR) {
			device_printf(dev, "%s: Admin Queue timeout "
			    "(waiting for send_vf_config_msg), attempt %d\n",
			    __func__, retried + 1);
			ret_error = 3;
			goto fail;
		}
		i40e_msec_pause(10);
	}

	INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
	    retried + 1);

	if (!sc->vf_res) {
		bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
		    (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
		sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
		if (!sc->vf_res) {
			device_printf(dev,
			    "%s: Unable to allocate memory for VF configuration"
			    " message from PF on attempt %d\n", __func__, retried + 1);
			ret_error = 1;
			goto fail;
		}
	}

	/* Check for VF config response */
	error = ixlv_get_vf_config(sc);
	if (error == ETIMEDOUT) {
		/* The 1st time we timeout, send the configuration message again */
		if (!retried) {
			retried++;
			goto retry_config;
		}
		device_printf(dev,
		    "%s: ixlv_get_vf_config() timed out waiting for a response\n",
		    __func__);
	}
	if (error) {
		device_printf(dev,
		    "%s: Unable to get VF configuration from PF after %d tries!\n",
		    __func__, retried + 1);
		ret_error = 4;
	}
	goto done;

fail:
	free(sc->vf_res, M_DEVBUF);
done:
	return (ret_error);
}
/*
 * Allocate MSI/X vectors, setup the AQ vector early
 */
static int
ixlv_init_msix(struct ixlv_sc *sc)
{
	device_t dev = sc->dev;
	int rid, want, vectors, queues, available;
	int auto_max_queues;

	rid = PCIR_BAR(IXL_MSIX_BAR);
	sc->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!sc->msix_mem) {
		/* May not be enabled */
		device_printf(sc->dev,
		    "Unable to map MSIX table\n");
		goto fail;
	}

	available = pci_msix_count(dev);
	if (available == 0) { /* system has msix disabled */
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, sc->msix_mem);
		sc->msix_mem = NULL;
		goto fail;
	}

	/* Clamp queues to number of CPUs and # of MSI-X vectors available */
	auto_max_queues = min(mp_ncpus, available - 1);
	/* Clamp queues to # assigned to VF by PF */
	auto_max_queues = min(auto_max_queues, sc->vf_res->num_queue_pairs);

	/* Override with tunable value if tunable is less than autoconfig count */
	if ((ixlv_max_queues != 0) && (ixlv_max_queues <= auto_max_queues))
		queues = ixlv_max_queues;
	/* Use autoconfig amount if that's lower */
	else if ((ixlv_max_queues != 0) && (ixlv_max_queues > auto_max_queues)) {
		device_printf(dev, "ixlv_max_queues (%d) is too large, using "
		    "autoconfig amount (%d)...\n",
		    ixlv_max_queues, auto_max_queues);
		queues = auto_max_queues;
	}
	/* Limit maximum auto-configured queues to 8 if no user value is set */
	else
		queues = min(auto_max_queues, 8);

#ifdef RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif
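
	/*
	 * Worked example (numbers illustrative): on an 8-CPU host where
	 * the PF grants 4 queue pairs and 5 MSI-X vectors,
	 * auto_max_queues = min(8, 5 - 1) = 4, then min(4, 4) = 4; with
	 * no tunable set, queues = min(4, 8) = 4, and the code below asks
	 * for 4 + 1 = 5 vectors (one per RX/TX pair plus the admin queue).
	 */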
	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for the admin queue.
	*/
	want = queues + 1;
	if (want <= available)	/* Have enough */
		vectors = want;
	else {
		device_printf(sc->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors available but %d wanted!\n",
		    available, want);
		goto fail;
	}

#ifdef RSS
	/*
	 * If we're doing RSS, the number of queues needs to
	 * match the number of RSS buckets that are configured.
	 *
	 * + If there's more queues than RSS buckets, we'll end
	 *   up with queues that get no traffic.
	 *
	 * + If there's more RSS buckets than queues, we'll end
	 *   up having multiple RSS buckets map to the same queue,
	 *   so there'll be some contention.
	 */
	if (queues != rss_getnumbuckets()) {
		device_printf(dev,
		    "%s: queues (%d) != RSS buckets (%d)"
		    "; performance will be impacted.\n",
		    __func__, queues, rss_getnumbuckets());
	}
#endif

	if (pci_alloc_msix(dev, &vectors) == 0) {
		device_printf(sc->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		sc->msix = vectors;
		sc->vsi.num_queues = queues;
	}

	/* Next we need to setup the vector for the Admin Queue */
	rid = 1;	/* zero vector + 1 */
	sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->res == NULL) {
		device_printf(dev, "Unable to allocate"
		    " bus resource: AQ interrupt \n");
		goto fail;
	}
	if (bus_setup_intr(dev, sc->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixlv_msix_adminq, sc, &sc->tag)) {
		sc->res = NULL;
		device_printf(dev, "Failed to register AQ handler");
		goto fail;
	}
	bus_describe_intr(dev, sc->res, sc->tag, "adminq");

	return (vectors);

fail:
	/* The VF driver MUST use MSIX */
	return (0);
}
static int
ixlv_allocate_pci_resources(struct ixlv_sc *sc)
{
	int		rid;
	device_t	dev = sc->dev;

	rid = PCIR_BAR(0);
	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(sc->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	sc->osdep.mem_bus_space_tag =
	    rman_get_bustag(sc->pci_mem);
	sc->osdep.mem_bus_space_handle =
	    rman_get_bushandle(sc->pci_mem);
	sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
	sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
	sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;

	sc->hw.back = &sc->osdep;

	/*
	** Explicitly set the guest PCI BUSMASTER capability
	** and we must rewrite the ENABLE in the MSIX control
	** register again at this point to cause the host to
	** successfully initialize us.
	**
	** This must be set before accessing any registers.
	*/
	{
		u16 pci_cmd_word;
		int msix_ctrl;

		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	/* Disable adminq interrupts (just in case) */
	ixlv_disable_adminq_irq(&sc->hw);

	return (0);
}
static void
ixlv_free_pci_resources(struct ixlv_sc *sc)
{
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = sc->dev;

	/* We may get here before stations are setup */
	if (que == NULL)
		goto early;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		int rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL) {
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
			que->res = NULL;
		}
	}

early:
	pci_release_msi(dev);

	if (sc->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(IXL_MSIX_BAR), sc->msix_mem);

	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), sc->pci_mem);
}
/*
 * Create taskqueue and tasklet for Admin Queue interrupts.
 */
static int
ixlv_init_taskqueue(struct ixlv_sc *sc)
{
	int error = 0;

	TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);

	sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->tq);
	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
	    device_get_nameunit(sc->dev));

	return (error);
}
/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers for the VSI queues
 *
 **********************************************************************/
static int
ixlv_assign_msix(struct ixlv_sc *sc)
{
	device_t	dev = sc->dev;
	struct ixl_vsi	*vsi = &sc->vsi;
	struct ixl_queue *que = vsi->queues;
	struct tx_ring	*txr;
	int		error, rid, vector = 1;
#ifdef RSS
	cpuset_t	cpu_mask;
#endif

	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
		int cpu_id = i;
		rid = vector + 1;
		txr = &que->txr;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev, "Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixlv_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register que handler");
			return (error);
		}
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
		/* Bind the vector to a CPU */
#ifdef RSS
		cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
		bus_bind_intr(dev, que->res, cpu_id);
		que->msix = vector;
		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
		TASK_INIT(&que->task, 0, ixlv_handle_que, que);
		que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
		CPU_SETOF(cpu_id, &cpu_mask);
		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
		    &cpu_mask, "%s (bucket %d)",
		    device_get_nameunit(dev), cpu_id);
#else
		taskqueue_start_threads(&que->tq, 1, PI_NET,
		    "%s que", device_get_nameunit(dev));
#endif
	}

	return (0);
}
/*
** Requests a VF reset from the PF.
**
** Requires the VF's Admin Queue to be initialized.
*/
static int
ixlv_reset(struct ixlv_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	int		error = 0;

	/* Ask the PF to reset us if we are initiating */
	if (sc->init_state != IXLV_RESET_PENDING)
		ixlv_request_reset(sc);

	i40e_msec_pause(100);
	error = ixlv_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: VF reset failed\n",
		    __func__);
		return (error);
	}

	error = i40e_shutdown_adminq(hw);
	if (error) {
		device_printf(dev, "%s: shutdown_adminq failed: %d\n",
		    __func__, error);
		return (error);
	}

	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "%s: init_adminq failed: %d\n",
		    __func__, error);
		return (error);
	}

	return (error);
}
static int
ixlv_reset_complete(struct i40e_hw *hw)
{
	u32 reg;

	/* Wait up to ~10 seconds */
	for (int i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_VFGEN_RSTAT) &
		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

		if ((reg == I40E_VFR_VFACTIVE) ||
		    (reg == I40E_VFR_COMPLETED))
			return (0);
		i40e_msec_pause(100);
	}

	return (EBUSY);
}
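
/*
 * Polling budget above: 100 iterations x 100 ms pause = the "~10 seconds"
 * worst case before EBUSY is returned to the caller.
 */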
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static int
ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
{
	struct ifnet		*ifp;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;

	INIT_DBG_DEV(dev, "begin");

	ifp = vsi->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "%s: could not allocate ifnet"
		    " structure!\n", __func__);
		return (-1);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = IF_Gbps(40);
	ifp->if_init = ixlv_init;
	ifp->if_softc = vsi;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixlv_ioctl;

#if __FreeBSD_version >= 1100000
	if_setgetcounterfn(ifp, ixl_get_counter);
#endif

	ifp->if_transmit = ixl_mq_start;

	ifp->if_qflush = ixl_qflush;
	ifp->if_snd.ifq_maxlen = que->num_desc - 2;

	ether_ifattach(ifp, sc->hw.mac.addr);

	vsi->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_TSO;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;

	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU
			     |  IFCAP_LRO;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	** Don't turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the ixl driver you can
	** enable this and get full hardware tag filtering.
	*/
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
	    ixlv_media_status);

	// JFV Add media types later?

	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	INIT_DBG_DEV(dev, "end");
	return (0);
}
/*
** Allocate and setup the interface queues
*/
static int
ixlv_setup_queues(struct ixlv_sc *sc)
{
	device_t		dev = sc->dev;
	struct ixl_vsi		*vsi;
	struct ixl_queue	*que;
	struct tx_ring		*txr;
	struct rx_ring		*rxr;
	int			rsize, tsize;
	int			error = I40E_SUCCESS;

	vsi = &sc->vsi;
	vsi->back = (void *)sc;
	vsi->hw = &sc->hw;
	vsi->num_vlans = 0;

	/* Get memory for the station queues */
	if (!(vsi->queues =
	    (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		error = ENOMEM;
		goto early;
	}

	for (int i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		que->num_desc = ixlv_ringsz;
		que->me = i;
		que->vsi = vsi;

		txr = &que->txr;
		txr->que = que;
		txr->tail = I40E_QTX_TAIL1(que->me);
		/* Initialize the TX lock */
		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(dev), que->me);
		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
		/*
		** Create the TX descriptor ring, the extra int is
		** added as the location for HEAD WB.
		*/
		tsize = roundup2((que->num_desc *
		    sizeof(struct i40e_tx_desc)) +
		    sizeof(u32), DBA_ALIGN);
		if (i40e_allocate_dma_mem(&sc->hw,
		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
			device_printf(dev,
			    "Unable to allocate TX Descriptor memory\n");
			error = ENOMEM;
			goto fail;
		}
		txr->base = (struct i40e_tx_desc *)txr->dma.va;
		bzero((void *)txr->base, tsize);
		/* Now allocate transmit soft structs for the ring */
		if (ixl_allocate_tx_data(que)) {
			device_printf(dev,
			    "Critical Failure setting up TX structures\n");
			error = ENOMEM;
			goto fail;
		}
		/* Allocate a buf ring */
		txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
		    M_WAITOK, &txr->mtx);
		if (txr->br == NULL) {
			device_printf(dev,
			    "Critical Failure setting up TX buf ring\n");
			error = ENOMEM;
			goto fail;
		}

		/*
		 * Next the RX queues...
		 */
		rsize = roundup2(que->num_desc *
		    sizeof(union i40e_rx_desc), DBA_ALIGN);
		rxr = &que->rxr;
		rxr->que = que;
		rxr->tail = I40E_QRX_TAIL1(que->me);

		/* Initialize the RX side lock */
		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(dev), que->me);
		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);

		if (i40e_allocate_dma_mem(&sc->hw,
		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
			device_printf(dev,
			    "Unable to allocate RX Descriptor memory\n");
			error = ENOMEM;
			goto fail;
		}
		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
		bzero((void *)rxr->base, rsize);

		/* Allocate receive soft structs for the ring */
		if (ixl_allocate_rx_data(que)) {
			device_printf(dev,
			    "Critical Failure setting up receive structs\n");
			error = ENOMEM;
			goto fail;
		}
	}

	return (0);

fail:
	for (int i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		rxr = &que->rxr;
		txr = &que->txr;
		if (rxr->base)
			i40e_free_dma_mem(&sc->hw, &rxr->dma);
		if (txr->base)
			i40e_free_dma_mem(&sc->hw, &txr->dma);
	}
	free(vsi->queues, M_DEVBUF);

early:
	return (error);
}
/*
** This routine is run via a vlan config EVENT; it enables us to use
** the HW Filter table since we can get the vlan id. This just creates
** the entry in the soft version of the VFTA; init will repopulate the
** real table.
*/
static void
ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct ixl_vsi		*vsi = arg;
	struct ixlv_sc		*sc = vsi->back;
	struct ixlv_vlan_filter	*v;

	if (ifp->if_softc != arg)	/* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	/* Sanity check - make sure it doesn't already exist */
	SLIST_FOREACH(v, sc->vlan_filters, next) {
		if (v->vlan == vtag)
			return;
	}

	mtx_lock(&sc->mtx);
	v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (v == NULL) {
		/* M_NOWAIT allocation can fail; drop the event */
		mtx_unlock(&sc->mtx);
		return;
	}
	++vsi->num_vlans;
	SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
	v->vlan = vtag;
	v->flags = IXL_FILTER_ADD;
	ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
	    IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
	mtx_unlock(&sc->mtx);
	return;
}
/*
** This routine is run via a vlan
** unconfig EVENT; remove our entry
** in the soft vfta.
*/
static void
ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct ixl_vsi		*vsi = arg;
	struct ixlv_sc		*sc = vsi->back;
	struct ixlv_vlan_filter	*v;
	int			i = 0;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	mtx_lock(&sc->mtx);
	SLIST_FOREACH(v, sc->vlan_filters, next) {
		if (v->vlan == vtag) {
			v->flags = IXL_FILTER_DEL;
			++i;
			--vsi->num_vlans;
		}
	}
	if (i)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
		    IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
	mtx_unlock(&sc->mtx);
	return;
}
/*
** Get a new filter and add it to the mac filter list.
*/
static struct ixlv_mac_filter *
ixlv_get_mac_filter(struct ixlv_sc *sc)
{
	struct ixlv_mac_filter	*f;

	f = malloc(sizeof(struct ixlv_mac_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (f)
		SLIST_INSERT_HEAD(sc->mac_filters, f, next);

	return (f);
}
/*
** Find the filter with matching MAC address
*/
static struct ixlv_mac_filter *
ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
{
	struct ixlv_mac_filter	*f;
	bool			match = FALSE;

	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (cmp_etheraddr(f->macaddr, macaddr)) {
			match = TRUE;
			break;
		}
	}

	if (!match)
		f = NULL;
	return (f);
}
static int
ixlv_teardown_adminq_msix(struct ixlv_sc *sc)
{
	device_t	dev = sc->dev;
	int		error = 0;

	if (sc->tag != NULL) {
		/* capture the return value so the check below is meaningful */
		error = bus_teardown_intr(dev, sc->res, sc->tag);
		if (error) {
			device_printf(dev, "bus_teardown_intr() for"
			    " interrupt 0 failed\n");
		}
		sc->tag = NULL;
	}
	if (sc->res != NULL) {
		error = bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
		if (error) {
			device_printf(dev, "bus_release_resource() for"
			    " interrupt 0 failed\n");
		}
		sc->res = NULL;
	}

	return (0);
}
/*
** Admin Queue interrupt handler
*/
static void
ixlv_msix_adminq(void *arg)
{
	struct ixlv_sc	*sc = arg;
	struct i40e_hw	*hw = &sc->hw;
	u32		reg, mask;

	reg = rd32(hw, I40E_VFINT_ICR01);
	mask = rd32(hw, I40E_VFINT_ICR0_ENA1);

	reg = rd32(hw, I40E_VFINT_DYN_CTL01);
	reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
	wr32(hw, I40E_VFINT_DYN_CTL01, reg);

	/* schedule task */
	taskqueue_enqueue(sc->tq, &sc->aq_irq);
	return;
}
void
ixlv_enable_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;

	ixlv_enable_adminq_irq(hw);
	for (int i = 0; i < vsi->num_queues; i++, que++)
		ixlv_enable_queue_irq(hw, que->me);
}

void
ixlv_disable_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;

	ixlv_disable_adminq_irq(hw);
	for (int i = 0; i < vsi->num_queues; i++, que++)
		ixlv_disable_queue_irq(hw, que->me);
}
static void
ixlv_disable_adminq_irq(struct i40e_hw *hw)
{
	wr32(hw, I40E_VFINT_DYN_CTL01, 0);
	wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
	/* flush */
	rd32(hw, I40E_VFGEN_RSTAT);
	return;
}

static void
ixlv_enable_adminq_irq(struct i40e_hw *hw)
{
	wr32(hw, I40E_VFINT_DYN_CTL01,
	    I40E_VFINT_DYN_CTL01_INTENA_MASK |
	    I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
	/* flush */
	rd32(hw, I40E_VFGEN_RSTAT);
	return;
}

static void
ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
{
	u32		reg;

	reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
	    I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
	    I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
	wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
}

static void
ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
{
	wr32(hw, I40E_VFINT_DYN_CTLN1(id),
	    I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
	rd32(hw, I40E_VFGEN_RSTAT);
	return;
}
/*
 * Get initial ITR values from tunable values.
 */
static void
ixlv_configure_itr(struct ixlv_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;

	vsi->rx_itr_setting = ixlv_rx_itr;
	vsi->tx_itr_setting = ixlv_tx_itr;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring	*txr = &que->txr;
		struct rx_ring	*rxr = &que->rxr;

		wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i),
		    vsi->rx_itr_setting);
		rxr->itr = vsi->rx_itr_setting;
		rxr->latency = IXL_AVE_LATENCY;

		wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i),
		    vsi->tx_itr_setting);
		txr->itr = vsi->tx_itr_setting;
		txr->latency = IXL_AVE_LATENCY;
	}
}
/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
static void
ixlv_set_queue_rx_itr(struct ixl_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct rx_ring	*rxr = &que->rxr;
	u16		rx_itr;
	u16		rx_latency = 0;
	int		rx_bytes;

	/* Idle, do nothing */
	if (rxr->bytes == 0)
		return;

	if (ixlv_dynamic_rx_itr) {
		rx_bytes = rxr->bytes/rxr->itr;
		rx_itr = rxr->itr;

		/* Adjust latency range */
		switch (rxr->latency) {
		case IXL_LOW_LATENCY:
			if (rx_bytes > 10) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (rx_bytes > 20) {
				rx_latency = IXL_BULK_LATENCY;
				rx_itr = IXL_ITR_8K;
			} else if (rx_bytes <= 10) {
				rx_latency = IXL_LOW_LATENCY;
				rx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (rx_bytes <= 20) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		}

		rxr->latency = rx_latency;

		if (rx_itr != rxr->itr) {
			/* do an exponential smoothing */
			rx_itr = (10 * rx_itr * rxr->itr) /
			    ((9 * rx_itr) + rxr->itr);
			rxr->itr = min(rx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
			    que->me), rxr->itr);
		}
	} else { /* We may have toggled to non-dynamic */
		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->rx_itr_setting = ixlv_rx_itr;
		/* Update the hardware if needed */
		if (rxr->itr != vsi->rx_itr_setting) {
			rxr->itr = vsi->rx_itr_setting;
			wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
			    que->me), rxr->itr);
		}
	}
	rxr->bytes = 0;
	rxr->packets = 0;
	return;
}
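
/*
 * Smoothing example (values illustrative): moving from IXL_ITR_8K toward
 * IXL_ITR_20K, the blend (10 * new * old) / (9 * new + old) lands most of
 * the way toward the new target in a single step while damping oscillation
 * when traffic hovers near a latency-class boundary.
 */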
/*
** Provide an update to the queue TX
** interrupt moderation value.
*/
static void
ixlv_set_queue_tx_itr(struct ixl_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	u16		tx_itr;
	u16		tx_latency = 0;
	int		tx_bytes;

	/* Idle, do nothing */
	if (txr->bytes == 0)
		return;

	if (ixlv_dynamic_tx_itr) {
		tx_bytes = txr->bytes/txr->itr;
		tx_itr = txr->itr;

		switch (txr->latency) {
		case IXL_LOW_LATENCY:
			if (tx_bytes > 10) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (tx_bytes > 20) {
				tx_latency = IXL_BULK_LATENCY;
				tx_itr = IXL_ITR_8K;
			} else if (tx_bytes <= 10) {
				tx_latency = IXL_LOW_LATENCY;
				tx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (tx_bytes <= 20) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		}

		txr->latency = tx_latency;

		if (tx_itr != txr->itr) {
			/* do an exponential smoothing */
			tx_itr = (10 * tx_itr * txr->itr) /
			    ((9 * tx_itr) + txr->itr);
			txr->itr = min(tx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
			    que->me), txr->itr);
		}

	} else { /* We may have toggled to non-dynamic */
		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->tx_itr_setting = ixlv_tx_itr;
		/* Update the hardware if needed */
		if (txr->itr != vsi->tx_itr_setting) {
			txr->itr = vsi->tx_itr_setting;
			wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
			    que->me), txr->itr);
		}
	}
	txr->bytes = 0;
	txr->packets = 0;
	return;
}
/*
** MSIX Interrupt Handlers and Tasklets
*/
static void
ixlv_handle_que(void *context, int pending)
{
	struct ixl_queue *que = context;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct tx_ring *txr = &que->txr;
	struct ifnet *ifp = vsi->ifp;
	bool more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);
		mtx_lock(&txr->mtx);
		ixl_txeof(que);
		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		mtx_unlock(&txr->mtx);
		if (more) {
			taskqueue_enqueue(que->tq, &que->task);
			return;
		}
	}

	/* Reenable this interrupt - hmmm */
	ixlv_enable_queue_irq(hw, que->me);
	return;
}
/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixlv_msix_que(void *arg)
{
	struct ixl_queue	*que = arg;
	struct ixl_vsi		*vsi = que->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct tx_ring		*txr = &que->txr;
	bool			more_tx, more_rx;

	/* Spurious interrupts are ignored */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	++que->irqs;

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	mtx_lock(&txr->mtx);
	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	mtx_unlock(&txr->mtx);

	ixlv_set_queue_rx_itr(que);
	ixlv_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
	else
		ixlv_enable_queue_irq(hw, que->me);

	return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixlv_sc	*sc = vsi->back;

	INIT_DBG_IF(ifp, "begin");

	mtx_lock(&sc->mtx);

	ixlv_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_up) {
		mtx_unlock(&sc->mtx);
		INIT_DBG_IF(ifp, "end: link not up");
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;
	mtx_unlock(&sc->mtx);
	INIT_DBG_IF(ifp, "end");
	return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  media/mediopt option with ifconfig.
 *
 **********************************************************************/
static int
ixlv_media_change(struct ifnet * ifp)
{
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ifmedia *ifm = &vsi->media;

	INIT_DBG_IF(ifp, "begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	INIT_DBG_IF(ifp, "end");
	return (0);
}
/*********************************************************************
 *  Multicast Initialization
 *
 *  This routine is called by init to reset a fresh state.
 *
 **********************************************************************/
static void
ixlv_init_multi(struct ixl_vsi *vsi)
{
	struct ixlv_mac_filter *f;
	struct ixlv_sc *sc = vsi->back;
	int mcnt = 0;

	IOCTL_DBG_IF(vsi->ifp, "begin");

	/* First clear any multicast filters */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if ((f->flags & IXL_FILTER_USED)
		    && (f->flags & IXL_FILTER_MC)) {
			f->flags |= IXL_FILTER_DEL;
			mcnt++;
		}
	}
	if (mcnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
		    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
		    sc);

	IOCTL_DBG_IF(vsi->ifp, "end");
}
static void
ixlv_add_multi(struct ixl_vsi *vsi)
{
	struct ifmultiaddr *ifma;
	struct ifnet *ifp = vsi->ifp;
	struct ixlv_sc *sc = vsi->back;
	int mcnt = 0;

	IOCTL_DBG_IF(ifp, "begin");

	if_maddr_rlock(ifp);
	/*
	** Get a count, to decide if we
	** simply use multicast promiscuous.
	*/
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mcnt++;
	}
	if_maddr_runlock(ifp);

	/* TODO: Remove -- cannot set promiscuous mode in a VF */
	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete all multicast filters */
		ixlv_init_multi(vsi);
		sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
		    IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
		    sc);
		IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
		return;
	}

	mcnt = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (!ixlv_add_mac_filter(sc,
		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    IXL_FILTER_MC))
			mcnt++;
	}
	if_maddr_runlock(ifp);
	/*
	** Notify AQ task that sw filters need to be
	** added to hw list
	*/
	if (mcnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
		    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
		    sc);

	IOCTL_DBG_IF(ifp, "end");
}
static void
ixlv_del_multi(struct ixl_vsi *vsi)
{
	struct ixlv_mac_filter *f;
	struct ifmultiaddr *ifma;
	struct ifnet *ifp = vsi->ifp;
	struct ixlv_sc *sc = vsi->back;
	int mcnt = 0;
	bool match = FALSE;

	IOCTL_DBG_IF(ifp, "begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if ((f->flags & IXL_FILTER_USED)
		    && (f->flags & IXL_FILTER_MC)) {
			/* check if mac address in filter is in sc's list */
			match = FALSE;
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr =
				    (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			/* if this filter is not in the sc's list, remove it */
			if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
				IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
				    MAC_FORMAT_ARGS(f->macaddr));
			}
			else if (match == FALSE)
				IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
				    MAC_FORMAT_ARGS(f->macaddr));
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
		    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
		    sc);

	IOCTL_DBG_IF(ifp, "end");
}

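/*
 * The routine above is a mark-and-sweep reconciliation: every software
 * filter that no longer appears in the ifnet's multicast list is marked
 * IXL_FILTER_DEL, and all marked entries are batched into a single
 * virtual channel command. A minimal standalone sketch of the same
 * pattern follows (hypothetical types and names, not part of the
 * driver; guarded out of the build; assumes memcmp()):
 */
#if 0
struct sw_filter {
	uint8_t	addr[6];
	int	del;
};

static int
sweep_stale(struct sw_filter *tbl, int ntbl,
    const uint8_t (*live)[6], int nlive)
{
	int marked = 0;

	for (int i = 0; i < ntbl; i++) {
		int match = 0;
		for (int j = 0; j < nlive; j++)
			if (memcmp(tbl[i].addr, live[j], 6) == 0) {
				match = 1;
				break;
			}
		if (!match && !tbl[i].del) {
			tbl[i].del = 1;	/* mark; sweep happens later */
			marked++;
		}
	}
	return (marked);	/* caller issues one batched delete */
}
#endif
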
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/
static void
ixlv_local_timer(void *arg)
{
	struct ixlv_sc *sc = arg;
	struct i40e_hw *hw = &sc->hw;
	struct ixl_vsi *vsi = &sc->vsi;
	struct ixl_queue *que = vsi->queues;
	device_t dev = sc->dev;
	struct tx_ring *txr;
	int hung = 0;
	u32 mask, val;
	s32 timer, new_timer;

	IXLV_CORE_LOCK_ASSERT(sc);

	/* If Reset is in progress just bail */
	if (sc->init_state == IXLV_RESET_PENDING)
		return;

	/* Check for when PF triggers a VF reset */
	val = rd32(hw, I40E_VFGEN_RSTAT) &
	    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

	if (val != I40E_VFR_VFACTIVE
	    && val != I40E_VFR_COMPLETED) {
		DDPRINTF(dev, "reset in progress! (%d)", val);
		return;
	}

	ixlv_request_stats(sc);

	/* clean and process any events */
	taskqueue_enqueue(sc->tq, &sc->aq_irq);

	/*
	** Check status on the queues for a hang
	*/
	mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
	    I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
	    I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		txr = &que->txr;
		timer = atomic_load_acq_32(&txr->watchdog_timer);
		if (timer > 0) {
			new_timer = timer - hz;
			if (new_timer <= 0) {
				atomic_store_rel_32(&txr->watchdog_timer, -1);
				device_printf(dev, "WARNING: queue %d "
				    "appears to be hung!\n", que->me);
				++hung;
			} else {
				/*
				 * If this fails, that means something in the TX path
				 * has updated the watchdog, so it means the TX path
				 * is still working and the watchdog doesn't need
				 * to countdown.
				 */
				atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer);
				/* Any queues with outstanding work get a sw irq */
				wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
			}
		}
	}
	/* Reset when a queue shows hung */
	if (hung)
		goto hung;

	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
	return;

hung:
	device_printf(dev, "WARNING: Resetting!\n");
	sc->init_state = IXLV_RESET_REQUIRED;
	sc->watchdog_events++;
	ixlv_stop(sc);
	ixlv_init_locked(sc);
}

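/*
 * A minimal sketch of the lock-free countdown used above (hypothetical
 * helper, not part of the driver; guarded out of the build). The CAS
 * can fail only if the TX path refreshed the timer concurrently; in
 * that case the queue made progress and no countdown is wanted, so a
 * lost race is harmless by design.
 */
#if 0
static int
watchdog_countdown(volatile u32 *timer_p, s32 elapsed)
{
	s32 timer = atomic_load_acq_32(timer_p);

	if (timer <= 0)
		return (0);			/* watchdog disarmed */
	if (timer - elapsed <= 0) {
		atomic_store_rel_32(timer_p, -1);
		return (1);			/* report a hang */
	}
	/* Losing this race is fine: the TX path is alive. */
	atomic_cmpset_rel_32(timer_p, timer, timer - elapsed);
	return (0);
}
#endif
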
/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
void
ixlv_update_link_status(struct ixlv_sc *sc)
{
	struct ixl_vsi *vsi = &sc->vsi;
	struct ifnet *ifp = vsi->ifp;

	if (sc->link_up) {
		if (vsi->link_active == FALSE) {
			if_printf(ifp, "Link is Up, %d Gbps\n",
			    (sc->link_speed == I40E_LINK_SPEED_40GB) ? 40 : 10);
			vsi->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			if_printf(ifp, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			vsi->link_active = FALSE;
		}
	}
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter: it flushes any
 *  pending virtual channel commands, asks the PF to disable the
 *  queues, and drains the admin queue before stopping the local timer.
 *
 **********************************************************************/
static void
ixlv_stop(struct ixlv_sc *sc)
{
	struct ifnet *ifp;
	int start;

	ifp = sc->vsi.ifp;
	INIT_DBG_IF(ifp, "begin");

	IXLV_CORE_LOCK_ASSERT(sc);

	ixl_vc_flush(&sc->vc_mgr);
	ixlv_disable_queues(sc);

	/* Give the PF up to ~100ms (hz/10 ticks) to respond */
	start = ticks;
	while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
	    ((ticks - start) < hz/10))
		ixlv_do_adminq_locked(sc);

	/* Stop the local timer */
	callout_stop(&sc->timer);

	INIT_DBG_IF(ifp, "end");
}

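/*
 * Sketch of the bounded drain used in ixlv_stop() above (hypothetical
 * helper, not part of the driver; guarded out of the build): poll a
 * completion condition until it holds or roughly "max_ticks" kernel
 * ticks elapse, using FreeBSD's global "ticks" counter as the clock.
 */
#if 0
static bool
bounded_drain(bool (*done)(void *), void (*poll)(void *), void *arg,
    int max_ticks)
{
	int start = ticks;

	while (!done(arg) && (ticks - start) < max_ticks)
		poll(arg);
	return (done(arg));
}
#endif
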
/*********************************************************************
 *
 *  Free all station queue structs.
 *
 **********************************************************************/
static void
ixlv_free_queues(struct ixl_vsi *vsi)
{
	struct ixlv_sc *sc = (struct ixlv_sc *)vsi->back;
	struct ixl_queue *que = vsi->queues;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		struct rx_ring *rxr = &que->rxr;

		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
			continue;
		IXL_TX_LOCK(txr);
		ixl_free_que_tx(que);
		if (txr->base)
			i40e_free_dma_mem(&sc->hw, &txr->dma);
		IXL_TX_UNLOCK(txr);
		IXL_TX_LOCK_DESTROY(txr);

		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
			continue;
		IXL_RX_LOCK(rxr);
		ixl_free_que_rx(que);
		if (rxr->base)
			i40e_free_dma_mem(&sc->hw, &rxr->dma);
		IXL_RX_UNLOCK(rxr);
		IXL_RX_LOCK_DESTROY(rxr);
	}
	free(vsi->queues, M_DEVBUF);
}

static void
ixlv_config_rss_reg(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	struct ixl_vsi *vsi = &sc->vsi;
	u32 lut = 0;
	u64 set_hena = 0, hena;
	int i, j, que_id;
	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
#ifdef RSS
	u32 rss_hash_config;
#endif

	/* Don't set up RSS if using a single queue */
	if (vsi->num_queues == 1) {
		wr32(hw, I40E_VFQF_HENA(0), 0);
		wr32(hw, I40E_VFQF_HENA(1), 0);
		ixl_flush(hw);
		return;
	}

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_seed);
#else
	ixl_get_default_rss_key(rss_seed);
#endif

	/* Fill out hash function seed */
	for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
		wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);

	/* Enable PCTYPES for RSS: */
#ifdef RSS
	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	set_hena = IXL_DEFAULT_RSS_HENA_XL710;
#endif
	hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
	    ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
	hena |= set_hena;
	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
		if (j == vsi->num_queues)
			j = 0;
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues).
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % vsi->num_queues;
#else
		que_id = j;
#endif
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3) {
			wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
			DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
		}
	}
	ixl_flush(hw);
}

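/*
 * Sketch of the HLUT packing done above (hypothetical helper, not part
 * of the driver; guarded out of the build): each 32-bit HLUT register
 * holds four 8-bit queue indices, accumulated through the "lut"
 * sliding window and flushed to hardware on every fourth entry, so
 * the first index of each group lands in the most significant byte.
 */
#if 0
static uint32_t
pack_hlut_word(const uint8_t que_id[4], uint8_t entry_mask)
{
	uint32_t lut = 0;

	for (int i = 0; i < 4; i++)
		lut = (lut << 8) | (que_id[i] & entry_mask);
	return (lut);
}
#endif
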
static void
ixlv_config_rss_pf(struct ixlv_sc *sc)
{
	/* The vc manager runs these requests to the PF in order */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_key_cmd,
	    IXLV_FLAG_AQ_CONFIG_RSS_KEY, ixl_init_cmd_complete, sc);

	ixl_vc_enqueue(&sc->vc_mgr, &sc->set_rss_hena_cmd,
	    IXLV_FLAG_AQ_SET_RSS_HENA, ixl_init_cmd_complete, sc);

	ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_lut_cmd,
	    IXLV_FLAG_AQ_CONFIG_RSS_LUT, ixl_init_cmd_complete, sc);
}

/*
** ixlv_config_rss - setup RSS
**
** RSS keys and table are cleared on VF reset.
*/
static void
ixlv_config_rss(struct ixlv_sc *sc)
{
	if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG) {
		DDPRINTF(sc->dev, "Setting up RSS using VF registers...");
		ixlv_config_rss_reg(sc);
	} else if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		DDPRINTF(sc->dev, "Setting up RSS using messages to PF...");
		ixlv_config_rss_pf(sc);
	} else
		device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
}

/*
** This routine refreshes vlan filters; called by init,
** it scans the filter table and then updates the AQ.
*/
static void
ixlv_setup_vlan_filters(struct ixlv_sc *sc)
{
	struct ixl_vsi *vsi = &sc->vsi;
	struct ixlv_vlan_filter *f;
	int cnt = 0;

	if (vsi->num_vlans == 0)
		return;
	/*
	** Scan the filter table for vlan entries,
	** and if found call for the AQ update.
	*/
	SLIST_FOREACH(f, sc->vlan_filters, next)
		if (f->flags & IXL_FILTER_ADD)
			cnt++;

	if (cnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
		    IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
}

/*
** This routine adds new MAC filters to the sc's list;
** these are later added in hardware by sending a virtual
** channel message.
*/
static int
ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
{
	struct ixlv_mac_filter *f;

	/* Does one already exist? */
	f = ixlv_find_mac_filter(sc, macaddr);
	if (f != NULL) {
		IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
		    MAC_FORMAT_ARGS(macaddr));
		return (EEXIST);
	}

	/* If not, get a new empty filter */
	f = ixlv_get_mac_filter(sc);
	if (f == NULL) {
		if_printf(sc->vsi.ifp, "%s: no filters available!!\n",
		    __func__);
		return (ENOMEM);
	}

	IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
	    MAC_FORMAT_ARGS(macaddr));

	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
	f->flags |= flags;
	return (0);
}

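/*
 * Usage sketch (illustrative only; the MAC address and the "add_mac_cmd"
 * command slot are assumptions, and the block is guarded out of the
 * build): ixlv_add_mac_filter() only stages the filter in the software
 * list, so a caller must queue a virtual channel command to push it to
 * the PF, as the multicast paths above do.
 */
#if 0
	u8 lladdr[ETHER_ADDR_LEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	if (ixlv_add_mac_filter(sc, lladdr, 0) == 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
		    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete, sc);
#endif
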
/*
** Marks a MAC filter for deletion.
*/
static int
ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
{
	struct ixlv_mac_filter *f;

	f = ixlv_find_mac_filter(sc, macaddr);
	if (f == NULL)
		return (ENOENT);

	f->flags |= IXL_FILTER_DEL;
	return (0);
}

/*
** Tasklet handler for MSIX Adminq interrupts
**  - done outside interrupt context since it might sleep
*/
static void
ixlv_do_adminq(void *context, int pending)
{
	struct ixlv_sc *sc = context;

	mtx_lock(&sc->mtx);
	ixlv_do_adminq_locked(sc);
	mtx_unlock(&sc->mtx);
}

static void
ixlv_do_adminq_locked(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	struct i40e_arq_event_info event;
	struct i40e_virtchnl_msg *v_msg;
	device_t dev = sc->dev;
	u16 result = 0;
	u32 reg, oldreg;
	i40e_status ret;
	bool aq_error = false;

	IXLV_CORE_LOCK_ASSERT(sc);

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = sc->aq_buffer;
	v_msg = (struct i40e_virtchnl_msg *)&event.desc;

	do {
		ret = i40e_clean_arq_element(hw, &event, &result);
		if (ret)
			break;
		ixlv_vc_completion(sc, v_msg->v_opcode,
		    v_msg->v_retval, event.msg_buf, event.msg_len);
		if (result != 0)
			bzero(event.msg_buf, IXL_AQ_BUF_SZ);
	} while (result);

	/* check for Admin queue errors */
	oldreg = reg = rd32(hw, hw->aq.arq.len);
	if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
		device_printf(dev, "ARQ VF Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
		aq_error = true;
	}
	if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
		device_printf(dev, "ARQ Overflow Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
		aq_error = true;
	}
	if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
		device_printf(dev, "ARQ Critical Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
		aq_error = true;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.arq.len, reg);

	oldreg = reg = rd32(hw, hw->aq.asq.len);
	if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
		device_printf(dev, "ASQ VF Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
		aq_error = true;
	}
	if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
		device_printf(dev, "ASQ Overflow Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
		aq_error = true;
	}
	if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
		device_printf(dev, "ASQ Critical Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
		aq_error = true;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.asq.len, reg);

	if (aq_error) {
		/* Need to reset adapter */
		device_printf(dev, "WARNING: Resetting!\n");
		sc->init_state = IXLV_RESET_REQUIRED;
		ixlv_stop(sc);
		ixlv_init_locked(sc);
	}
	ixlv_enable_adminq_irq(hw);
}

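/*
 * The register scrubbing above repeats one pattern per queue: read the
 * length register, report and clear any sticky error bits, and write
 * the value back only if something changed. A standalone sketch of
 * that pattern (hypothetical helper, not part of the driver; guarded
 * out of the build):
 */
#if 0
static bool
scrub_aq_errors(struct i40e_hw *hw, u32 reg_addr, u32 err_mask)
{
	u32 reg, oldreg;

	oldreg = reg = rd32(hw, reg_addr);
	reg &= ~err_mask;			/* clear the error bits */
	if (oldreg != reg)
		wr32(hw, reg_addr, reg);
	return ((oldreg & err_mask) != 0);	/* caller decides on reset */
}
#endif
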
static void
ixlv_add_sysctls(struct ixlv_sc *sc)
{
	device_t dev = sc->dev;
	struct ixl_vsi *vsi = &sc->vsi;
	struct i40e_eth_stats *es = &vsi->eth_stats;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);

	struct sysctl_oid *vsi_node, *queue_node;
	struct sysctl_oid_list *vsi_list, *queue_list;

#define QUEUE_NAME_LEN 32
	char queue_namebuf[QUEUE_NAME_LEN];

	struct ixl_queue *queues = vsi->queues;
	struct tx_ring *txr;
	struct rx_ring *rxr;

	/* Driver statistics sysctls */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &sc->watchdog_events,
	    "Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
	    CTLFLAG_RD, &sc->admin_irq,
	    "Admin Queue IRQ Handled");

	/* VSI statistics sysctls */
	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
	    CTLFLAG_RD, NULL, "VSI-specific statistics");
	vsi_list = SYSCTL_CHILDREN(vsi_node);

	struct ixl_sysctl_info ctls[] =
	{
		{&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
		{&es->rx_unicast, "ucast_pkts_rcvd",
		    "Unicast Packets Received"},
		{&es->rx_multicast, "mcast_pkts_rcvd",
		    "Multicast Packets Received"},
		{&es->rx_broadcast, "bcast_pkts_rcvd",
		    "Broadcast Packets Received"},
		{&es->rx_discards, "rx_discards", "Discarded RX packets"},
		{&es->rx_unknown_protocol, "rx_unknown_proto",
		    "RX unknown protocol packets"},
		{&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
		{&es->tx_unicast, "ucast_pkts_txd",
		    "Unicast Packets Transmitted"},
		{&es->tx_multicast, "mcast_pkts_txd",
		    "Multicast Packets Transmitted"},
		{&es->tx_broadcast, "bcast_pkts_txd",
		    "Broadcast Packets Transmitted"},
		{&es->tx_errors, "tx_errors", "TX packet errors"},
		/* end of list */
		{0, 0, 0}
	};
	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != NULL)
	{
		/* Attach under the "vsi" node declared above */
		SYSCTL_ADD_QUAD(ctx, vsi_list, OID_AUTO, entry->name,
		    CTLFLAG_RD, entry->stat,
		    entry->description);
		entry++;
	}

	/* Queue statistics sysctls */
	for (int q = 0; q < vsi->num_queues; q++) {
		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		txr = &(queues[q].txr);
		rxr = &(queues[q].rxr);

		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
		    CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
		    "m_defrag() failed");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
		    CTLFLAG_RD, &(queues[q].dropped_pkts),
		    "Driver dropped packets");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
		    CTLFLAG_RD, &(queues[q].irqs),
		    "irqs on this queue");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
		    CTLFLAG_RD, &(queues[q].tso),
		    "TSO");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
		    CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
		    "Driver tx dma failure in xmit");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
		    CTLFLAG_RD, &(txr->no_desc),
		    "Queue No Descriptor Available");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
		    CTLFLAG_RD, &(txr->total_packets),
		    "Queue Packets Transmitted");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
		    CTLFLAG_RD, &(txr->tx_bytes),
		    "Queue Bytes Transmitted");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
		    CTLFLAG_RD, &(rxr->rx_packets),
		    "Queue Packets Received");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
		    CTLFLAG_RD, &(rxr->rx_bytes),
		    "Queue Bytes Received");
		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
		    CTLFLAG_RD, &(rxr->itr), 0,
		    "Queue Rx ITR Interval");
		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
		    CTLFLAG_RD, &(txr->itr), 0,
		    "Queue Tx ITR Interval");

		/* Examine queue state */
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head",
		    CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
		    sizeof(struct ixl_queue),
		    ixlv_sysctl_qtx_tail_handler, "IU",
		    "Queue Transmit Descriptor Tail");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_head",
		    CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
		    sizeof(struct ixl_queue),
		    ixlv_sysctl_qrx_tail_handler, "IU",
		    "Queue Receive Descriptor Tail");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "watchdog_timer",
		    CTLFLAG_RD, &(txr->watchdog_timer), 0,
		    "Ticks before watchdog event is triggered");
	}
}

static void
ixlv_init_filters(struct ixlv_sc *sc)
{
	/* Allocate the SLIST heads, not filter entries */
	sc->mac_filters = malloc(sizeof(struct mac_list),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INIT(sc->mac_filters);
	sc->vlan_filters = malloc(sizeof(struct vlan_list),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INIT(sc->vlan_filters);
}

static void
ixlv_free_filters(struct ixlv_sc *sc)
{
	struct ixlv_mac_filter *f;
	struct ixlv_vlan_filter *v;

	while (!SLIST_EMPTY(sc->mac_filters)) {
		f = SLIST_FIRST(sc->mac_filters);
		SLIST_REMOVE_HEAD(sc->mac_filters, next);
		free(f, M_DEVBUF);
	}
	while (!SLIST_EMPTY(sc->vlan_filters)) {
		v = SLIST_FIRST(sc->vlan_filters);
		SLIST_REMOVE_HEAD(sc->vlan_filters, next);
		free(v, M_DEVBUF);
	}
}

/**
 * ixlv_sysctl_qtx_tail_handler
 * Retrieves I40E_QTX_TAIL1 value from hardware
 * for a sysctl.
 */
static int
ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_queue *que;
	int error;
	u32 val;

	que = ((struct ixl_queue *)oidp->oid_arg1);
	if (!que)
		return (0);

	val = rd32(que->vsi->hw, que->txr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}

/**
 * ixlv_sysctl_qrx_tail_handler
 * Retrieves I40E_QRX_TAIL1 value from hardware
 * for a sysctl.
 */
static int
ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_queue *que;
	int error;
	u32 val;

	que = ((struct ixl_queue *)oidp->oid_arg1);
	if (!que)
		return (0);

	val = rd32(que->vsi->hw, que->rxr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}