1 /******************************************************************************
3 Copyright (c) 2013-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifndef IXL_STANDALONE_BUILD
37 #include "opt_inet6.h"
44 #include <net/rss_config.h>
47 /*********************************************************************
49 *********************************************************************/
50 char ixlv_driver_version[] = "1.2.4";
52 /*********************************************************************
55 * Used by probe to select devices to load on
56 * Last field stores an index into ixlv_strings
57 * Last entry must be all 0s
59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
60 *********************************************************************/
/*
 * PCI match table consulted by ixlv_probe(); subvendor/subdevice of 0
 * act as wildcards and the final field indexes into ixlv_strings.
 * NOTE(review): the all-zero sentinel entry and closing brace are not
 * visible in this excerpt — confirm the table is properly terminated.
 */
62 static ixl_vendor_info_t ixlv_vendor_info_array[] =
64 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
65 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
66 /* required last entry */
70 /*********************************************************************
71 * Table of branding strings
72 *********************************************************************/
/*
 * Branding strings indexed by the last field of ixlv_vendor_info_array
 * entries; used by ixlv_probe() to build the device description.
 */
74 static char *ixlv_strings[] = {
75 "Intel(R) Ethernet Connection XL710 VF Driver"
79 /*********************************************************************
81 *********************************************************************/
/* Forward declarations for this file's static functions. */
/* newbus entry points */
82 static int ixlv_probe(device_t);
83 static int ixlv_attach(device_t);
84 static int ixlv_detach(device_t);
85 static int ixlv_shutdown(device_t);
/* init / resource setup and teardown */
86 static void ixlv_init_locked(struct ixlv_sc *);
87 static int ixlv_allocate_pci_resources(struct ixlv_sc *);
88 static void ixlv_free_pci_resources(struct ixlv_sc *);
89 static int ixlv_assign_msix(struct ixlv_sc *);
90 static int ixlv_init_msix(struct ixlv_sc *);
91 static int ixlv_init_taskqueue(struct ixlv_sc *);
92 static int ixlv_setup_queues(struct ixlv_sc *);
93 static void ixlv_config_rss(struct ixlv_sc *);
94 static void ixlv_stop(struct ixlv_sc *);
95 static void ixlv_add_multi(struct ixl_vsi *);
96 static void ixlv_del_multi(struct ixl_vsi *);
97 static void ixlv_free_queues(struct ixl_vsi *);
98 static int ixlv_setup_interface(device_t, struct ixlv_sc *);
/* ifmedia callbacks */
100 static int ixlv_media_change(struct ifnet *);
101 static void ixlv_media_status(struct ifnet *, struct ifmediareq *);
103 static void ixlv_local_timer(void *);
/* MAC/VLAN filter management */
105 static int ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
106 static int ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
107 static void ixlv_init_filters(struct ixlv_sc *);
108 static void ixlv_free_filters(struct ixlv_sc *);
/* interrupt handlers and deferred (taskqueue) work */
110 static void ixlv_msix_que(void *);
111 static void ixlv_msix_adminq(void *);
112 static void ixlv_do_adminq(void *, int);
113 static void ixlv_do_adminq_locked(struct ixlv_sc *sc);
114 static void ixlv_handle_que(void *, int);
115 static int ixlv_reset(struct ixlv_sc *);
116 static int ixlv_reset_complete(struct i40e_hw *);
117 static void ixlv_set_queue_rx_itr(struct ixl_queue *);
118 static void ixlv_set_queue_tx_itr(struct ixl_queue *);
119 static void ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
120 enum i40e_status_code);
/* interrupt enable/disable helpers */
122 static void ixlv_enable_adminq_irq(struct i40e_hw *);
123 static void ixlv_disable_adminq_irq(struct i40e_hw *);
124 static void ixlv_enable_queue_irq(struct i40e_hw *, int);
125 static void ixlv_disable_queue_irq(struct i40e_hw *, int);
/* VLAN event handlers */
127 static void ixlv_setup_vlan_filters(struct ixlv_sc *);
128 static void ixlv_register_vlan(void *, struct ifnet *, u16);
129 static void ixlv_unregister_vlan(void *, struct ifnet *, u16);
/* PF communication (virtchnl) setup */
131 static void ixlv_init_hw(struct ixlv_sc *);
132 static int ixlv_setup_vc(struct ixlv_sc *);
133 static int ixlv_vf_config(struct ixlv_sc *);
135 static void ixlv_cap_txcsum_tso(struct ixl_vsi *,
136 struct ifnet *, int);
/* sysctl plumbing */
138 static void ixlv_add_sysctls(struct ixlv_sc *);
139 static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
140 static int ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
142 /*********************************************************************
143 * FreeBSD Device Interface Entry Points
144 *********************************************************************/
/*
 * newbus glue: device method table, driver descriptor, and module
 * registration for the "ixlv" driver on the pci bus.
 * NOTE(review): DEVMETHOD_END and the closing braces of the method table
 * and driver_t are not visible in this excerpt.
 */
146 static device_method_t ixlv_methods[] = {
147 /* Device interface */
148 DEVMETHOD(device_probe, ixlv_probe),
149 DEVMETHOD(device_attach, ixlv_attach),
150 DEVMETHOD(device_detach, ixlv_detach),
151 DEVMETHOD(device_shutdown, ixlv_shutdown),
155 static driver_t ixlv_driver = {
156 "ixlv", ixlv_methods, sizeof(struct ixlv_sc),
159 devclass_t ixlv_devclass;
160 DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);
/* Declare load-order dependencies on the pci and ether modules. */
162 MODULE_DEPEND(ixlv, pci, 1, 1, 1);
163 MODULE_DEPEND(ixlv, ether, 1, 1, 1);
166 ** TUNEABLE PARAMETERS:
/* Root sysctl node: hw.ixlv (read-only container for the knobs below). */
169 static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
170 "IXLV driver parameters");
173 ** Number of descriptors per ring:
174 ** - TX and RX are the same size
176 static int ixlv_ringsz = DEFAULT_RING;
177 TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
178 SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
179 &ixlv_ringsz, 0, "Descriptor Ring Size");
181 /* Set to zero to auto calculate */
182 int ixlv_max_queues = 0;
183 TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
184 SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
185 &ixlv_max_queues, 0, "Number of Queues");
188 ** Number of entries in Tx queue buf_ring.
189 ** Increasing this will reduce the number of
190 ** errors when transmitting fragmented UDP
193 static int ixlv_txbrsz = DEFAULT_TXBRSZ;
194 TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
195 SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
196 &ixlv_txbrsz, 0, "TX Buf Ring Size");
199 ** Controls for Interrupt Throttling
200 ** - true/false for dynamic adjustment
201 ** - default values for static ITR
/* Non-static: presumably shared with other translation units of the driver. */
203 int ixlv_dynamic_rx_itr = 0;
204 TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
205 SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
206 &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
208 int ixlv_dynamic_tx_itr = 0;
209 TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
210 SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
211 &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
/* Static ITR defaults, used when the dynamic knobs above are off. */
213 int ixlv_rx_itr = IXL_ITR_8K;
214 TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
215 SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
216 &ixlv_rx_itr, 0, "RX Interrupt Rate");
218 int ixlv_tx_itr = IXL_ITR_4K;
219 TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
220 SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
221 &ixlv_tx_itr, 0, "TX Interrupt Rate");
224 /*********************************************************************
225 * Device identification routine
227 * ixlv_probe determines if the driver should be loaded on
228 * the hardware based on PCI vendor/device id of the device.
230 * return BUS_PROBE_DEFAULT on success, positive on failure
231 *********************************************************************/
/*
 * newbus probe: match this device's PCI IDs against
 * ixlv_vendor_info_array and set the description on a hit.
 * Returns BUS_PROBE_DEFAULT on a match.
 * NOTE(review): the return-type line, table-walk increment, and the
 * not-found return path are not visible in this excerpt.
 */
234 ixlv_probe(device_t dev)
236 ixl_vendor_info_t *ent;
238 u16 pci_vendor_id, pci_device_id;
239 u16 pci_subvendor_id, pci_subdevice_id;
240 char device_name[256];
242 INIT_DEBUGOUT("ixlv_probe: begin");
/* Fast reject: only Intel devices can match the table. */
244 pci_vendor_id = pci_get_vendor(dev);
245 if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
248 pci_device_id = pci_get_device(dev);
249 pci_subvendor_id = pci_get_subvendor(dev);
250 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the table until the zeroed sentinel; sub IDs of 0 are wildcards. */
252 ent = ixlv_vendor_info_array;
253 while (ent->vendor_id != 0) {
254 if ((pci_vendor_id == ent->vendor_id) &&
255 (pci_device_id == ent->device_id) &&
257 ((pci_subvendor_id == ent->subvendor_id) ||
258 (ent->subvendor_id == 0)) &&
260 ((pci_subdevice_id == ent->subdevice_id) ||
261 (ent->subdevice_id == 0))) {
/* Build "<branding string>, Version - <driver version>" as the description. */
262 sprintf(device_name, "%s, Version - %s",
263 ixlv_strings[ent->index],
264 ixlv_driver_version);
265 device_set_desc_copy(dev, device_name);
266 return (BUS_PROBE_DEFAULT);
273 /*********************************************************************
274 * Device initialization routine
276 * The attach entry point is called when the driver is being loaded.
277 * This routine identifies the type of hardware, allocates all resources
278 * and initializes the hardware.
280 * return 0 on success, positive on failure
281 *********************************************************************/
/*
 * newbus attach: set up the softc, map PCI resources, establish admin
 * queue communication with the PF, fetch the VF configuration, allocate
 * queues and the network interface, and register VLAN event handlers.
 * NOTE(review): many error-branch bodies, gotos, and local declarations
 * (sc/hw/vsi/error) are not visible in this excerpt.
 */
284 ixlv_attach(device_t dev)
291 INIT_DBG_DEV(dev, "begin");
293 /* Allocate, clear, and link in our primary soft structure */
294 sc = device_get_softc(dev);
295 sc->dev = sc->osdep.dev = dev;
300 /* Initialize hw struct */
303 /* Allocate filter lists */
304 ixlv_init_filters(sc);
/* Core softc mutex; also backs the timer callout below. */
307 mtx_init(&sc->mtx, device_get_nameunit(dev),
308 "IXL SC Lock", MTX_DEF);
310 /* Set up the timer callout */
311 callout_init_mtx(&sc->timer, &sc->mtx, 0);
313 /* Do PCI setup - map BAR0, etc */
314 if (ixlv_allocate_pci_resources(sc)) {
315 device_printf(dev, "%s: Allocation of PCI resources failed\n",
321 INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");
323 error = i40e_set_mac_type(hw);
325 device_printf(dev, "%s: set_mac_type failed: %d\n",
/* Wait for any in-progress VF reset to finish before talking to the PF. */
330 error = ixlv_reset_complete(hw);
332 device_printf(dev, "%s: Device is still being reset\n",
337 INIT_DBG_DEV(dev, "VF Device is ready for configuration");
/* Negotiate the virtchnl API version with the PF. */
339 error = ixlv_setup_vc(sc);
341 device_printf(dev, "%s: Error setting up PF comms, %d\n",
346 INIT_DBG_DEV(dev, "PF API version verified");
348 /* TODO: Figure out why MDD events occur when this reset is removed. */
349 /* Need API version before sending reset message */
350 error = ixlv_reset(sc);
352 device_printf(dev, "VF reset failed; reload the driver\n");
356 INIT_DBG_DEV(dev, "VF reset complete");
358 /* Ask for VF config from PF */
359 error = ixlv_vf_config(sc);
361 device_printf(dev, "Error getting configuration from PF: %d\n",
366 INIT_DBG_DEV(dev, "VF config from PF:");
367 INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
368 sc->vf_res->num_vsis,
369 sc->vf_res->num_queue_pairs,
370 sc->vf_res->max_vectors,
371 sc->vf_res->max_mtu);
372 INIT_DBG_DEV(dev, "Offload flags: %#010x",
373 sc->vf_res->vf_offload_flags);
375 // TODO: Move this into ixlv_vf_config?
376 /* got VF config message back from PF, now we can parse it */
/* Find the SR-IOV LAN VSI among the resources the PF handed us. */
377 for (int i = 0; i < sc->vf_res->num_vsis; i++) {
378 if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
379 sc->vsi_res = &sc->vf_res->vsi_res[i];
382 device_printf(dev, "%s: no LAN VSI found\n", __func__);
387 INIT_DBG_DEV(dev, "Resource Acquisition complete");
389 /* If no mac address was assigned just make a random one */
390 if (!ixlv_check_ether_addr(hw->mac.addr)) {
391 u8 addr[ETHER_ADDR_LEN];
392 arc4rand(&addr, sizeof(addr), 0);
395 bcopy(addr, hw->mac.addr, sizeof(addr));
398 vsi->id = sc->vsi_res->vsi_id;
399 vsi->back = (void *)sc;
402 /* This allocates the memory and early settings */
403 if (ixlv_setup_queues(sc) != 0) {
404 device_printf(dev, "%s: setup queues failed!\n",
410 /* Setup the stack interface */
411 if (ixlv_setup_interface(dev, sc) != 0) {
412 device_printf(dev, "%s: setup interface failed!\n",
418 INIT_DBG_DEV(dev, "Queue memory and interface setup");
420 /* Do queue interrupt setup */
421 ixlv_assign_msix(sc);
423 /* Start AdminQ taskqueue */
424 ixlv_init_taskqueue(sc);
426 /* Initialize stats */
427 bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
428 ixlv_add_sysctls(sc);
430 /* Register for VLAN events */
431 vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
432 ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
433 vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
434 ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
436 /* We want AQ enabled early */
437 ixlv_enable_adminq_irq(hw);
439 /* Set things up to run init */
440 sc->init_state = IXLV_INIT_READY;
442 ixl_vc_init_mgr(sc, &sc->vc_mgr);
444 INIT_DBG_DEV(dev, "end");
/* Error-unwind path: release resources in reverse order of acquisition. */
448 ixlv_free_queues(vsi);
450 free(sc->vf_res, M_DEVBUF);
452 i40e_shutdown_adminq(hw);
454 ixlv_free_pci_resources(sc);
456 mtx_destroy(&sc->mtx);
457 ixlv_free_filters(sc);
458 INIT_DBG_DEV(dev, "end: error %d", error);
462 /*********************************************************************
463 * Device removal routine
465 * The detach entry point is called when the driver is being removed.
466 * This routine stops the adapter and deallocates all the resources
467 * that were allocated for driver operation.
469 * return 0 on success, positive on failure
470 *********************************************************************/
/*
 * newbus detach: refuse while VLANs are attached, stop the interface,
 * deregister event handlers, shut down the admin queue, and free all
 * resources acquired in ixlv_attach().
 * NOTE(review): the return-type line, some lock/stop statements, and
 * return statements are not visible in this excerpt.
 */
473 ixlv_detach(device_t dev)
475 struct ixlv_sc *sc = device_get_softc(dev);
476 struct ixl_vsi *vsi = &sc->vsi;
478 INIT_DBG_DEV(dev, "begin");
480 /* Make sure VLANS are not using driver */
481 if (vsi->ifp->if_vlantrunk != NULL) {
482 if_printf(vsi->ifp, "Vlan in use, detach first\n");
483 INIT_DBG_DEV(dev, "end");
/* Detach from the network stack, stopping first if still running. */
488 ether_ifdetach(vsi->ifp);
489 if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
492 mtx_unlock(&sc->mtx);
495 /* Unregister VLAN events */
496 if (vsi->vlan_attach != NULL)
497 EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
498 if (vsi->vlan_detach != NULL)
499 EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
/* Let any queued virtchnl command timeouts finish before teardown. */
502 callout_drain(&sc->vc_mgr.callout);
504 i40e_shutdown_adminq(&sc->hw);
505 taskqueue_free(sc->tq);
507 free(sc->vf_res, M_DEVBUF);
508 ixlv_free_pci_resources(sc);
509 ixlv_free_queues(vsi);
510 mtx_destroy(&sc->mtx);
511 ixlv_free_filters(sc);
513 bus_generic_detach(dev);
514 INIT_DBG_DEV(dev, "end");
518 /*********************************************************************
520 * Shutdown entry point
522 **********************************************************************/
/*
 * newbus shutdown hook: quiesce the device under the softc lock.
 * NOTE(review): the lock acquisition and stop call between "begin" and
 * the unlock are not visible in this excerpt.
 */
525 ixlv_shutdown(device_t dev)
527 struct ixlv_sc *sc = device_get_softc(dev);
529 INIT_DBG_DEV(dev, "begin");
533 mtx_unlock(&sc->mtx);
535 INIT_DBG_DEV(dev, "end");
540 * Configure TXCSUM(IPV6) and TSO(4/6)
541 * - the hardware handles these together so we
/*
 * Toggle TXCSUM and TSO capabilities together, since the hardware
 * requires TX checksum offload for TSO.  The IXL_FLAGS_KEEP_TSO4/6
 * flags remember that TSO was force-disabled alongside TXCSUM so it
 * can be restored when TXCSUM is re-enabled.  First half handles
 * IPv4 (TXCSUM/TSO4), second half mirrors it for IPv6 (TXCSUM_IPV6/TSO6).
 */
545 ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
547 /* Enable/disable TXCSUM/TSO4 */
/* Case: both currently off. */
548 if (!(ifp->if_capenable & IFCAP_TXCSUM)
549 && !(ifp->if_capenable & IFCAP_TSO4)) {
550 if (mask & IFCAP_TXCSUM) {
551 ifp->if_capenable |= IFCAP_TXCSUM;
552 /* enable TXCSUM, restore TSO if previously enabled */
553 if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
554 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
555 ifp->if_capenable |= IFCAP_TSO4;
558 else if (mask & IFCAP_TSO4) {
/* TSO4 implies TXCSUM; turn both on and report it. */
559 ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
560 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
562 "TSO4 requires txcsum, enabling both...\n");
/* Case: TXCSUM on, TSO4 off — toggles are independent here. */
564 } else if((ifp->if_capenable & IFCAP_TXCSUM)
565 && !(ifp->if_capenable & IFCAP_TSO4)) {
566 if (mask & IFCAP_TXCSUM)
567 ifp->if_capenable &= ~IFCAP_TXCSUM;
568 else if (mask & IFCAP_TSO4)
569 ifp->if_capenable |= IFCAP_TSO4;
/* Case: both on — disabling TXCSUM must drag TSO4 down with it. */
570 } else if((ifp->if_capenable & IFCAP_TXCSUM)
571 && (ifp->if_capenable & IFCAP_TSO4)) {
572 if (mask & IFCAP_TXCSUM) {
573 vsi->flags |= IXL_FLAGS_KEEP_TSO4;
574 ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
576 "TSO4 requires txcsum, disabling both...\n");
577 } else if (mask & IFCAP_TSO4)
578 ifp->if_capenable &= ~IFCAP_TSO4;
581 /* Enable/disable TXCSUM_IPV6/TSO6 */
/* Same three-state logic as above, for the IPv6 capability pair. */
582 if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
583 && !(ifp->if_capenable & IFCAP_TSO6)) {
584 if (mask & IFCAP_TXCSUM_IPV6) {
585 ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
586 if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
587 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
588 ifp->if_capenable |= IFCAP_TSO6;
590 } else if (mask & IFCAP_TSO6) {
591 ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
592 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
594 "TSO6 requires txcsum6, enabling both...\n");
596 } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
597 && !(ifp->if_capenable & IFCAP_TSO6)) {
598 if (mask & IFCAP_TXCSUM_IPV6)
599 ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
600 else if (mask & IFCAP_TSO6)
601 ifp->if_capenable |= IFCAP_TSO6;
602 } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
603 && (ifp->if_capenable & IFCAP_TSO6)) {
604 if (mask & IFCAP_TXCSUM_IPV6) {
605 vsi->flags |= IXL_FLAGS_KEEP_TSO6;
606 ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
608 "TSO6 requires txcsum6, disabling both...\n");
609 } else if (mask & IFCAP_TSO6)
610 ifp->if_capenable &= ~IFCAP_TSO6;
614 /*********************************************************************
617 * ixlv_ioctl is called when the user wants to configure the
620 * return 0 on success, positive on failure
621 **********************************************************************/
/*
 * ifnet ioctl handler: services MTU changes, interface flags, multicast
 * membership, media, and capability toggles; everything else falls
 * through to ether_ioctl().
 * NOTE(review): the switch statement, case labels, break statements,
 * and several lock acquisitions are not visible in this excerpt.
 */
624 ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
626 struct ixl_vsi *vsi = ifp->if_softc;
627 struct ixlv_sc *sc = vsi->back;
628 struct ifreq *ifr = (struct ifreq *)data;
629 #if defined(INET) || defined(INET6)
630 struct ifaddr *ifa = (struct ifaddr *)data;
631 bool avoid_reset = FALSE;
/* Address-family checks feeding the avoid_reset optimization below. */
640 if (ifa->ifa_addr->sa_family == AF_INET)
644 if (ifa->ifa_addr->sa_family == AF_INET6)
647 #if defined(INET) || defined(INET6)
649 ** Calling init results in link renegotiation,
650 ** so we avoid doing it when possible.
653 ifp->if_flags |= IFF_UP;
654 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
657 if (!(ifp->if_flags & IFF_NOARP))
658 arp_ifinit(ifp, ifa);
661 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: validate against the hardware maximum frame size. */
665 IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
667 if (ifr->ifr_mtu > IXL_MAX_FRAME -
668 ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
670 IOCTL_DBG_IF(ifp, "mtu too large");
672 IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", ifp->if_mtu, ifr->ifr_mtu);
673 // ERJ: Interestingly enough, these types don't match
674 ifp->if_mtu = (u_long)ifr->ifr_mtu;
675 vsi->max_frame_size =
676 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
677 + ETHER_VLAN_ENCAP_LEN;
/* MTU change requires a reinit to resize buffers. */
678 ixlv_init_locked(sc);
680 mtx_unlock(&sc->mtx);
/* SIOCSIFFLAGS: bring the interface up or down to match IFF_UP. */
683 IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
685 if (ifp->if_flags & IFF_UP) {
686 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
687 ixlv_init_locked(sc);
689 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
691 sc->if_flags = ifp->if_flags;
692 mtx_unlock(&sc->mtx);
/* SIOCADDMULTI: update the multicast filter with interrupts masked. */
695 IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
696 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
698 ixlv_disable_intr(vsi);
700 ixlv_enable_intr(vsi);
701 mtx_unlock(&sc->mtx);
/* SIOCDELMULTI: gated on driver state rather than IFF_DRV_RUNNING. */
705 IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
706 if (sc->init_state == IXLV_RUNNING) {
708 ixlv_disable_intr(vsi);
710 ixlv_enable_intr(vsi);
711 mtx_unlock(&sc->mtx);
716 IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
717 error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
/* SIOCSIFCAP: mask holds the XOR of requested vs. current capabilities. */
721 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
722 IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");
724 ixlv_cap_txcsum_tso(vsi, ifp, mask);
726 if (mask & IFCAP_RXCSUM)
727 ifp->if_capenable ^= IFCAP_RXCSUM;
728 if (mask & IFCAP_RXCSUM_IPV6)
729 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
730 if (mask & IFCAP_LRO)
731 ifp->if_capenable ^= IFCAP_LRO;
732 if (mask & IFCAP_VLAN_HWTAGGING)
733 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
734 if (mask & IFCAP_VLAN_HWFILTER)
735 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
736 if (mask & IFCAP_VLAN_HWTSO)
737 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
738 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
741 VLAN_CAPABILITIES(ifp);
/* Default: unknown commands go to the generic ethernet handler. */
747 IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
748 error = ether_ioctl(ifp, command, data);
756 ** To do a reinit on the VF is unfortunately more complicated
757 ** than a physical device, we must have the PF more or less
758 ** completely recreate our memory, so many things that were
759 ** done only once at attach in traditional drivers now must be
760 ** redone at each reinitialization. This function does that
761 ** 'prelude' so we can then call the normal locked init code.
/*
 * Reinitialization prelude: reset the VF via the PF, then re-mark every
 * MAC/VLAN filter for re-addition since a VF reset drops all hardware
 * filters.  Called before the normal locked init path.
 * NOTE(review): the stop call, filter free, and return are among the
 * lines not visible in this excerpt.
 */
764 ixlv_reinit_locked(struct ixlv_sc *sc)
766 struct i40e_hw *hw = &sc->hw;
767 struct ixl_vsi *vsi = &sc->vsi;
768 struct ifnet *ifp = vsi->ifp;
769 struct ixlv_mac_filter *mf, *mf_temp;
770 struct ixlv_vlan_filter *vf;
773 INIT_DBG_IF(ifp, "begin");
775 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
778 error = ixlv_reset(sc);
780 INIT_DBG_IF(ifp, "VF was reset");
782 /* set the state in case we went thru RESET */
783 sc->init_state = IXLV_RUNNING;
786 ** Resetting the VF drops all filters from hardware;
787 ** we need to mark them to be re-added in init.
/* Filters already pending deletion are simply dropped from the list. */
789 SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
790 if (mf->flags & IXL_FILTER_DEL) {
791 SLIST_REMOVE(sc->mac_filters, mf,
792 ixlv_mac_filter, next);
795 mf->flags |= IXL_FILTER_ADD;
/* VLAN filters: re-add if any VLANs are configured, else purge them. */
797 if (vsi->num_vlans != 0)
798 SLIST_FOREACH(vf, sc->vlan_filters, next)
799 vf->flags = IXL_FILTER_ADD;
800 else { /* clean any stale filters */
801 while (!SLIST_EMPTY(sc->vlan_filters)) {
802 vf = SLIST_FIRST(sc->vlan_filters);
803 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
/* Re-enable AQ interrupts and discard any stale queued virtchnl commands. */
808 ixlv_enable_adminq_irq(hw);
809 ixl_vc_flush(&sc->vc_mgr);
811 INIT_DBG_IF(ifp, "end");
/*
 * Completion callback for virtchnl commands queued during init: logs
 * failures, except I40E_ERR_ADAPTER_STOPPED which is expected when an
 * ifconfig down races a command in flight.
 * NOTE(review): the sc local derived from arg is not visible here.
 */
816 ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
817 enum i40e_status_code code)
824 * Ignore "Adapter Stopped" message as that happens if an ifconfig down
825 * happens while a command is in progress, so we don't print an error
828 if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
829 if_printf(sc->vsi.ifp,
830 "Error %d waiting for PF to complete operation %d\n",
/*
 * Locked init path: reinit if already running/resetting, refresh the MAC
 * filter for the current LLADDR, program checksum/TSO assist flags,
 * prime the TX/RX rings, and queue the virtchnl commands that configure,
 * map, and enable the queues on the PF side.  Must be called with the
 * softc mutex held (asserted below).
 */
836 ixlv_init_locked(struct ixlv_sc *sc)
838 struct i40e_hw *hw = &sc->hw;
839 struct ixl_vsi *vsi = &sc->vsi;
840 struct ixl_queue *que = vsi->queues;
841 struct ifnet *ifp = vsi->ifp;
844 INIT_DBG_IF(ifp, "begin");
846 IXLV_CORE_LOCK_ASSERT(sc);
848 /* Do a reinit first if an init has already been done */
849 if ((sc->init_state == IXLV_RUNNING) ||
850 (sc->init_state == IXLV_RESET_REQUIRED) ||
851 (sc->init_state == IXLV_RESET_PENDING))
852 error = ixlv_reinit_locked(sc);
853 /* Don't bother with init if we failed reinit */
857 /* Remove existing MAC filter if new MAC addr is set */
858 if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
859 error = ixlv_del_mac_filter(sc, hw->mac.addr);
861 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
862 IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
866 /* Check for an LAA mac address... */
867 bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);
/* Rebuild if_hwassist from the currently-enabled capabilities. */
869 ifp->if_hwassist = 0;
870 if (ifp->if_capenable & IFCAP_TSO)
871 ifp->if_hwassist |= CSUM_TSO;
872 if (ifp->if_capenable & IFCAP_TXCSUM)
873 ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
874 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
875 ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;
877 /* Add mac filter for this VF to PF */
878 if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
879 error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
/* EEXIST is fine: the filter is already present, still enqueue the add. */
880 if (!error || error == EEXIST)
881 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
882 IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
886 /* Setup vlan's if needed */
887 ixlv_setup_vlan_filters(sc);
889 /* Prepare the queues for operation */
890 for (int i = 0; i < vsi->num_queues; i++, que++) {
891 struct rx_ring *rxr = &que->rxr;
893 ixl_init_tx_ring(que);
/* Pick mbuf cluster size based on the configured max frame size. */
895 if (vsi->max_frame_size <= MCLBYTES)
896 rxr->mbuf_sz = MCLBYTES;
898 rxr->mbuf_sz = MJUMPAGESIZE;
899 ixl_init_rx_ring(que);
902 /* Configure queues */
903 ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
904 IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);
/* Map MSIX vectors and enable the queues via virtchnl commands. */
910 ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
911 IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);
914 ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
915 IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);
917 /* Start the local timer */
918 callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
920 sc->init_state = IXLV_RUNNING;
923 INIT_DBG_IF(ifp, "end");
928 ** Init entry point for the stack
/*
 * Stack-facing init entry point (if_init): runs the locked init and then
 * polls briefly for IFF_DRV_RUNNING, since the real work completes
 * asynchronously via queued virtchnl commands.
 * NOTE(review): the function signature, lock acquisition, retries
 * declaration, and the delay inside the poll loop are not visible here;
 * also the loop bound (100) and the warning check (IXLV_AQ_MAX_ERR)
 * appear to use different constants — confirm intent.
 */
933 struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
934 struct ixlv_sc *sc = vsi->back;
938 ixlv_init_locked(sc);
939 mtx_unlock(&sc->mtx);
941 /* Wait for init_locked to finish */
942 while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
943 && ++retries < 100) {
946 if (retries >= IXLV_AQ_MAX_ERR)
948 "Init failed to complete in alloted time!\n");
952 * ixlv_attach() helper function; gathers information about
953 * the (virtual) hardware for use elsewhere in the driver.
/*
 * ixlv_attach() helper: snapshot PCI identification (vendor/device/
 * revision/subsystem IDs) and bus location into the shared i40e_hw
 * structure for use elsewhere in the driver.
 */
956 ixlv_init_hw(struct ixlv_sc *sc)
958 struct i40e_hw *hw = &sc->hw;
959 device_t dev = sc->dev;
961 /* Save off the information about this board */
962 hw->vendor_id = pci_get_vendor(dev);
963 hw->device_id = pci_get_device(dev);
964 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
965 hw->subsystem_vendor_id =
966 pci_read_config(dev, PCIR_SUBVEND_0, 2);
967 hw->subsystem_device_id =
968 pci_read_config(dev, PCIR_SUBDEV_0, 2);
970 hw->bus.device = pci_get_slot(dev);
971 hw->bus.func = pci_get_function(dev);
975 * ixlv_attach() helper function; initalizes the admin queue
976 * and attempts to establish contact with the PF by
977 * retrying the initial "API version" message several times
978 * or until the PF responds.
/*
 * ixlv_attach() helper: initialize the admin queue and establish
 * contact with the PF by sending the "API version" message, retrying
 * up to IXLV_AQ_MAX_ERR times; the version check itself gets one resend
 * on timeout.  Returns 0 on success, an accumulated error otherwise.
 * NOTE(review): several retry-loop continue/goto/return lines are not
 * visible in this excerpt.
 */
981 ixlv_setup_vc(struct ixlv_sc *sc)
983 struct i40e_hw *hw = &sc->hw;
984 device_t dev = sc->dev;
985 int error = 0, ret_error = 0, asq_retries = 0;
986 bool send_api_ver_retried = 0;
988 /* Need to set these AQ paramters before initializing AQ */
989 hw->aq.num_arq_entries = IXL_AQ_LEN;
990 hw->aq.num_asq_entries = IXL_AQ_LEN;
991 hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
992 hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
994 for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
995 /* Initialize admin queue */
996 error = i40e_init_adminq(hw);
998 device_printf(dev, "%s: init_adminq failed: %d\n",
1004 INIT_DBG_DEV(dev, "Initialized Admin Queue, attempt %d", i+1);
1007 /* Send VF's API version */
1008 error = ixlv_send_api_ver(sc);
/* On send failure, tear the AQ down before the next attempt. */
1010 i40e_shutdown_adminq(hw);
1012 device_printf(dev, "%s: unable to send api"
1013 " version to PF on attempt %d, error %d\n",
1014 __func__, i+1, error);
/* Poll (10 ms steps) until the send queue reports completion. */
1018 while (!i40e_asq_done(hw)) {
1019 if (++asq_retries > IXLV_AQ_MAX_ERR) {
1020 i40e_shutdown_adminq(hw);
1021 DDPRINTF(dev, "Admin Queue timeout "
1022 "(waiting for send_api_ver), %d more retries...",
1023 IXLV_AQ_MAX_ERR - (i + 1));
1027 i40e_msec_delay(10);
1029 if (asq_retries > IXLV_AQ_MAX_ERR)
1032 INIT_DBG_DEV(dev, "Sent API version message to PF");
1034 /* Verify that the VF accepts the PF's API version */
1035 error = ixlv_verify_api_ver(sc);
1036 if (error == ETIMEDOUT) {
1037 if (!send_api_ver_retried) {
1038 /* Resend message, one more time */
1039 send_api_ver_retried++;
1041 "%s: Timeout while verifying API version on first"
1042 " try!\n", __func__);
1046 "%s: Timeout while verifying API version on second"
1047 " try!\n", __func__);
1054 "%s: Unable to verify API version,"
1055 " error %d\n", __func__, error);
/* Final failure path: leave the admin queue shut down. */
1062 i40e_shutdown_adminq(hw);
1067 * ixlv_attach() helper function; asks the PF for this VF's
1068 * configuration, and saves the information if it receives it.
/*
 * ixlv_attach() helper: request this VF's configuration from the PF,
 * allocate a buffer sized for the maximum VSI count, and read the
 * response into sc->vf_res; one resend is attempted on timeout.
 * NOTE(review): the retry goto/label, asq_retries reset, and several
 * error returns are not visible in this excerpt.
 */
1071 ixlv_vf_config(struct ixlv_sc *sc)
1073 struct i40e_hw *hw = &sc->hw;
1074 device_t dev = sc->dev;
1075 int bufsz, error = 0, ret_error = 0;
1076 int asq_retries, retried = 0;
1079 error = ixlv_send_vf_config_msg(sc);
1082 "%s: Unable to send VF config request, attempt %d,"
1083 " error %d\n", __func__, retried + 1, error);
/* Poll (10 ms steps) until the send queue reports completion. */
1088 while (!i40e_asq_done(hw)) {
1089 if (++asq_retries > IXLV_AQ_MAX_ERR) {
1090 device_printf(dev, "%s: Admin Queue timeout "
1091 "(waiting for send_vf_config_msg), attempt %d\n",
1092 __func__, retried + 1);
1096 i40e_msec_delay(10);
1099 INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
/* Buffer sized for the worst case: header plus I40E_MAX_VF_VSI VSIs. */
1103 bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
1104 (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
1105 sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
1108 "%s: Unable to allocate memory for VF configuration"
1109 " message from PF on attempt %d\n", __func__, retried + 1);
1115 /* Check for VF config response */
1116 error = ixlv_get_vf_config(sc);
1117 if (error == ETIMEDOUT) {
1118 /* The 1st time we timeout, send the configuration message again */
1126 "%s: Unable to get VF configuration from PF after %d tries!\n",
1127 __func__, retried + 1);
/* Failure path: release the config buffer allocated above. */
1133 free(sc->vf_res, M_DEVBUF);
1139 * Allocate MSI/X vectors, setup the AQ vector early
/*
 * Allocate MSI-X vectors (one per queue pair plus one for the admin
 * queue), choose the queue count from CPU count / tunable / RSS buckets /
 * VF maximum, and set up the admin queue interrupt handler early.
 * The VF driver requires MSI-X; failure paths release the table BAR.
 * NOTE(review): return statements, some #ifdef RSS guards, and the
 * pci_cmd_word/msix_ctrl declarations are not visible in this excerpt.
 */
1142 ixlv_init_msix(struct ixlv_sc *sc)
1144 device_t dev = sc->dev;
1145 int rid, want, vectors, queues, available;
/* Map the BAR holding the MSI-X table. */
1147 rid = PCIR_BAR(IXL_BAR);
1148 sc->msix_mem = bus_alloc_resource_any(dev,
1149 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1150 if (!sc->msix_mem) {
1151 /* May not be enabled */
1152 device_printf(sc->dev,
1153 "Unable to map MSIX table \n");
1157 available = pci_msix_count(dev);
1158 if (available == 0) { /* system has msix disabled */
1159 bus_release_resource(dev, SYS_RES_MEMORY,
1161 sc->msix_mem = NULL;
1165 /* Figure out a reasonable auto config value */
/* Reserve one vector for the admin queue; rest go to queue pairs. */
1166 queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
1168 /* Override with hardcoded value if sane */
1169 if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues))
1170 queues = ixlv_max_queues;
1172 /* If we're doing RSS, clamp at the number of RSS buckets */
1173 if (queues > rss_getnumbuckets())
1174 queues = rss_getnumbuckets();
1176 /* Enforce the VF max value */
1177 if (queues > IXLV_MAX_QUEUES)
1178 queues = IXLV_MAX_QUEUES;
1181 ** Want one vector (RX/TX pair) per queue
1182 ** plus an additional for the admin queue.
1185 if (want <= available) /* Have enough */
1188 device_printf(sc->dev,
1189 "MSIX Configuration Problem, "
1190 "%d vectors available but %d wanted!\n",
1197 * If we're doing RSS, the number of queues needs to
1198 * match the number of RSS buckets that are configured.
1200 * + If there's more queues than RSS buckets, we'll end
1201 * up with queues that get no traffic.
1203 * + If there's more RSS buckets than queues, we'll end
1204 * up having multiple RSS buckets map to the same queue,
1205 * so there'll be some contention.
1207 if (queues != rss_getnumbuckets()) {
1209 "%s: queues (%d) != RSS buckets (%d)"
1210 "; performance will be impacted.\n",
1211 __func__, queues, rss_getnumbuckets());
1215 if (pci_alloc_msix(dev, &vectors) == 0) {
1216 device_printf(sc->dev,
1217 "Using MSIX interrupts with %d vectors\n", vectors);
1219 sc->vsi.num_queues = queues;
1223 ** Explicitly set the guest PCI BUSMASTER capability
1224 ** and we must rewrite the ENABLE in the MSIX control
1225 ** register again at this point to cause the host to
1226 ** successfully initialize us.
1231 pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1232 pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
1233 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
/* Re-assert the MSI-X enable bit in the capability's control register. */
1234 pci_find_cap(dev, PCIY_MSIX, &rid);
1235 rid += PCIR_MSIX_CTRL;
1236 msix_ctrl = pci_read_config(dev, rid, 2);
1237 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1238 pci_write_config(dev, rid, msix_ctrl, 2);
1241 /* Next we need to setup the vector for the Admin Queue */
1242 rid = 1; // zero vector + 1
1243 sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1244 &rid, RF_SHAREABLE | RF_ACTIVE);
1245 if (sc->res == NULL) {
1246 device_printf(dev,"Unable to allocate"
1247 " bus resource: AQ interrupt \n");
1250 if (bus_setup_intr(dev, sc->res,
1251 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1252 ixlv_msix_adminq, sc, &sc->tag)) {
1254 device_printf(dev, "Failed to register AQ handler");
1257 bus_describe_intr(dev, sc->res, sc->tag, "adminq");
1262 /* The VF driver MUST use MSIX */
1262 /* The VF driver MUST use MSIX */
/*
 * ixlv_allocate_pci_resources
 *
 * Map PCI BAR 0 and record the bus-space tag/handle used by the
 * register-access macros, mask admin-queue interrupts, then set up
 * MSIX (sc->msix receives the vector count from ixlv_init_msix).
 * NOTE: this chunk is a sampled view; some original lines (e.g. the
 * BAR rid argument and error returns) are not visible here.
 */
1267 ixlv_allocate_pci_resources(struct ixlv_sc *sc)
1270 device_t dev = sc->dev;
1273 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1276 if (!(sc->pci_mem)) {
1277 device_printf(dev,"Unable to allocate bus resource: memory\n");
/* Save the tag/handle/size so osdep register macros can reach BAR 0 */
1281 sc->osdep.mem_bus_space_tag =
1282 rman_get_bustag(sc->pci_mem);
1283 sc->osdep.mem_bus_space_handle =
1284 rman_get_bushandle(sc->pci_mem);
1285 sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
/* VFGEN_RSTAT is read elsewhere after writes — presumably a posted-write
 * flush register; TODO confirm against the osdep read/write macros. */
1286 sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
1287 sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
1289 sc->hw.back = &sc->osdep;
1291 /* Disable adminq interrupts */
1292 ixlv_disable_adminq_irq(&sc->hw);
1295 ** Now setup MSI/X, it will return
1296 ** us the number of supported vectors
1298 sc->msix = ixlv_init_msix(sc);
1300 /* We fail without MSIX support */
/*
 * ixlv_free_pci_resources
 *
 * Tear down, in order: per-queue MSIX interrupt handlers and IRQ
 * resources, the admin-queue interrupt (rid 1, matching the rid
 * used when it was allocated), the MSI allocation, and the MSIX/BAR
 * memory resources.
 */
1308 ixlv_free_pci_resources(struct ixlv_sc *sc)
1310 struct ixl_vsi *vsi = &sc->vsi;
1311 struct ixl_queue *que = vsi->queues;
1312 device_t dev = sc->dev;
1314 /* We may get here before stations are setup */
1319 ** Release all msix queue resources:
1321 for (int i = 0; i < vsi->num_queues; i++, que++) {
/* rid is vector-number + 1, mirroring how queue vectors were allocated */
1322 int rid = que->msix + 1;
1323 if (que->tag != NULL) {
1324 bus_teardown_intr(dev, que->res, que->tag);
1327 if (que->res != NULL)
1328 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1332 /* Clean the AdminQ interrupt */
1333 if (sc->tag != NULL) {
1334 bus_teardown_intr(dev, sc->res, sc->tag);
1337 if (sc->res != NULL)
1338 bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res)
1340 pci_release_msi(dev);
1342 if (sc->msix_mem != NULL)
1343 bus_release_resource(dev, SYS_RES_MEMORY,
1344 PCIR_BAR(IXL_BAR), sc->msix_mem);
1346 if (sc->pci_mem != NULL)
1347 bus_release_resource(dev, SYS_RES_MEMORY,
1348 PCIR_BAR(0), sc->pci_mem);
1354 * Create taskqueue and tasklet for Admin Queue interrupts.
/*
 * Initializes sc->aq_irq (handled by ixlv_do_adminq) and a fast
 * taskqueue with one thread to service deferred admin-queue work.
 * NOTE(review): taskqueue_create_fast() with M_NOWAIT can return NULL,
 * but the result is not checked before taskqueue_start_threads() —
 * TODO confirm whether the caller tolerates this.
 */
1357 ixlv_init_taskqueue(struct ixlv_sc *sc)
1361 TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);
1363 sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1364 taskqueue_thread_enqueue, &sc->tq);
1365 taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
1366 device_get_nameunit(sc->dev));
1371 /*********************************************************************
1373 * Setup MSIX Interrupt resources and handlers for the VSI queues
1375 **********************************************************************/
/*
 * For each queue: allocate its IRQ resource, install ixlv_msix_que as
 * the handler, optionally bind the vector to an RSS-derived CPU, and
 * create a per-queue taskqueue for deferred TX/RX work.
 * Vector numbering starts at 1 because vector 0 is the admin queue.
 * NOTE(review): both the CPU-pinned and unpinned taskqueue_start_threads
 * calls appear here — presumably selected by an RSS #ifdef not visible
 * in this sampled view; confirm against the full file.
 */
1377 ixlv_assign_msix(struct ixlv_sc *sc)
1379 device_t dev = sc->dev;
1380 struct ixl_vsi *vsi = &sc->vsi;
1381 struct ixl_queue *que = vsi->queues;
1382 struct tx_ring *txr;
1383 int error, rid, vector = 1;
1385 for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1389 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1390 RF_SHAREABLE | RF_ACTIVE);
1391 if (que->res == NULL) {
1392 device_printf(dev,"Unable to allocate"
1393 " bus resource: que interrupt [%d]\n", vector);
1396 /* Set the handler function */
1397 error = bus_setup_intr(dev, que->res,
1398 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1399 ixlv_msix_que, que, &que->tag);
1402 device_printf(dev, "Failed to register que handler");
1405 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1406 /* Bind the vector to a CPU */
1408 cpu_id = rss_getcpu(i % rss_getnumbuckets());
1410 bus_bind_intr(dev, que->res, cpu_id);
1412 vsi->que_mask |= (u64)(1 << que->msix);
1413 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1414 TASK_INIT(&que->task, 0, ixlv_handle_que, que);
1415 que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
1416 taskqueue_thread_enqueue, &que->tq);
1418 taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
1419 cpu_id, "%s (bucket %d)",
1420 device_get_nameunit(dev), cpu_id);
1422 taskqueue_start_threads(&que->tq, 1, PI_NET,
1423 "%s que", device_get_nameunit(dev));
1432 ** Requests a VF reset from the PF.
1434 ** Requires the VF's Admin Queue to be initialized.
/*
 * If we are initiating the reset (i.e. not already in RESET_PENDING),
 * ask the PF first, then wait for completion, shut down the admin
 * queue, and re-initialize it so messaging works post-reset.
 */
1437 ixlv_reset(struct ixlv_sc *sc)
1439 struct i40e_hw *hw = &sc->hw;
1440 device_t dev = sc->dev;
1443 /* Ask the PF to reset us if we are initiating */
1444 if (sc->init_state != IXLV_RESET_PENDING)
1445 ixlv_request_reset(sc);
/* Give the PF time to act before polling for completion */
1447 i40e_msec_delay(100);
1448 error = ixlv_reset_complete(hw);
1450 device_printf(dev, "%s: VF reset failed\n",
1455 error = i40e_shutdown_adminq(hw);
1457 device_printf(dev, "%s: shutdown_adminq failed: %d\n",
1462 error = i40e_init_adminq(hw);
1464 device_printf(dev, "%s: init_adminq failed: %d\n",
/*
 * Poll VFGEN_RSTAT (up to 100 tries, 100 ms apart — ~10 s total) until
 * the VF reset state reads VFACTIVE or COMPLETED.
 */
1473 ixlv_reset_complete(struct i40e_hw *hw)
1477 for (int i = 0; i < 100; i++) {
1478 reg = rd32(hw, I40E_VFGEN_RSTAT) &
1479 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1481 if ((reg == I40E_VFR_VFACTIVE) ||
1482 (reg == I40E_VFR_COMPLETED))
1484 i40e_msec_delay(100);
1491 /*********************************************************************
1493 * Setup networking device structure and register an interface.
1495 **********************************************************************/
/*
 * Allocate the ifnet, wire up driver entry points (init/ioctl/transmit/
 * qflush), attach the Ethernet layer, advertise capabilities, and set
 * up the (autoselect-only) media list.
 */
1497 ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
1500 struct ixl_vsi *vsi = &sc->vsi;
1501 struct ixl_queue *que = vsi->queues;
1503 INIT_DBG_DEV(dev, "begin");
1505 ifp = vsi->ifp = if_alloc(IFT_ETHER);
1507 device_printf(dev, "%s: could not allocate ifnet"
1508 " structure!\n", __func__);
1512 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1514 ifp->if_mtu = ETHERMTU;
/* Hard-coded 4 Gbps placeholder — original author flagged it with "??" */
1515 ifp->if_baudrate = 4000000000; // ??
1516 ifp->if_init = ixlv_init;
1517 ifp->if_softc = vsi;
1518 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1519 ifp->if_ioctl = ixlv_ioctl;
1521 #if __FreeBSD_version >= 1100000
1522 if_setgetcounterfn(ifp, ixl_get_counter);
1525 ifp->if_transmit = ixl_mq_start;
1527 ifp->if_qflush = ixl_qflush;
/* Legacy send queue depth bounded by ring size minus slack */
1528 ifp->if_snd.ifq_maxlen = que->num_desc - 2;
1530 ether_ifattach(ifp, sc->hw.mac.addr);
1532 vsi->max_frame_size =
1533 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1534 + ETHER_VLAN_ENCAP_LEN;
1537 * Tell the upper layer(s) we support long frames.
1539 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1541 ifp->if_capabilities |= IFCAP_HWCSUM;
1542 ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1543 ifp->if_capabilities |= IFCAP_TSO;
1544 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1546 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1551 ifp->if_capenable = ifp->if_capabilities;
1554 ** Don't turn this on by default, if vlans are
1555 ** created on another pseudo device (eg. lagg)
1556 ** then vlan events are not passed thru, breaking
1557 ** operation, but with HW FILTER off it works. If
1558 ** using vlans directly on the ixl driver you can
1559 ** enable this and get full hardware tag filtering.
1561 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1564 * Specify the media types supported by this adapter and register
1565 * callbacks to update media and link information
1567 ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
1570 // JFV Add media types later?
1572 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1573 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1575 INIT_DBG_DEV(dev, "end");
1580 ** Allocate and setup the interface queues
/*
 * Allocate the queue array and, per queue: init the TX ring (lock,
 * descriptor DMA area sized with an extra u32 for head write-back,
 * soft structs, buf_ring) and the RX ring (lock, descriptor DMA area,
 * soft structs). The trailing loop (from original line 1691) is the
 * unwind path that frees DMA memory and the queue array on failure.
 */
1583 ixlv_setup_queues(struct ixlv_sc *sc)
1585 device_t dev = sc->dev;
1586 struct ixl_vsi *vsi;
1587 struct ixl_queue *que;
1588 struct tx_ring *txr;
1589 struct rx_ring *rxr;
1591 int error = I40E_SUCCESS;
1594 vsi->back = (void *)sc;
1598 /* Get memory for the station queues */
1600 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
1601 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1602 device_printf(dev, "Unable to allocate queue memory\n");
1607 for (int i = 0; i < vsi->num_queues; i++) {
1608 que = &vsi->queues[i];
1609 que->num_desc = ixlv_ringsz;
1612 /* mark the queue as active */
1613 vsi->active_queues |= (u64)1 << que->me;
1617 txr->tail = I40E_QTX_TAIL1(que->me);
1618 /* Initialize the TX lock */
1619 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
1620 device_get_nameunit(dev), que->me);
1621 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
1623 ** Create the TX descriptor ring, the extra int is
1624 ** added as the location for HEAD WB.
1626 tsize = roundup2((que->num_desc *
1627 sizeof(struct i40e_tx_desc)) +
1628 sizeof(u32), DBA_ALIGN);
1629 if (i40e_allocate_dma_mem(&sc->hw,
1630 &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
1632 "Unable to allocate TX Descriptor memory\n");
1636 txr->base = (struct i40e_tx_desc *)txr->dma.va;
1637 bzero((void *)txr->base, tsize);
1638 /* Now allocate transmit soft structs for the ring */
1639 if (ixl_allocate_tx_data(que)) {
1641 "Critical Failure setting up TX structures\n");
1645 /* Allocate a buf ring */
/* NOTE(review): M_WAITOK cannot return NULL, so the following NULL
 * check is effectively dead — harmless, but worth confirming intent. */
1646 txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
1647 M_WAITOK, &txr->mtx);
1648 if (txr->br == NULL) {
1650 "Critical Failure setting up TX buf ring\n");
1656 * Next the RX queues...
1658 rsize = roundup2(que->num_desc *
1659 sizeof(union i40e_rx_desc), DBA_ALIGN);
1662 rxr->tail = I40E_QRX_TAIL1(que->me);
1664 /* Initialize the RX side lock */
1665 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
1666 device_get_nameunit(dev), que->me);
1667 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
1669 if (i40e_allocate_dma_mem(&sc->hw,
1670 &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
1672 "Unable to allocate RX Descriptor memory\n");
1676 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
1677 bzero((void *)rxr->base, rsize);
1679 /* Allocate receive soft structs for the ring*/
1680 if (ixl_allocate_rx_data(que)) {
1682 "Critical Failure setting up receive structs\n");
/* Error unwind: release per-queue DMA areas then the queue array */
1691 for (int i = 0; i < vsi->num_queues; i++) {
1692 que = &vsi->queues[i];
1696 i40e_free_dma_mem(&sc->hw, &rxr->dma);
1698 i40e_free_dma_mem(&sc->hw, &txr->dma);
1700 free(vsi->queues, M_DEVBUF);
1707 ** This routine is run via an vlan config EVENT,
1708 ** it enables us to use the HW Filter table since
1709 ** we can get the vlan id. This just creates the
1710 ** entry in the soft version of the VFTA, init will
1711 ** repopulate the real table.
/*
 * Validates the event target and VLAN id (1..4095), skips duplicates,
 * then records an ADD filter and queues the virtchnl command.
 */
1714 ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1716 struct ixl_vsi *vsi = arg;
1717 struct ixlv_sc *sc = vsi->back;
1718 struct ixlv_vlan_filter *v;
1721 if (ifp->if_softc != arg) /* Not our event */
1724 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1727 /* Sanity check - make sure it doesn't already exist */
1728 SLIST_FOREACH(v, sc->vlan_filters, next) {
1729 if (v->vlan == vtag)
/* NOTE(review): malloc(M_NOWAIT) may return NULL, but v is inserted
 * and dereferenced without a check — potential NULL deref under
 * memory pressure; TODO confirm and add a check in the full file. */
1735 v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
1736 SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
1738 v->flags = IXL_FILTER_ADD;
1739 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
1740 IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
1741 mtx_unlock(&sc->mtx);
1746 ** This routine is run via an vlan
1747 ** unconfig EVENT, remove our entry
1748 ** in the soft vfta.
/*
 * Marks the matching soft VLAN filter for deletion and queues the
 * virtchnl DEL_VLAN command; the list entry itself is reaped later.
 */
1751 ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1753 struct ixl_vsi *vsi = arg;
1754 struct ixlv_sc *sc = vsi->back;
1755 struct ixlv_vlan_filter *v;
1758 if (ifp->if_softc != arg)
1761 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1765 SLIST_FOREACH(v, sc->vlan_filters, next) {
1766 if (v->vlan == vtag) {
1767 v->flags = IXL_FILTER_DEL;
1773 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
1774 IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
1775 mtx_unlock(&sc->mtx);
1780 ** Get a new filter and add it to the mac filter list.
/*
 * Allocates a zeroed ixlv_mac_filter and links it at the head of
 * sc->mac_filters. NOTE(review): the NULL check on the M_NOWAIT
 * allocation is not visible in this sampled view — confirm it exists
 * before the list insert in the full file.
 */
1782 static struct ixlv_mac_filter *
1783 ixlv_get_mac_filter(struct ixlv_sc *sc)
1785 struct ixlv_mac_filter *f;
1787 f = malloc(sizeof(struct ixlv_mac_filter),
1788 M_DEVBUF, M_NOWAIT | M_ZERO);
1790 SLIST_INSERT_HEAD(sc->mac_filters, f, next);
1796 ** Find the filter with matching MAC address
/*
 * Linear search of sc->mac_filters using cmp_etheraddr; returns the
 * matching entry (return statements not visible in this sampled view).
 */
1798 static struct ixlv_mac_filter *
1799 ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
1801 struct ixlv_mac_filter *f;
1804 SLIST_FOREACH(f, sc->mac_filters, next) {
1805 if (cmp_etheraddr(f->macaddr, macaddr)) {
1817 ** Admin Queue interrupt handler
/*
 * Fast interrupt path: read/clear ICR0, re-arm DYN_CTL01 with the
 * clear-PBA bit, and defer the real work to the ixlv_do_adminq task.
 * NOTE(review): the CLEARPBA mask used here is the PF-prefixed define
 * (I40E_PFINT_DYN_CTL0_...) written to a VF register — the bit layout
 * presumably matches the VF define, but confirm against the register
 * header.
 */
1820 ixlv_msix_adminq(void *arg)
1822 struct ixlv_sc *sc = arg;
1823 struct i40e_hw *hw = &sc->hw;
1826 reg = rd32(hw, I40E_VFINT_ICR01);
1827 mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
1829 reg = rd32(hw, I40E_VFINT_DYN_CTL01);
1830 reg |= I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1831 wr32(hw, I40E_VFINT_DYN_CTL01, reg);
1834 taskqueue_enqueue(sc->tq, &sc->aq_irq);
/* Enable the admin-queue interrupt and every queue's interrupt. */
1839 ixlv_enable_intr(struct ixl_vsi *vsi)
1841 struct i40e_hw *hw = vsi->hw;
1842 struct ixl_queue *que = vsi->queues;
1844 ixlv_enable_adminq_irq(hw);
1846 ixlv_enable_queue_irq(hw, que->me);
1845 for (int i = 0; i < vsi->num_queues; i++, que++)
/* Disable the admin-queue interrupt and every queue's interrupt. */
1850 ixlv_disable_intr(struct ixl_vsi *vsi)
1852 struct i40e_hw *hw = vsi->hw;
1853 struct ixl_queue *que = vsi->queues;
1855 ixlv_disable_adminq_irq(hw);
1856 for (int i = 0; i < vsi->num_queues; i++, que++)
1857 ixlv_disable_queue_irq(hw, que->me);
/*
 * Mask the admin-queue interrupt: clear DYN_CTL01 and the ICR0 enable
 * register, then read VFGEN_RSTAT (presumably to flush the posted
 * writes — TODO confirm).
 */
1862 ixlv_disable_adminq_irq(struct i40e_hw *hw)
1864 wr32(hw, I40E_VFINT_DYN_CTL01, 0);
1865 wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
1867 rd32(hw, I40E_VFGEN_RSTAT);
/*
 * Unmask the admin-queue interrupt (INTENA + ITR index in DYN_CTL01,
 * ADMINQ cause in ICR0_ENA1), then read VFGEN_RSTAT to flush.
 */
1872 ixlv_enable_adminq_irq(struct i40e_hw *hw)
1874 wr32(hw, I40E_VFINT_DYN_CTL01,
1875 I40E_VFINT_DYN_CTL01_INTENA_MASK |
1876 I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1877 wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
1879 rd32(hw, I40E_VFGEN_RSTAT);
/*
 * Re-arm a single queue's interrupt: set INTENA and clear-PBA in the
 * queue's DYN_CTLN1 register. NOTE(review): the two masks mix the
 * CTLN1 and CTLN define families — presumably same bit positions;
 * confirm against the register header.
 */
1884 ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
1888 reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1889 I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
1890 wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
/* Mask a single queue's interrupt; the RSTAT read flushes the write. */
1894 ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
1896 wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
1897 rd32(hw, I40E_VFGEN_RSTAT);
1903 ** Provide a update to the queue RX
1904 ** interrupt moderation value.
/*
 * Dynamic mode: classify traffic into LOW/AVE/BULK latency bands from
 * bytes-per-ITR-interval, pick the matching ITR rate, and write the
 * smoothed value to the queue's RX ITR register. Static mode: just
 * sync the register with vsi->rx_itr_setting when it differs.
 */
1907 ixlv_set_queue_rx_itr(struct ixl_queue *que)
1909 struct ixl_vsi *vsi = que->vsi;
1910 struct i40e_hw *hw = vsi->hw;
1911 struct rx_ring *rxr = &que->rxr;
1917 /* Idle, do nothing */
1918 if (rxr->bytes == 0)
1921 if (ixlv_dynamic_rx_itr) {
1922 rx_bytes = rxr->bytes/rxr->itr;
1925 /* Adjust latency range */
1926 switch (rxr->latency) {
1927 case IXL_LOW_LATENCY:
1928 if (rx_bytes > 10) {
1929 rx_latency = IXL_AVE_LATENCY;
1930 rx_itr = IXL_ITR_20K;
1933 case IXL_AVE_LATENCY:
1934 if (rx_bytes > 20) {
1935 rx_latency = IXL_BULK_LATENCY;
1936 rx_itr = IXL_ITR_8K;
1937 } else if (rx_bytes <= 10) {
1938 rx_latency = IXL_LOW_LATENCY;
1939 rx_itr = IXL_ITR_100K;
1942 case IXL_BULK_LATENCY:
1943 if (rx_bytes <= 20) {
1944 rx_latency = IXL_AVE_LATENCY;
1945 rx_itr = IXL_ITR_20K;
1950 rxr->latency = rx_latency;
1952 if (rx_itr != rxr->itr) {
1953 /* do an exponential smoothing */
1954 rx_itr = (10 * rx_itr * rxr->itr) /
1955 ((9 * rx_itr) + rxr->itr);
1956 rxr->itr = rx_itr & IXL_MAX_ITR;
1957 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1958 que->me), rxr->itr);
1960 } else { /* We may have have toggled to non-dynamic */
1961 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
1962 vsi->rx_itr_setting = ixlv_rx_itr;
1963 /* Update the hardware if needed */
1964 if (rxr->itr != vsi->rx_itr_setting) {
1965 rxr->itr = vsi->rx_itr_setting;
1966 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1967 que->me), rxr->itr);
1977 ** Provide a update to the queue TX
1978 ** interrupt moderation value.
/*
 * TX twin of ixlv_set_queue_rx_itr: same latency-band state machine
 * and exponential smoothing, writing the queue's TX ITR register.
 */
1981 ixlv_set_queue_tx_itr(struct ixl_queue *que)
1983 struct ixl_vsi *vsi = que->vsi;
1984 struct i40e_hw *hw = vsi->hw;
1985 struct tx_ring *txr = &que->txr;
1991 /* Idle, do nothing */
1992 if (txr->bytes == 0)
1995 if (ixlv_dynamic_tx_itr) {
1996 tx_bytes = txr->bytes/txr->itr;
1999 switch (txr->latency) {
2000 case IXL_LOW_LATENCY:
2001 if (tx_bytes > 10) {
2002 tx_latency = IXL_AVE_LATENCY;
2003 tx_itr = IXL_ITR_20K;
2006 case IXL_AVE_LATENCY:
2007 if (tx_bytes > 20) {
2008 tx_latency = IXL_BULK_LATENCY;
2009 tx_itr = IXL_ITR_8K;
2010 } else if (tx_bytes <= 10) {
2011 tx_latency = IXL_LOW_LATENCY;
2012 tx_itr = IXL_ITR_100K;
2015 case IXL_BULK_LATENCY:
2016 if (tx_bytes <= 20) {
2017 tx_latency = IXL_AVE_LATENCY;
2018 tx_itr = IXL_ITR_20K;
2023 txr->latency = tx_latency;
2025 if (tx_itr != txr->itr) {
2026 /* do an exponential smoothing */
2027 tx_itr = (10 * tx_itr * txr->itr) /
2028 ((9 * tx_itr) + txr->itr);
2029 txr->itr = tx_itr & IXL_MAX_ITR;
2030 wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2031 que->me), txr->itr);
2034 } else { /* We may have have toggled to non-dynamic */
2035 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2036 vsi->tx_itr_setting = ixlv_tx_itr;
2037 /* Update the hardware if needed */
2038 if (txr->itr != vsi->tx_itr_setting) {
2039 txr->itr = vsi->tx_itr_setting;
2040 wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2041 que->me), txr->itr);
2052 ** MSIX Interrupt Handlers and Tasklets
/*
 * Deferred (taskqueue) per-queue handler: if the interface is running,
 * clean RX up to IXL_RX_LIMIT, restart TX under the ring lock, and
 * re-queue itself while work remains; otherwise just re-arm the IRQ.
 */
2056 ixlv_handle_que(void *context, int pending)
2058 struct ixl_queue *que = context;
2059 struct ixl_vsi *vsi = que->vsi;
2060 struct i40e_hw *hw = vsi->hw;
2061 struct tx_ring *txr = &que->txr;
2062 struct ifnet *ifp = vsi->ifp;
2065 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2066 more = ixl_rxeof(que, IXL_RX_LIMIT);
2067 mtx_lock(&txr->mtx);
2069 if (!drbr_empty(ifp, txr->br))
2070 ixl_mq_start_locked(ifp, txr);
2071 mtx_unlock(&txr->mtx);
2073 taskqueue_enqueue(que->tq, &que->task);
2078 /* Reenable this interrupt - hmmm */
2079 ixlv_enable_queue_irq(hw, que->me);
2084 /*********************************************************************
2086 * MSIX Queue Interrupt Service routine
2088 **********************************************************************/
/*
 * Per-queue interrupt: clean RX and TX, update both ITR moderation
 * values, defer remaining work to the queue task, then re-arm.
 * Interrupts while the interface is down are ignored as spurious.
 */
2090 ixlv_msix_que(void *arg)
2092 struct ixl_queue *que = arg;
2093 struct ixl_vsi *vsi = que->vsi;
2094 struct i40e_hw *hw = vsi->hw;
2095 struct tx_ring *txr = &que->txr;
2096 bool more_tx, more_rx;
2098 /* Spurious interrupts are ignored */
2099 if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
2104 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
2106 mtx_lock(&txr->mtx);
2107 more_tx = ixl_txeof(que);
2109 ** Make certain that if the stack
2110 ** has anything queued the task gets
2111 ** scheduled to handle it.
2113 if (!drbr_empty(vsi->ifp, txr->br))
2115 mtx_unlock(&txr->mtx);
2117 ixlv_set_queue_rx_itr(que);
2118 ixlv_set_queue_tx_itr(que);
2120 if (more_tx || more_rx)
2121 taskqueue_enqueue(que->tq, &que->task);
2123 ixlv_enable_queue_irq(hw, que->me);
2129 /*********************************************************************
2131 * Media Ioctl callback
2133 * This routine is called whenever the user queries the status of
2134 * the interface using ifconfig.
2136 **********************************************************************/
/*
 * Refreshes link state from the softc, then reports IFM_AVALID and —
 * when link is up — IFM_ACTIVE | IFM_FDX (VF links are full-duplex).
 */
2138 ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
2140 struct ixl_vsi *vsi = ifp->if_softc;
2141 struct ixlv_sc *sc = vsi->back;
2143 INIT_DBG_IF(ifp, "begin");
2147 ixlv_update_link_status(sc);
2149 ifmr->ifm_status = IFM_AVALID;
2150 ifmr->ifm_active = IFM_ETHER;
2153 mtx_unlock(&sc->mtx);
2154 INIT_DBG_IF(ifp, "end: link not up");
2158 ifmr->ifm_status |= IFM_ACTIVE;
2159 /* Hardware is always full-duplex */
2160 ifmr->ifm_active |= IFM_FDX;
2161 mtx_unlock(&sc->mtx);
2162 INIT_DBG_IF(ifp, "end");
2166 /*********************************************************************
2168 * Media Ioctl callback
2170 * This routine is called when the user changes speed/duplex using
2171 * media/mediopt option with ifconfig.
2173 **********************************************************************/
/*
 * Only validates that the requested media type is Ethernet; the VF
 * cannot actually change speed/duplex.
 */
2175 ixlv_media_change(struct ifnet * ifp)
2177 struct ixl_vsi *vsi = ifp->if_softc;
2178 struct ifmedia *ifm = &vsi->media;
2180 INIT_DBG_IF(ifp, "begin");
2182 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2185 INIT_DBG_IF(ifp, "end");
2190 /*********************************************************************
2191 * Multicast Initialization
2193 * This routine is called by init to reset a fresh state.
2195 **********************************************************************/
/*
 * Marks every USED multicast MAC filter for deletion and queues the
 * virtchnl DEL_MAC_FILTER command to flush them from hardware.
 */
2198 ixlv_init_multi(struct ixl_vsi *vsi)
2200 struct ixlv_mac_filter *f;
2201 struct ixlv_sc *sc = vsi->back;
2204 IOCTL_DBG_IF(vsi->ifp, "begin");
2206 /* First clear any multicast filters */
2207 SLIST_FOREACH(f, sc->mac_filters, next) {
2208 if ((f->flags & IXL_FILTER_USED)
2209 && (f->flags & IXL_FILTER_MC)) {
2210 f->flags |= IXL_FILTER_DEL;
2215 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2216 IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2219 IOCTL_DBG_IF(vsi->ifp, "end");
/*
 * Sync the soft multicast filter list from the ifnet address list.
 * If the count reaches MAX_MULTICAST_ADDR, fall back to multicast
 * promiscuous instead of individual filters; otherwise add each
 * AF_LINK address and queue the virtchnl ADD_MAC_FILTER command.
 */
2223 ixlv_add_multi(struct ixl_vsi *vsi)
2225 struct ifmultiaddr *ifma;
2226 struct ifnet *ifp = vsi->ifp;
2227 struct ixlv_sc *sc = vsi->back;
2230 IOCTL_DBG_IF(ifp, "begin");
2232 if_maddr_rlock(ifp);
2234 ** Get a count, to decide if we
2235 ** simply use multicast promiscuous.
2237 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2238 if (ifma->ifma_addr->sa_family != AF_LINK)
2242 if_maddr_runlock(ifp);
2244 // TODO: Remove -- cannot set promiscuous mode in a VF
2245 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
2246 /* delete all multicast filters */
2247 ixlv_init_multi(vsi);
2248 sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
2249 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2250 IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
2252 IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
/* Second pass: actually add each link-level multicast address */
2257 if_maddr_rlock(ifp);
2258 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2259 if (ifma->ifma_addr->sa_family != AF_LINK)
2261 if (!ixlv_add_mac_filter(sc,
2262 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2266 if_maddr_runlock(ifp);
2268 ** Notify AQ task that sw filters need to be
2272 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2273 IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
2276 IOCTL_DBG_IF(ifp, "end");
/*
 * Mark for deletion any soft multicast filter whose address no longer
 * appears in the ifnet's multicast list, then queue the virtchnl
 * DEL_MAC_FILTER command to apply the removals in hardware.
 */
2280 ixlv_del_multi(struct ixl_vsi *vsi)
2282 struct ixlv_mac_filter *f;
2283 struct ifmultiaddr *ifma;
2284 struct ifnet *ifp = vsi->ifp;
2285 struct ixlv_sc *sc = vsi->back;
2289 IOCTL_DBG_IF(ifp, "begin");
2291 /* Search for removed multicast addresses */
2292 if_maddr_rlock(ifp);
2293 SLIST_FOREACH(f, sc->mac_filters, next) {
2294 if ((f->flags & IXL_FILTER_USED)
2295 && (f->flags & IXL_FILTER_MC)) {
2296 /* check if mac address in filter is in sc's list */
2298 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2299 if (ifma->ifma_addr->sa_family != AF_LINK)
2302 (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2303 if (cmp_etheraddr(f->macaddr, mc_addr)) {
2308 /* if this filter is not in the sc's list, remove it */
2309 if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
2310 f->flags |= IXL_FILTER_DEL;
2312 IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
2313 MAC_FORMAT_ARGS(f->macaddr));
2315 else if (match == FALSE)
2316 IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
2317 MAC_FORMAT_ARGS(f->macaddr));
2320 if_maddr_runlock(ifp);
2323 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2324 IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2327 IOCTL_DBG_IF(ifp, "end");
2330 /*********************************************************************
2333 * This routine checks for link status,updates statistics,
2334 * and runs the watchdog check.
2336 **********************************************************************/
/*
 * Periodic callout (rescheduled every hz). Bails during a pending
 * reset, detects a PF-initiated VF reset via VFGEN_RSTAT, requests
 * stats, kicks the adminq task, fires a software interrupt at busy
 * queues, and tracks per-queue "busy" hang counters. If EVERY queue
 * is hung, falls through to the reset path at the bottom (original
 * lines 2409-2411) which re-inits the device.
 */
2339 ixlv_local_timer(void *arg)
2341 struct ixlv_sc *sc = arg;
2342 struct i40e_hw *hw = &sc->hw;
2343 struct ixl_vsi *vsi = &sc->vsi;
2344 struct ixl_queue *que = vsi->queues;
2345 device_t dev = sc->dev;
2349 IXLV_CORE_LOCK_ASSERT(sc);
2351 /* If Reset is in progress just bail */
2352 if (sc->init_state == IXLV_RESET_PENDING)
2355 /* Check for when PF triggers a VF reset */
2356 val = rd32(hw, I40E_VFGEN_RSTAT) &
2357 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2359 if (val != I40E_VFR_VFACTIVE
2360 && val != I40E_VFR_COMPLETED) {
2361 DDPRINTF(dev, "reset in progress! (%d)", val);
2365 ixlv_request_stats(sc);
2367 /* clean and process any events */
2368 taskqueue_enqueue(sc->tq, &sc->aq_irq);
2371 ** Check status on the queues for a hang
2373 mask = (I40E_VFINT_DYN_CTLN_INTENA_MASK |
2374 I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK);
2376 for (int i = 0; i < vsi->num_queues; i++,que++) {
2377 /* Any queues with outstanding work get a sw irq */
2379 wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
2381 ** Each time txeof runs without cleaning, but there
2382 ** are uncleaned descriptors it increments busy. If
2383 ** we get to 5 we declare it hung.
2385 if (que->busy == IXL_QUEUE_HUNG) {
2387 /* Mark the queue as inactive */
2388 vsi->active_queues &= ~((u64)1 << que->me);
2391 /* Check if we've come back from hung */
2392 if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
2393 vsi->active_queues |= ((u64)1 << que->me);
2395 if (que->busy >= IXL_MAX_TX_BUSY) {
2396 device_printf(dev,"Warning queue %d "
2397 "appears to be hung!\n", i);
2398 que->busy = IXL_QUEUE_HUNG;
2402 /* Only reset when all queues show hung */
2403 if (hung == vsi->num_queues)
2405 callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
/* Reset path: reached when all queues were declared hung */
2409 device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
2410 sc->init_state = IXLV_RESET_REQUIRED;
2411 ixlv_init_locked(sc);
2415 ** Note: this routine updates the OS on the link state
2416 ** the real check of the hardware only happens with
2417 ** a link interrupt.
/*
 * Propagate the cached link state to the ifnet layer, logging and
 * calling if_link_state_change only on transitions. The speed print
 * distinguishes only 40G vs 10G from sc->link_speed.
 */
2420 ixlv_update_link_status(struct ixlv_sc *sc)
2422 struct ixl_vsi *vsi = &sc->vsi;
2423 struct ifnet *ifp = vsi->ifp;
2426 if (vsi->link_active == FALSE) {
2428 if_printf(ifp,"Link is Up, %d Gbps\n",
2429 (sc->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
2430 vsi->link_active = TRUE;
2431 if_link_state_change(ifp, LINK_STATE_UP);
2433 } else { /* Link down */
2434 if (vsi->link_active == TRUE) {
2436 if_printf(ifp,"Link is Down\n");
2437 if_link_state_change(ifp, LINK_STATE_DOWN);
2438 vsi->link_active = FALSE;
2445 /*********************************************************************
2447 * This routine disables all traffic on the adapter by issuing a
2448 * global reset on the MAC and deallocates TX/RX buffers.
2450 **********************************************************************/
/*
 * Flush pending virtchnl commands, ask the PF to disable the queues,
 * then poll the adminq for up to hz/10 ticks waiting for the RUNNING
 * flag to clear before stopping the watchdog callout.
 */
2453 ixlv_stop(struct ixlv_sc *sc)
2459 INIT_DBG_IF(ifp, "begin");
2461 IXLV_CORE_LOCK_ASSERT(sc);
2463 ixl_vc_flush(&sc->vc_mgr);
2464 ixlv_disable_queues(sc);
2467 while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
2468 ((ticks - start) < hz/10))
2469 ixlv_do_adminq_locked(sc);
2471 /* Stop the local timer */
2472 callout_stop(&sc->timer);
2474 INIT_DBG_IF(ifp, "end");
2478 /*********************************************************************
2480 * Free all station queue structs.
2482 **********************************************************************/
/*
 * For each queue, free TX then RX soft structs, descriptor DMA areas,
 * and ring locks — skipping rings whose mutex was never initialized
 * (partial setup) — then free the queue array itself.
 */
2484 ixlv_free_queues(struct ixl_vsi *vsi)
2486 struct ixlv_sc *sc = (struct ixlv_sc *)vsi->back;
2487 struct ixl_queue *que = vsi->queues;
2489 for (int i = 0; i < vsi->num_queues; i++, que++) {
2490 struct tx_ring *txr = &que->txr;
2491 struct rx_ring *rxr = &que->rxr;
2493 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2496 ixl_free_que_tx(que);
2498 i40e_free_dma_mem(&sc->hw, &txr->dma);
2500 IXL_TX_LOCK_DESTROY(txr);
2502 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2505 ixl_free_que_rx(que);
2507 i40e_free_dma_mem(&sc->hw, &rxr->dma);
2509 IXL_RX_LOCK_DESTROY(rxr);
2512 free(vsi->queues, M_DEVBUF);
2517 ** ixlv_config_rss - setup RSS
2519 ** RSS keys and table are cleared on VF reset.
/*
 * Program the VF RSS configuration: hash key (HKEY), enabled packet
 * classification types (HENA), and the indirection lookup table (HLUT,
 * filled round-robin over the active queues). Skipped entirely (HENA
 * zeroed) when only one queue is in use.
 *
 * FIX: the seed loop below used `i <= IXL_KEYSZ`, which reads
 * rss_seed[IXL_KEYSZ] one element past the end of the array declared
 * as u32 rss_seed[IXL_KEYSZ] (undefined behavior) and writes one extra
 * HKEY register. The bound is now `i < IXL_KEYSZ`.
 */
2522 ixlv_config_rss(struct ixlv_sc *sc)
2524 struct i40e_hw *hw = &sc->hw;
2525 struct ixl_vsi *vsi = &sc->vsi;
2527 u64 set_hena = 0, hena;
2530 u32 rss_hash_config;
2531 u32 rss_seed[IXL_KEYSZ];
/* Non-RSS build: fixed default seed (presumably the #else branch of an
 * RSS #ifdef not visible in this sampled view) */
2533 u32 rss_seed[IXL_KEYSZ] = {0x41b01687,
2534 0x183cfd8c, 0xce880440, 0x580cbc3c,
2535 0x35897377, 0x328b25e1, 0x4fa98922,
2536 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
2539 /* Don't set up RSS if using a single queue */
2540 if (vsi->num_queues == 1) {
2541 wr32(hw, I40E_VFQF_HENA(0), 0);
2542 wr32(hw, I40E_VFQF_HENA(1), 0);
2548 /* Fetch the configured RSS key */
2549 rss_getkey((uint8_t *) &rss_seed);
2551 /* Fill out hash function seed (exactly IXL_KEYSZ registers) */
2552 for (i = 0; i < IXL_KEYSZ; i++)
2553 wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
2555 /* Enable PCTYPES for RSS: */
2557 rss_hash_config = rss_gethashconfig();
2558 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2559 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2560 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2561 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2562 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2563 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2564 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2565 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2566 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2567 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2568 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2569 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2570 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2571 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
/* Non-RSS build: enable the full default PCTYPE set */
2574 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
2575 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
2576 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
2577 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
2578 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
2579 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
2580 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
2581 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
2582 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
2583 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
2584 ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
2586 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
2587 ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
2589 wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
2590 wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
2592 /* Populate the LUT with max no. of queues in round robin fashion */
2593 for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
2594 if (j == vsi->num_queues)
2598 * Fetch the RSS bucket id for the given indirection entry.
2599 * Cap it at the number of configured buckets (which is
2602 que_id = rss_get_indirection_to_bucket(i);
2603 que_id = que_id % vsi->num_queues;
2607 /* lut = 4-byte sliding window of 4 lut entries */
2608 lut = (lut << 8) | (que_id & 0xF);
2609 /* On i = 3, we have 4 entries in lut; write to the register */
2611 wr32(hw, I40E_VFQF_HLUT(i), lut);
2612 DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
2620 ** This routine refreshes vlan filters, called by init
2621 ** it scans the filter table and then updates the AQ
/*
 * If any soft VLAN filter is flagged IXL_FILTER_ADD, queue a virtchnl
 * ADD_VLAN_FILTER command; no-op when no VLANs are configured.
 */
2624 ixlv_setup_vlan_filters(struct ixlv_sc *sc)
2626 struct ixl_vsi *vsi = &sc->vsi;
2627 struct ixlv_vlan_filter *f;
2630 if (vsi->num_vlans == 0)
2633 ** Scan the filter table for vlan entries,
2634 ** and if found call for the AQ update.
2636 SLIST_FOREACH(f, sc->vlan_filters, next)
2637 if (f->flags & IXL_FILTER_ADD)
2640 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
2641 IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
2646 ** This routine adds new MAC filters to the sc's list;
2647 ** these are later added in hardware by sending a virtual
/*
 * Dedupe against the existing list, grab a fresh filter entry, copy
 * the address, and mark it ADD|USED plus any caller-supplied flags.
 * Returns early when the address already has a filter or no entry is
 * available (exact return values not visible in this sampled view).
 */
2651 ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
2653 struct ixlv_mac_filter *f;
2655 /* Does one already exist? */
2656 f = ixlv_find_mac_filter(sc, macaddr);
2658 IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
2659 MAC_FORMAT_ARGS(macaddr));
2663 /* If not, get a new empty filter */
2664 f = ixlv_get_mac_filter(sc);
2666 if_printf(sc->vsi.ifp, "%s: no filters available!!\n",
2671 IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
2672 MAC_FORMAT_ARGS(macaddr));
2674 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2675 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
2681 ** Marks a MAC filter for deletion.
/* Look up the filter for macaddr and flag it IXL_FILTER_DEL. */
2684 ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
2686 struct ixlv_mac_filter *f;
2688 f = ixlv_find_mac_filter(sc, macaddr);
2692 f->flags |= IXL_FILTER_DEL;
2697 ** Tasklet handler for MSIX Adminq interrupts
2698 ** - done outside interrupt context since it might sleep
/* Taskqueue wrapper: take the core lock and run the locked worker. */
2701 ixlv_do_adminq(void *context, int pending)
2703 struct ixlv_sc *sc = context;
2706 ixlv_do_adminq_locked(sc);
2707 mtx_unlock(&sc->mtx);
2712 ixlv_do_adminq_locked(struct ixlv_sc *sc)
2714 struct i40e_hw *hw = &sc->hw;
2715 struct i40e_arq_event_info event;
2716 struct i40e_virtchnl_msg *v_msg;
2717 device_t dev = sc->dev;
2722 IXLV_CORE_LOCK_ASSERT(sc);
2724 event.buf_len = IXL_AQ_BUF_SZ;
2725 event.msg_buf = sc->aq_buffer;
2726 v_msg = (struct i40e_virtchnl_msg *)&event.desc;
2729 ret = i40e_clean_arq_element(hw, &event, &result);
2732 ixlv_vc_completion(sc, v_msg->v_opcode,
2733 v_msg->v_retval, event.msg_buf, event.msg_len);
2735 bzero(event.msg_buf, IXL_AQ_BUF_SZ);
2738 /* check for Admin queue errors */
2739 oldreg = reg = rd32(hw, hw->aq.arq.len);
2740 if (reg & I40E_VF_ARQLEN_ARQVFE_MASK) {
2741 device_printf(dev, "ARQ VF Error detected\n");
2742 reg &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
2744 if (reg & I40E_VF_ARQLEN_ARQOVFL_MASK) {
2745 device_printf(dev, "ARQ Overflow Error detected\n");
2746 reg &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
2748 if (reg & I40E_VF_ARQLEN_ARQCRIT_MASK) {
2749 device_printf(dev, "ARQ Critical Error detected\n");
2750 reg &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
2753 wr32(hw, hw->aq.arq.len, reg);
2755 oldreg = reg = rd32(hw, hw->aq.asq.len);
2756 if (reg & I40E_VF_ATQLEN_ATQVFE_MASK) {
2757 device_printf(dev, "ASQ VF Error detected\n");
2758 reg &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
2760 if (reg & I40E_VF_ATQLEN_ATQOVFL_MASK) {
2761 device_printf(dev, "ASQ Overflow Error detected\n");
2762 reg &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
2764 if (reg & I40E_VF_ATQLEN_ATQCRIT_MASK) {
2765 device_printf(dev, "ASQ Critical Error detected\n");
2766 reg &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
2769 wr32(hw, hw->aq.asq.len, reg);
2771 ixlv_enable_adminq_irq(hw);
2775 ixlv_add_sysctls(struct ixlv_sc *sc)
2777 device_t dev = sc->dev;
2778 struct ixl_vsi *vsi = &sc->vsi;
2779 struct i40e_eth_stats *es = &vsi->eth_stats;
2781 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2782 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2783 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2785 struct sysctl_oid *vsi_node, *queue_node;
2786 struct sysctl_oid_list *vsi_list, *queue_list;
2788 #define QUEUE_NAME_LEN 32
2789 char queue_namebuf[QUEUE_NAME_LEN];
2791 struct ixl_queue *queues = vsi->queues;
2792 struct tx_ring *txr;
2793 struct rx_ring *rxr;
2795 /* Driver statistics sysctls */
2796 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2797 CTLFLAG_RD, &sc->watchdog_events,
2798 "Watchdog timeouts");
2799 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2800 CTLFLAG_RD, &sc->admin_irq,
2801 "Admin Queue IRQ Handled");
2803 /* VSI statistics sysctls */
2804 vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
2805 CTLFLAG_RD, NULL, "VSI-specific statistics");
2806 vsi_list = SYSCTL_CHILDREN(vsi_node);
2808 struct ixl_sysctl_info ctls[] =
2810 {&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2811 {&es->rx_unicast, "ucast_pkts_rcvd",
2812 "Unicast Packets Received"},
2813 {&es->rx_multicast, "mcast_pkts_rcvd",
2814 "Multicast Packets Received"},
2815 {&es->rx_broadcast, "bcast_pkts_rcvd",
2816 "Broadcast Packets Received"},
2817 {&es->rx_discards, "rx_discards", "Discarded RX packets"},
2818 {&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"},
2819 {&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2820 {&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2821 {&es->tx_multicast, "mcast_pkts_txd",
2822 "Multicast Packets Transmitted"},
2823 {&es->tx_broadcast, "bcast_pkts_txd",
2824 "Broadcast Packets Transmitted"},
2825 {&es->tx_errors, "tx_errors", "TX packet errors"},
2829 struct ixl_sysctl_info *entry = ctls;
2830 while (entry->stat != 0)
2832 SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name,
2833 CTLFLAG_RD, entry->stat,
2834 entry->description);
2839 for (int q = 0; q < vsi->num_queues; q++) {
2840 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2841 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
2842 CTLFLAG_RD, NULL, "Queue Name");
2843 queue_list = SYSCTL_CHILDREN(queue_node);
2845 txr = &(queues[q].txr);
2846 rxr = &(queues[q].rxr);
2848 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2849 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2850 "m_defrag() failed");
2851 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
2852 CTLFLAG_RD, &(queues[q].dropped_pkts),
2853 "Driver dropped packets");
2854 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
2855 CTLFLAG_RD, &(queues[q].irqs),
2856 "irqs on this queue");
2857 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2858 CTLFLAG_RD, &(queues[q].tso),
2860 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
2861 CTLFLAG_RD, &(queues[q].tx_dma_setup),
2862 "Driver tx dma failure in xmit");
2863 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2864 CTLFLAG_RD, &(txr->no_desc),
2865 "Queue No Descriptor Available");
2866 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2867 CTLFLAG_RD, &(txr->total_packets),
2868 "Queue Packets Transmitted");
2869 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2870 CTLFLAG_RD, &(txr->tx_bytes),
2871 "Queue Bytes Transmitted");
2872 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2873 CTLFLAG_RD, &(rxr->rx_packets),
2874 "Queue Packets Received");
2875 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2876 CTLFLAG_RD, &(rxr->rx_bytes),
2877 "Queue Bytes Received");
2879 /* Examine queue state */
2880 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head",
2881 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2882 sizeof(struct ixl_queue),
2883 ixlv_sysctl_qtx_tail_handler, "IU",
2884 "Queue Transmit Descriptor Tail");
2885 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_head",
2886 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2887 sizeof(struct ixl_queue),
2888 ixlv_sysctl_qrx_tail_handler, "IU",
2889 "Queue Receive Descriptor Tail");
2894 ixlv_init_filters(struct ixlv_sc *sc)
2896 sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
2897 M_DEVBUF, M_NOWAIT | M_ZERO);
2898 SLIST_INIT(sc->mac_filters);
2899 sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
2900 M_DEVBUF, M_NOWAIT | M_ZERO);
2901 SLIST_INIT(sc->vlan_filters);
2906 ixlv_free_filters(struct ixlv_sc *sc)
2908 struct ixlv_mac_filter *f;
2909 struct ixlv_vlan_filter *v;
2911 while (!SLIST_EMPTY(sc->mac_filters)) {
2912 f = SLIST_FIRST(sc->mac_filters);
2913 SLIST_REMOVE_HEAD(sc->mac_filters, next);
2916 while (!SLIST_EMPTY(sc->vlan_filters)) {
2917 v = SLIST_FIRST(sc->vlan_filters);
2918 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
2925 * ixlv_sysctl_qtx_tail_handler
2926 * Retrieves I40E_QTX_TAIL1 value from hardware
2930 ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
2932 struct ixl_queue *que;
2936 que = ((struct ixl_queue *)oidp->oid_arg1);
2939 val = rd32(que->vsi->hw, que->txr.tail);
2940 error = sysctl_handle_int(oidp, &val, 0, req);
2941 if (error || !req->newptr)
2947 * ixlv_sysctl_qrx_tail_handler
2948 * Retrieves I40E_QRX_TAIL1 value from hardware
2952 ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
2954 struct ixl_queue *que;
2958 que = ((struct ixl_queue *)oidp->oid_arg1);
2961 val = rd32(que->vsi->hw, que->rxr.tail);
2962 error = sysctl_handle_int(oidp, &val, 0, req);
2963 if (error || !req->newptr)