1 /******************************************************************************
3 Copyright (c) 2013-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifndef IXL_STANDALONE_BUILD
37 #include "opt_inet6.h"
44 #include <net/rss_config.h>
47 /*********************************************************************
49 *********************************************************************/
50 char ixlv_driver_version[] = "1.2.6";
52 /*********************************************************************
55 * Used by probe to select devices to load on
56 * Last field stores an index into ixlv_strings
57 * Last entry must be all 0s
59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
60 *********************************************************************/
62 static ixl_vendor_info_t ixlv_vendor_info_array[] =
64 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
65 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
66 /* required last entry */
70 /*********************************************************************
71 * Table of branding strings
72 *********************************************************************/
74 static char *ixlv_strings[] = {
75 "Intel(R) Ethernet Connection XL710 VF Driver"
79 /*********************************************************************
81 *********************************************************************/
82 static int ixlv_probe(device_t);
83 static int ixlv_attach(device_t);
84 static int ixlv_detach(device_t);
85 static int ixlv_shutdown(device_t);
86 static void ixlv_init_locked(struct ixlv_sc *);
87 static int ixlv_allocate_pci_resources(struct ixlv_sc *);
88 static void ixlv_free_pci_resources(struct ixlv_sc *);
89 static int ixlv_assign_msix(struct ixlv_sc *);
90 static int ixlv_init_msix(struct ixlv_sc *);
91 static int ixlv_init_taskqueue(struct ixlv_sc *);
92 static int ixlv_setup_queues(struct ixlv_sc *);
93 static void ixlv_config_rss(struct ixlv_sc *);
94 static void ixlv_stop(struct ixlv_sc *);
95 static void ixlv_add_multi(struct ixl_vsi *);
96 static void ixlv_del_multi(struct ixl_vsi *);
97 static void ixlv_free_queues(struct ixl_vsi *);
98 static int ixlv_setup_interface(device_t, struct ixlv_sc *);
100 static int ixlv_media_change(struct ifnet *);
101 static void ixlv_media_status(struct ifnet *, struct ifmediareq *);
103 static void ixlv_local_timer(void *);
105 static int ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
106 static int ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
107 static void ixlv_init_filters(struct ixlv_sc *);
108 static void ixlv_free_filters(struct ixlv_sc *);
110 static void ixlv_msix_que(void *);
111 static void ixlv_msix_adminq(void *);
112 static void ixlv_do_adminq(void *, int);
113 static void ixlv_do_adminq_locked(struct ixlv_sc *sc);
114 static void ixlv_handle_que(void *, int);
115 static int ixlv_reset(struct ixlv_sc *);
116 static int ixlv_reset_complete(struct i40e_hw *);
117 static void ixlv_set_queue_rx_itr(struct ixl_queue *);
118 static void ixlv_set_queue_tx_itr(struct ixl_queue *);
119 static void ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
120 enum i40e_status_code);
122 static void ixlv_enable_adminq_irq(struct i40e_hw *);
123 static void ixlv_disable_adminq_irq(struct i40e_hw *);
124 static void ixlv_enable_queue_irq(struct i40e_hw *, int);
125 static void ixlv_disable_queue_irq(struct i40e_hw *, int);
127 static void ixlv_setup_vlan_filters(struct ixlv_sc *);
128 static void ixlv_register_vlan(void *, struct ifnet *, u16);
129 static void ixlv_unregister_vlan(void *, struct ifnet *, u16);
131 static void ixlv_init_hw(struct ixlv_sc *);
132 static int ixlv_setup_vc(struct ixlv_sc *);
133 static int ixlv_vf_config(struct ixlv_sc *);
135 static void ixlv_cap_txcsum_tso(struct ixl_vsi *,
136 struct ifnet *, int);
138 static void ixlv_add_sysctls(struct ixlv_sc *);
139 static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
140 static int ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
142 /*********************************************************************
143 * FreeBSD Device Interface Entry Points
144 *********************************************************************/
146 static device_method_t ixlv_methods[] = {
147 /* Device interface */
148 DEVMETHOD(device_probe, ixlv_probe),
149 DEVMETHOD(device_attach, ixlv_attach),
150 DEVMETHOD(device_detach, ixlv_detach),
151 DEVMETHOD(device_shutdown, ixlv_shutdown),
155 static driver_t ixlv_driver = {
156 "ixlv", ixlv_methods, sizeof(struct ixlv_sc),
159 devclass_t ixlv_devclass;
160 DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);
162 MODULE_DEPEND(ixlv, pci, 1, 1, 1);
163 MODULE_DEPEND(ixlv, ether, 1, 1, 1);
163 ** TUNABLE PARAMETERS:
169 static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
170 "IXLV driver parameters");
173 ** Number of descriptors per ring:
174 ** - TX and RX are the same size
176 static int ixlv_ringsz = DEFAULT_RING;
177 TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
178 SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
179 &ixlv_ringsz, 0, "Descriptor Ring Size");
181 /* Set to zero to auto calculate */
182 int ixlv_max_queues = 0;
183 TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
184 SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
185 &ixlv_max_queues, 0, "Number of Queues");
188 ** Number of entries in Tx queue buf_ring.
189 ** Increasing this will reduce the number of
190 ** errors when transmitting fragmented UDP
193 static int ixlv_txbrsz = DEFAULT_TXBRSZ;
194 TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
195 SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
196 &ixlv_txbrsz, 0, "TX Buf Ring Size");
199 ** Controls for Interrupt Throttling
200 ** - true/false for dynamic adjustment
201 ** - default values for static ITR
203 int ixlv_dynamic_rx_itr = 0;
204 TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
205 SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
206 &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
208 int ixlv_dynamic_tx_itr = 0;
209 TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
210 SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
211 &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
213 int ixlv_rx_itr = IXL_ITR_8K;
214 TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
215 SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
216 &ixlv_rx_itr, 0, "RX Interrupt Rate");
218 int ixlv_tx_itr = IXL_ITR_4K;
219 TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
220 SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
221 &ixlv_tx_itr, 0, "TX Interrupt Rate");
224 /*********************************************************************
225 * Device identification routine
227 * ixlv_probe determines if the driver should be loaded on
228 * the hardware based on PCI vendor/device id of the device.
230 * return BUS_PROBE_DEFAULT on success, positive on failure
231 *********************************************************************/
234 ixlv_probe(device_t dev)
236 ixl_vendor_info_t *ent;
238 u16 pci_vendor_id, pci_device_id;
239 u16 pci_subvendor_id, pci_subdevice_id;
240 char device_name[256];
242 INIT_DEBUGOUT("ixlv_probe: begin");
/* Cheap reject first: anything not Intel cannot match the table below. */
244 pci_vendor_id = pci_get_vendor(dev);
245 if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
248 pci_device_id = pci_get_device(dev);
249 pci_subvendor_id = pci_get_subvendor(dev);
250 pci_subdevice_id = pci_get_subdevice(dev);
/*
 * Walk ixlv_vendor_info_array (terminated by an all-zero entry).
 * A table subvendor/subdevice of 0 acts as a wildcard that matches
 * any subsystem ID.
 */
252 ent = ixlv_vendor_info_array;
253 while (ent->vendor_id != 0) {
254 if ((pci_vendor_id == ent->vendor_id) &&
255 (pci_device_id == ent->device_id) &&
257 ((pci_subvendor_id == ent->subvendor_id) ||
258 (ent->subvendor_id == 0)) &&
260 ((pci_subdevice_id == ent->subdevice_id) ||
261 (ent->subdevice_id == 0))) {
/*
 * NOTE(review): unbounded sprintf into device_name[256]; safe only
 * while ixlv_strings[] + ixlv_driver_version stay short — prefer
 * snprintf(device_name, sizeof(device_name), ...).
 */
262 sprintf(device_name, "%s, Version - %s",
263 ixlv_strings[ent->index],
264 ixlv_driver_version);
265 device_set_desc_copy(dev, device_name);
266 return (BUS_PROBE_DEFAULT);
273 /*********************************************************************
274 * Device initialization routine
276 * The attach entry point is called when the driver is being loaded.
277 * This routine identifies the type of hardware, allocates all resources
278 * and initializes the hardware.
280 * return 0 on success, positive on failure
281 *********************************************************************/
284 ixlv_attach(device_t dev)
291 INIT_DBG_DEV(dev, "begin");
293 /* Allocate, clear, and link in our primary soft structure */
294 sc = device_get_softc(dev);
295 sc->dev = sc->osdep.dev = dev;
300 /* Initialize hw struct */
303 /* Allocate filter lists */
304 ixlv_init_filters(sc);
307 mtx_init(&sc->mtx, device_get_nameunit(dev),
308 "IXL SC Lock", MTX_DEF);
310 /* Set up the timer callout */
311 callout_init_mtx(&sc->timer, &sc->mtx, 0);
313 /* Do PCI setup - map BAR0, etc */
314 if (ixlv_allocate_pci_resources(sc)) {
315 device_printf(dev, "%s: Allocation of PCI resources failed\n",
321 INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");
323 error = i40e_set_mac_type(hw);
325 device_printf(dev, "%s: set_mac_type failed: %d\n",
330 error = ixlv_reset_complete(hw);
332 device_printf(dev, "%s: Device is still being reset\n",
337 INIT_DBG_DEV(dev, "VF Device is ready for configuration");
339 error = ixlv_setup_vc(sc);
341 device_printf(dev, "%s: Error setting up PF comms, %d\n",
346 INIT_DBG_DEV(dev, "PF API version verified");
348 /* TODO: Figure out why MDD events occur when this reset is removed. */
349 /* Need API version before sending reset message */
350 error = ixlv_reset(sc);
352 device_printf(dev, "VF reset failed; reload the driver\n");
356 INIT_DBG_DEV(dev, "VF reset complete");
358 /* Ask for VF config from PF */
359 error = ixlv_vf_config(sc);
361 device_printf(dev, "Error getting configuration from PF: %d\n",
366 INIT_DBG_DEV(dev, "VF config from PF:");
367 INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
368 sc->vf_res->num_vsis,
369 sc->vf_res->num_queue_pairs,
370 sc->vf_res->max_vectors,
371 sc->vf_res->max_mtu);
372 INIT_DBG_DEV(dev, "Offload flags: %#010x",
373 sc->vf_res->vf_offload_flags);
375 // TODO: Move this into ixlv_vf_config?
376 /* got VF config message back from PF, now we can parse it */
377 for (int i = 0; i < sc->vf_res->num_vsis; i++) {
378 if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
379 sc->vsi_res = &sc->vf_res->vsi_res[i];
382 device_printf(dev, "%s: no LAN VSI found\n", __func__);
387 INIT_DBG_DEV(dev, "Resource Acquisition complete");
389 /* If no mac address was assigned just make a random one */
390 if (!ixlv_check_ether_addr(hw->mac.addr)) {
391 u8 addr[ETHER_ADDR_LEN];
392 arc4rand(&addr, sizeof(addr), 0);
395 bcopy(addr, hw->mac.addr, sizeof(addr));
398 vsi->id = sc->vsi_res->vsi_id;
399 vsi->back = (void *)sc;
402 /* This allocates the memory and early settings */
403 if (ixlv_setup_queues(sc) != 0) {
404 device_printf(dev, "%s: setup queues failed!\n",
410 /* Setup the stack interface */
411 if (ixlv_setup_interface(dev, sc) != 0) {
412 device_printf(dev, "%s: setup interface failed!\n",
418 INIT_DBG_DEV(dev, "Queue memory and interface setup");
420 /* Do queue interrupt setup */
421 ixlv_assign_msix(sc);
423 /* Start AdminQ taskqueue */
424 ixlv_init_taskqueue(sc);
426 /* Initialize stats */
427 bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
428 ixlv_add_sysctls(sc);
430 /* Register for VLAN events */
431 vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
432 ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
433 vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
434 ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
436 /* We want AQ enabled early */
437 ixlv_enable_adminq_irq(hw);
439 /* Set things up to run init */
440 sc->init_state = IXLV_INIT_READY;
442 ixl_vc_init_mgr(sc, &sc->vc_mgr);
444 INIT_DBG_DEV(dev, "end");
448 ixlv_free_queues(vsi);
450 free(sc->vf_res, M_DEVBUF);
452 i40e_shutdown_adminq(hw);
454 ixlv_free_pci_resources(sc);
456 mtx_destroy(&sc->mtx);
457 ixlv_free_filters(sc);
458 INIT_DBG_DEV(dev, "end: error %d", error);
462 /*********************************************************************
463 * Device removal routine
465 * The detach entry point is called when the driver is being removed.
466 * This routine stops the adapter and deallocates all the resources
467 * that were allocated for driver operation.
469 * return 0 on success, positive on failure
470 *********************************************************************/
473 ixlv_detach(device_t dev)
475 struct ixlv_sc *sc = device_get_softc(dev);
476 struct ixl_vsi *vsi = &sc->vsi;
478 INIT_DBG_DEV(dev, "begin");
480 /* Make sure VLANS are not using driver */
481 if (vsi->ifp->if_vlantrunk != NULL) {
482 if_printf(vsi->ifp, "Vlan in use, detach first\n");
483 INIT_DBG_DEV(dev, "end");
/* Detach from the network stack before tearing anything down. */
488 ether_ifdetach(vsi->ifp);
489 if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
492 mtx_unlock(&sc->mtx);
495 /* Unregister VLAN events */
496 if (vsi->vlan_attach != NULL)
497 EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
498 if (vsi->vlan_detach != NULL)
499 EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
/* Quiesce the virtchnl command manager, then shut the AdminQ. */
502 callout_drain(&sc->vc_mgr.callout);
504 i40e_shutdown_adminq(&sc->hw);
505 taskqueue_free(sc->tq);
/*
 * Release resources in roughly reverse order of ixlv_attach():
 * VF config buffer, PCI/MSIX resources, queue memory, lock, filters.
 */
507 free(sc->vf_res, M_DEVBUF);
508 ixlv_free_pci_resources(sc);
509 ixlv_free_queues(vsi);
510 mtx_destroy(&sc->mtx);
511 ixlv_free_filters(sc);
513 bus_generic_detach(dev);
514 INIT_DBG_DEV(dev, "end");
518 /*********************************************************************
520 * Shutdown entry point
522 **********************************************************************/
525 ixlv_shutdown(device_t dev)
527 struct ixlv_sc *sc = device_get_softc(dev);
529 INIT_DBG_DEV(dev, "begin");
/*
 * NOTE(review): this listing is elided here — the matching
 * mtx_lock(&sc->mtx) and the stop call presumably precede this
 * unlock; confirm against the full source.
 */
533 mtx_unlock(&sc->mtx);
535 INIT_DBG_DEV(dev, "end");
540 * Configure TXCSUM(IPV6) and TSO(4/6)
541 * - the hardware handles these together so we
545 ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
547 /* Enable/disable TXCSUM/TSO4 */
548 if (!(ifp->if_capenable & IFCAP_TXCSUM)
549 && !(ifp->if_capenable & IFCAP_TSO4)) {
550 if (mask & IFCAP_TXCSUM) {
551 ifp->if_capenable |= IFCAP_TXCSUM;
552 /* enable TXCSUM, restore TSO if previously enabled */
553 if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
554 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
555 ifp->if_capenable |= IFCAP_TSO4;
558 else if (mask & IFCAP_TSO4) {
559 ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
560 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
562 "TSO4 requires txcsum, enabling both...\n");
564 } else if((ifp->if_capenable & IFCAP_TXCSUM)
565 && !(ifp->if_capenable & IFCAP_TSO4)) {
566 if (mask & IFCAP_TXCSUM)
567 ifp->if_capenable &= ~IFCAP_TXCSUM;
568 else if (mask & IFCAP_TSO4)
569 ifp->if_capenable |= IFCAP_TSO4;
570 } else if((ifp->if_capenable & IFCAP_TXCSUM)
571 && (ifp->if_capenable & IFCAP_TSO4)) {
572 if (mask & IFCAP_TXCSUM) {
573 vsi->flags |= IXL_FLAGS_KEEP_TSO4;
574 ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
576 "TSO4 requires txcsum, disabling both...\n");
577 } else if (mask & IFCAP_TSO4)
578 ifp->if_capenable &= ~IFCAP_TSO4;
581 /* Enable/disable TXCSUM_IPV6/TSO6 */
582 if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
583 && !(ifp->if_capenable & IFCAP_TSO6)) {
584 if (mask & IFCAP_TXCSUM_IPV6) {
585 ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
586 if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
587 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
588 ifp->if_capenable |= IFCAP_TSO6;
590 } else if (mask & IFCAP_TSO6) {
591 ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
592 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
594 "TSO6 requires txcsum6, enabling both...\n");
596 } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
597 && !(ifp->if_capenable & IFCAP_TSO6)) {
598 if (mask & IFCAP_TXCSUM_IPV6)
599 ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
600 else if (mask & IFCAP_TSO6)
601 ifp->if_capenable |= IFCAP_TSO6;
602 } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
603 && (ifp->if_capenable & IFCAP_TSO6)) {
604 if (mask & IFCAP_TXCSUM_IPV6) {
605 vsi->flags |= IXL_FLAGS_KEEP_TSO6;
606 ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
608 "TSO6 requires txcsum6, disabling both...\n");
609 } else if (mask & IFCAP_TSO6)
610 ifp->if_capenable &= ~IFCAP_TSO6;
614 /*********************************************************************
617 * ixlv_ioctl is called when the user wants to configure the
620 * return 0 on success, positive on failure
621 **********************************************************************/
624 ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
626 struct ixl_vsi *vsi = ifp->if_softc;
627 struct ixlv_sc *sc = vsi->back;
628 struct ifreq *ifr = (struct ifreq *)data;
629 #if defined(INET) || defined(INET6)
630 struct ifaddr *ifa = (struct ifaddr *)data;
631 bool avoid_reset = FALSE;
640 if (ifa->ifa_addr->sa_family == AF_INET)
644 if (ifa->ifa_addr->sa_family == AF_INET6)
647 #if defined(INET) || defined(INET6)
649 ** Calling init results in link renegotiation,
650 ** so we avoid doing it when possible.
653 ifp->if_flags |= IFF_UP;
654 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
657 if (!(ifp->if_flags & IFF_NOARP))
658 arp_ifinit(ifp, ifa);
661 error = ether_ioctl(ifp, command, data);
665 IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
667 if (ifr->ifr_mtu > IXL_MAX_FRAME -
668 ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
670 IOCTL_DBG_IF(ifp, "mtu too large");
672 IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", ifp->if_mtu, ifr->ifr_mtu);
673 // ERJ: Interestingly enough, these types don't match
674 ifp->if_mtu = (u_long)ifr->ifr_mtu;
675 vsi->max_frame_size =
676 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
677 + ETHER_VLAN_ENCAP_LEN;
678 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
679 ixlv_init_locked(sc);
681 mtx_unlock(&sc->mtx);
684 IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
686 if (ifp->if_flags & IFF_UP) {
687 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
688 ixlv_init_locked(sc);
690 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
692 sc->if_flags = ifp->if_flags;
693 mtx_unlock(&sc->mtx);
696 IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
697 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
699 ixlv_disable_intr(vsi);
701 ixlv_enable_intr(vsi);
702 mtx_unlock(&sc->mtx);
706 IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
707 if (sc->init_state == IXLV_RUNNING) {
709 ixlv_disable_intr(vsi);
711 ixlv_enable_intr(vsi);
712 mtx_unlock(&sc->mtx);
717 IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
718 error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
722 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
723 IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");
725 ixlv_cap_txcsum_tso(vsi, ifp, mask);
727 if (mask & IFCAP_RXCSUM)
728 ifp->if_capenable ^= IFCAP_RXCSUM;
729 if (mask & IFCAP_RXCSUM_IPV6)
730 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
731 if (mask & IFCAP_LRO)
732 ifp->if_capenable ^= IFCAP_LRO;
733 if (mask & IFCAP_VLAN_HWTAGGING)
734 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
735 if (mask & IFCAP_VLAN_HWFILTER)
736 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
737 if (mask & IFCAP_VLAN_HWTSO)
738 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
739 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
742 VLAN_CAPABILITIES(ifp);
748 IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
749 error = ether_ioctl(ifp, command, data);
757 ** To do a reinit on the VF is unfortunately more complicated
758 ** than a physical device, we must have the PF more or less
759 ** completely recreate our memory, so many things that were
760 ** done only once at attach in traditional drivers now must be
761 ** redone at each reinitialization. This function does that
762 ** 'prelude' so we can then call the normal locked init code.
765 ixlv_reinit_locked(struct ixlv_sc *sc)
767 struct i40e_hw *hw = &sc->hw;
768 struct ixl_vsi *vsi = &sc->vsi;
769 struct ifnet *ifp = vsi->ifp;
770 struct ixlv_mac_filter *mf, *mf_temp;
771 struct ixlv_vlan_filter *vf;
774 INIT_DBG_IF(ifp, "begin");
776 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
779 error = ixlv_reset(sc);
781 INIT_DBG_IF(ifp, "VF was reset");
783 /* set the state in case we went thru RESET */
784 sc->init_state = IXLV_RUNNING;
787 ** Resetting the VF drops all filters from hardware;
788 ** we need to mark them to be re-added in init.
790 SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
791 if (mf->flags & IXL_FILTER_DEL) {
792 SLIST_REMOVE(sc->mac_filters, mf,
793 ixlv_mac_filter, next);
796 mf->flags |= IXL_FILTER_ADD;
798 if (vsi->num_vlans != 0)
799 SLIST_FOREACH(vf, sc->vlan_filters, next)
800 vf->flags = IXL_FILTER_ADD;
801 else { /* clean any stale filters */
802 while (!SLIST_EMPTY(sc->vlan_filters)) {
803 vf = SLIST_FIRST(sc->vlan_filters);
804 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
809 ixlv_enable_adminq_irq(hw);
810 ixl_vc_flush(&sc->vc_mgr);
812 INIT_DBG_IF(ifp, "end");
817 ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
818 enum i40e_status_code code)
825 * Ignore "Adapter Stopped" message as that happens if an ifconfig down
826 * happens while a command is in progress, so we don't print an error
829 if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
830 if_printf(sc->vsi.ifp,
831 "Error %d waiting for PF to complete operation %d\n",
837 ixlv_init_locked(struct ixlv_sc *sc)
839 struct i40e_hw *hw = &sc->hw;
840 struct ixl_vsi *vsi = &sc->vsi;
841 struct ixl_queue *que = vsi->queues;
842 struct ifnet *ifp = vsi->ifp;
845 INIT_DBG_IF(ifp, "begin");
847 IXLV_CORE_LOCK_ASSERT(sc);
849 /* Do a reinit first if an init has already been done */
850 if ((sc->init_state == IXLV_RUNNING) ||
851 (sc->init_state == IXLV_RESET_REQUIRED) ||
852 (sc->init_state == IXLV_RESET_PENDING))
853 error = ixlv_reinit_locked(sc);
854 /* Don't bother with init if we failed reinit */
858 /* Remove existing MAC filter if new MAC addr is set */
859 if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
860 error = ixlv_del_mac_filter(sc, hw->mac.addr);
862 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
863 IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
867 /* Check for an LAA mac address... */
868 bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);
870 ifp->if_hwassist = 0;
871 if (ifp->if_capenable & IFCAP_TSO)
872 ifp->if_hwassist |= CSUM_TSO;
873 if (ifp->if_capenable & IFCAP_TXCSUM)
874 ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
875 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
876 ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;
878 /* Add mac filter for this VF to PF */
879 if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
880 error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
881 if (!error || error == EEXIST)
882 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
883 IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
887 /* Setup vlan's if needed */
888 ixlv_setup_vlan_filters(sc);
890 /* Prepare the queues for operation */
891 for (int i = 0; i < vsi->num_queues; i++, que++) {
892 struct rx_ring *rxr = &que->rxr;
894 ixl_init_tx_ring(que);
896 if (vsi->max_frame_size <= MCLBYTES)
897 rxr->mbuf_sz = MCLBYTES;
899 rxr->mbuf_sz = MJUMPAGESIZE;
900 ixl_init_rx_ring(que);
903 /* Configure queues */
904 ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
905 IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);
911 ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
912 IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);
915 ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
916 IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);
918 /* Start the local timer */
919 callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
921 sc->init_state = IXLV_RUNNING;
924 INIT_DBG_IF(ifp, "end");
929 ** Init entry point for the stack
934 struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
935 struct ixlv_sc *sc = vsi->back;
939 ixlv_init_locked(sc);
940 mtx_unlock(&sc->mtx);
942 /* Wait for init_locked to finish */
943 while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
944 && ++retries < 100) {
947 if (retries >= IXLV_AQ_MAX_ERR)
949 "Init failed to complete in alloted time!\n");
953 * ixlv_attach() helper function; gathers information about
954 * the (virtual) hardware for use elsewhere in the driver.
957 ixlv_init_hw(struct ixlv_sc *sc)
959 struct i40e_hw *hw = &sc->hw;
960 device_t dev = sc->dev;
962 /* Save off the information about this board */
963 hw->vendor_id = pci_get_vendor(dev);
964 hw->device_id = pci_get_device(dev);
/* Revision ID is a single config-space byte; subsystem IDs are 16-bit. */
965 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
966 hw->subsystem_vendor_id =
967 pci_read_config(dev, PCIR_SUBVEND_0, 2);
968 hw->subsystem_device_id =
969 pci_read_config(dev, PCIR_SUBDEV_0, 2);
/* Record PCI slot/function so the shared i40e code can identify us. */
971 hw->bus.device = pci_get_slot(dev);
972 hw->bus.func = pci_get_function(dev);
976 * ixlv_attach() helper function; initalizes the admin queue
977 * and attempts to establish contact with the PF by
978 * retrying the initial "API version" message several times
979 * or until the PF responds.
982 ixlv_setup_vc(struct ixlv_sc *sc)
984 struct i40e_hw *hw = &sc->hw;
985 device_t dev = sc->dev;
986 int error = 0, ret_error = 0, asq_retries = 0;
987 bool send_api_ver_retried = 0;
989 /* Need to set these AQ paramters before initializing AQ */
990 hw->aq.num_arq_entries = IXL_AQ_LEN;
991 hw->aq.num_asq_entries = IXL_AQ_LEN;
992 hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
993 hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
995 for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
996 /* Initialize admin queue */
997 error = i40e_init_adminq(hw);
999 device_printf(dev, "%s: init_adminq failed: %d\n",
1005 INIT_DBG_DEV(dev, "Initialized Admin Queue, attempt %d", i+1);
1008 /* Send VF's API version */
1009 error = ixlv_send_api_ver(sc);
1011 i40e_shutdown_adminq(hw);
1013 device_printf(dev, "%s: unable to send api"
1014 " version to PF on attempt %d, error %d\n",
1015 __func__, i+1, error);
1019 while (!i40e_asq_done(hw)) {
1020 if (++asq_retries > IXLV_AQ_MAX_ERR) {
1021 i40e_shutdown_adminq(hw);
1022 DDPRINTF(dev, "Admin Queue timeout "
1023 "(waiting for send_api_ver), %d more retries...",
1024 IXLV_AQ_MAX_ERR - (i + 1));
1028 i40e_msec_delay(10);
1030 if (asq_retries > IXLV_AQ_MAX_ERR)
1033 INIT_DBG_DEV(dev, "Sent API version message to PF");
1035 /* Verify that the VF accepts the PF's API version */
1036 error = ixlv_verify_api_ver(sc);
1037 if (error == ETIMEDOUT) {
1038 if (!send_api_ver_retried) {
1039 /* Resend message, one more time */
1040 send_api_ver_retried++;
1042 "%s: Timeout while verifying API version on first"
1043 " try!\n", __func__);
1047 "%s: Timeout while verifying API version on second"
1048 " try!\n", __func__);
1055 "%s: Unable to verify API version,"
1056 " error %d\n", __func__, error);
1063 i40e_shutdown_adminq(hw);
1068 * ixlv_attach() helper function; asks the PF for this VF's
1069 * configuration, and saves the information if it receives it.
1072 ixlv_vf_config(struct ixlv_sc *sc)
1074 struct i40e_hw *hw = &sc->hw;
1075 device_t dev = sc->dev;
1076 int bufsz, error = 0, ret_error = 0;
1077 int asq_retries, retried = 0;
1080 error = ixlv_send_vf_config_msg(sc);
1083 "%s: Unable to send VF config request, attempt %d,"
1084 " error %d\n", __func__, retried + 1, error);
1089 while (!i40e_asq_done(hw)) {
1090 if (++asq_retries > IXLV_AQ_MAX_ERR) {
1091 device_printf(dev, "%s: Admin Queue timeout "
1092 "(waiting for send_vf_config_msg), attempt %d\n",
1093 __func__, retried + 1);
1097 i40e_msec_delay(10);
1100 INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
1104 bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
1105 (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
1106 sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
1109 "%s: Unable to allocate memory for VF configuration"
1110 " message from PF on attempt %d\n", __func__, retried + 1);
1116 /* Check for VF config response */
1117 error = ixlv_get_vf_config(sc);
1118 if (error == ETIMEDOUT) {
1119 /* The 1st time we timeout, send the configuration message again */
1127 "%s: Unable to get VF configuration from PF after %d tries!\n",
1128 __func__, retried + 1);
1134 free(sc->vf_res, M_DEVBUF);
1140 * Allocate MSI/X vectors, setup the AQ vector early
1143 ixlv_init_msix(struct ixlv_sc *sc)
1145 device_t dev = sc->dev;
1146 int rid, want, vectors, queues, available;
1148 rid = PCIR_BAR(IXL_BAR);
1149 sc->msix_mem = bus_alloc_resource_any(dev,
1150 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1151 if (!sc->msix_mem) {
1152 /* May not be enabled */
1153 device_printf(sc->dev,
1154 "Unable to map MSIX table \n");
1158 available = pci_msix_count(dev);
1159 if (available == 0) { /* system has msix disabled */
1160 bus_release_resource(dev, SYS_RES_MEMORY,
1162 sc->msix_mem = NULL;
1166 /* Figure out a reasonable auto config value */
1167 queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
1169 /* Override with hardcoded value if sane */
1170 if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues))
1171 queues = ixlv_max_queues;
1173 /* If we're doing RSS, clamp at the number of RSS buckets */
1174 if (queues > rss_getnumbuckets())
1175 queues = rss_getnumbuckets();
1177 /* Enforce the VF max value */
1178 if (queues > IXLV_MAX_QUEUES)
1179 queues = IXLV_MAX_QUEUES;
1182 ** Want one vector (RX/TX pair) per queue
1183 ** plus an additional for the admin queue.
1186 if (want <= available) /* Have enough */
1189 device_printf(sc->dev,
1190 "MSIX Configuration Problem, "
1191 "%d vectors available but %d wanted!\n",
1198 * If we're doing RSS, the number of queues needs to
1199 * match the number of RSS buckets that are configured.
1201 * + If there's more queues than RSS buckets, we'll end
1202 * up with queues that get no traffic.
1204 * + If there's more RSS buckets than queues, we'll end
1205 * up having multiple RSS buckets map to the same queue,
1206 * so there'll be some contention.
1208 if (queues != rss_getnumbuckets()) {
1210 "%s: queues (%d) != RSS buckets (%d)"
1211 "; performance will be impacted.\n",
1212 __func__, queues, rss_getnumbuckets());
1216 if (pci_alloc_msix(dev, &vectors) == 0) {
1217 device_printf(sc->dev,
1218 "Using MSIX interrupts with %d vectors\n", vectors);
1220 sc->vsi.num_queues = queues;
1224 ** Explicitly set the guest PCI BUSMASTER capability
1225 ** and we must rewrite the ENABLE in the MSIX control
1226 ** register again at this point to cause the host to
1227 ** successfully initialize us.
1232 pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1233 pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
1234 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1235 pci_find_cap(dev, PCIY_MSIX, &rid);
1236 rid += PCIR_MSIX_CTRL;
1237 msix_ctrl = pci_read_config(dev, rid, 2);
1238 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1239 pci_write_config(dev, rid, msix_ctrl, 2);
1242 /* Next we need to setup the vector for the Admin Queue */
1243 rid = 1; // zero vector + 1
1244 sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1245 &rid, RF_SHAREABLE | RF_ACTIVE);
1246 if (sc->res == NULL) {
1247 device_printf(dev,"Unable to allocate"
1248 " bus resource: AQ interrupt \n");
1251 if (bus_setup_intr(dev, sc->res,
1252 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1253 ixlv_msix_adminq, sc, &sc->tag)) {
1255 device_printf(dev, "Failed to register AQ handler");
1258 bus_describe_intr(dev, sc->res, sc->tag, "adminq");
1263 /* The VF driver MUST use MSIX */
/*
 * ixlv_allocate_pci_resources
 * Map the device's BAR0 register space, stash the bus-space tag/handle
 * in the osdep shadow struct used by the shared i40e code, disable
 * admin-queue interrupts, and then configure MSI-X.
 * NOTE(review): this listing is missing lines (return type, braces,
 * error-path returns); all tokens below are kept exactly as found.
 */
1268 ixlv_allocate_pci_resources(struct ixlv_sc *sc)
1271 device_t dev = sc->dev;
/* Map BAR0 (device registers) */
1274 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1277 if (!(sc->pci_mem)) {
1278 device_printf(dev,"Unable to allocate bus resource: memory\n");
/* Save mapping details for the shared-code register accessors */
1282 sc->osdep.mem_bus_space_tag =
1283 rman_get_bustag(sc->pci_mem);
1284 sc->osdep.mem_bus_space_handle =
1285 rman_get_bushandle(sc->pci_mem);
1286 sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
/* VFGEN_RSTAT read is used as the posted-write flush register */
1287 sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
1288 sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
1290 sc->hw.back = &sc->osdep;
1292 /* Disable adminq interrupts */
1293 ixlv_disable_adminq_irq(&sc->hw);
1296 ** Now setup MSI/X, it will return
1297 ** us the number of supported vectors
1299 sc->msix = ixlv_init_msix(sc);
1301 /* We fail without MSIX support */
/*
 * ixlv_free_pci_resources
 * Tear down, in reverse order of allocation: per-queue MSI-X interrupt
 * handlers and IRQ resources, the admin-queue interrupt, the MSI-X
 * vectors themselves, and finally the MSI-X table and BAR0 mappings.
 * NOTE(review): braces/NULL-resets for several if-bodies are missing
 * from this listing; tokens are kept exactly as found.
 */
1309 ixlv_free_pci_resources(struct ixlv_sc *sc)
1311 struct ixl_vsi *vsi = &sc->vsi;
1312 struct ixl_queue *que = vsi->queues;
1313 device_t dev = sc->dev;
1315 /* We may get here before stations are setup */
1320 ** Release all msix queue resources:
1322 for (int i = 0; i < vsi->num_queues; i++, que++) {
/* rid 1 is the adminq vector, so queue vectors start at msix + 1 */
1323 int rid = que->msix + 1;
1324 if (que->tag != NULL) {
1325 bus_teardown_intr(dev, que->res, que->tag);
1328 if (que->res != NULL)
1329 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1333 /* Clean the AdminQ interrupt */
1334 if (sc->tag != NULL) {
1335 bus_teardown_intr(dev, sc->res, sc->tag);
1338 if (sc->res != NULL)
1339 bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
1341 pci_release_msi(dev);
1343 if (sc->msix_mem != NULL)
1344 bus_release_resource(dev, SYS_RES_MEMORY,
1345 PCIR_BAR(IXL_BAR), sc->msix_mem);
1347 if (sc->pci_mem != NULL)
1348 bus_release_resource(dev, SYS_RES_MEMORY,
1349 PCIR_BAR(0), sc->pci_mem);
1355 * Create taskqueue and tasklet for Admin Queue interrupts.
/*
 * ixlv_init_taskqueue
 * One fast taskqueue with a single thread services deferred admin-queue
 * work (ixlv_do_adminq), enqueued from the MSI-X adminq handler.
 */
1358 ixlv_init_taskqueue(struct ixlv_sc *sc)
1362 TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);
1364 sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1365 taskqueue_thread_enqueue, &sc->tq);
1366 taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
1367 device_get_nameunit(sc->dev));
1372 /*********************************************************************
1374 * Setup MSIX Interrupt resources and handlers for the VSI queues
1376 **********************************************************************/
/*
 * ixlv_assign_msix
 * For every VSI queue: allocate an IRQ resource, install the MSI-X
 * handler (ixlv_msix_que), create the per-queue taskqueue, and — when
 * RSS is compiled in — bind the vector and pin the taskqueue thread to
 * the CPU that owns the queue's RSS bucket.
 * NOTE(review): this listing is missing lines (the rid assignment,
 * error unwinding, #ifdef RSS markers); only the shift fix below was
 * changed, everything else is kept exactly as found.
 */
1378 ixlv_assign_msix(struct ixlv_sc *sc)
1380 device_t dev = sc->dev;
1381 struct ixl_vsi *vsi = &sc->vsi;
1382 struct ixl_queue *que = vsi->queues;
1383 struct tx_ring *txr;
1384 int error, rid, vector = 1;
/* One vector per queue pair; vector 0 is reserved for the admin queue */
1386 for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1390 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1391 RF_SHAREABLE | RF_ACTIVE);
1392 if (que->res == NULL) {
1393 device_printf(dev,"Unable to allocate"
1394 " bus resource: que interrupt [%d]\n", vector);
1397 /* Set the handler function */
1398 error = bus_setup_intr(dev, que->res,
1399 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1400 ixlv_msix_que, que, &que->tag);
1403 device_printf(dev, "Failed to register que handler");
1406 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1407 /* Bind the vector to a CPU */
1409 cpu_id = rss_getcpu(i % rss_getnumbuckets());
1411 bus_bind_intr(dev, que->res, cpu_id);
/*
 * Fix: do the shift in u64, not int.  The previous form,
 * "(u64)(1 << que->msix)", shifted a 32-bit int and only then
 * widened — undefined behavior for que->msix >= 31 — and was
 * inconsistent with the "(u64)1 << que->me" idiom used for
 * active_queues elsewhere in this driver.
 */
1413 vsi->que_mask |= ((u64)1 << que->msix);
1414 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1415 TASK_INIT(&que->task, 0, ixlv_handle_que, que);
1416 que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
1417 taskqueue_thread_enqueue, &que->tq);
/* RSS build: pin the servicing thread to the bucket's CPU */
1419 taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
1420 cpu_id, "%s (bucket %d)",
1421 device_get_nameunit(dev), cpu_id);
1423 taskqueue_start_threads(&que->tq, 1, PI_NET,
1424 "%s que", device_get_nameunit(dev));
1433 ** Requests a VF reset from the PF.
1435 ** Requires the VF's Admin Queue to be initialized.
/*
 * ixlv_reset
 * Ask the PF to reset this VF (unless a reset is already pending),
 * poll until the reset completes, then bounce the admin queue
 * (shutdown + re-init) so it is usable again afterwards.
 */
1438 ixlv_reset(struct ixlv_sc *sc)
1440 struct i40e_hw *hw = &sc->hw;
1441 device_t dev = sc->dev;
1444 /* Ask the PF to reset us if we are initiating */
1445 if (sc->init_state != IXLV_RESET_PENDING)
1446 ixlv_request_reset(sc);
/* Give the PF time to act before polling VFGEN_RSTAT */
1448 i40e_msec_delay(100);
1449 error = ixlv_reset_complete(hw);
1451 device_printf(dev, "%s: VF reset failed\n",
1456 error = i40e_shutdown_adminq(hw);
1458 device_printf(dev, "%s: shutdown_adminq failed: %d\n",
1463 error = i40e_init_adminq(hw);
1465 device_printf(dev, "%s: init_adminq failed: %d\n",
/*
 * ixlv_reset_complete
 * Poll VFGEN_RSTAT (up to 100 x 100ms = ~10s) until the VF reset state
 * machine reports VFACTIVE or COMPLETED.
 */
1474 ixlv_reset_complete(struct i40e_hw *hw)
1478 for (int i = 0; i < 100; i++) {
1479 reg = rd32(hw, I40E_VFGEN_RSTAT) &
1480 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1482 if ((reg == I40E_VFR_VFACTIVE) ||
1483 (reg == I40E_VFR_COMPLETED))
1485 i40e_msec_delay(100);
1492 /*********************************************************************
1494 * Setup networking device structure and register an interface.
1496 **********************************************************************/
/*
 * ixlv_setup_interface
 * Allocate and populate the ifnet: entry points (init/ioctl/transmit/
 * qflush), capabilities (checksum offload, TSO, jumbo, VLAN tagging),
 * attach Ethernet, and register the (AUTO-only) media types.
 */
1498 ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
1501 struct ixl_vsi *vsi = &sc->vsi;
1502 struct ixl_queue *que = vsi->queues;
1504 INIT_DBG_DEV(dev, "begin");
1506 ifp = vsi->ifp = if_alloc(IFT_ETHER);
1508 device_printf(dev, "%s: could not allocate ifnet"
1509 " structure!\n", __func__);
1513 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1515 ifp->if_mtu = ETHERMTU;
/* NOTE(review): 4000000000 exceeds INT32_MAX; presumably meant as a
 * 4 Gbps placeholder (the "??" is the original author's) — confirm
 * whether IF_Gbps(40) was intended. */
1516 ifp->if_baudrate = 4000000000; // ??
1517 ifp->if_init = ixlv_init;
1518 ifp->if_softc = vsi;
1519 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1520 ifp->if_ioctl = ixlv_ioctl;
1522 #if __FreeBSD_version >= 1100000
1523 if_setgetcounterfn(ifp, ixl_get_counter);
1526 ifp->if_transmit = ixl_mq_start;
1528 ifp->if_qflush = ixl_qflush;
/* Legacy send-queue depth tracks the descriptor ring size */
1529 ifp->if_snd.ifq_maxlen = que->num_desc - 2;
1531 ether_ifattach(ifp, sc->hw.mac.addr);
1533 vsi->max_frame_size =
1534 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1535 + ETHER_VLAN_ENCAP_LEN;
1538 * Tell the upper layer(s) we support long frames.
1540 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1542 ifp->if_capabilities |= IFCAP_HWCSUM;
1543 ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1544 ifp->if_capabilities |= IFCAP_TSO;
1545 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1547 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1552 ifp->if_capenable = ifp->if_capabilities;
1555 ** Don't turn this on by default, if vlans are
1556 ** created on another pseudo device (eg. lagg)
1557 ** then vlan events are not passed thru, breaking
1558 ** operation, but with HW FILTER off it works. If
1559 ** using vlans directly on the ixl driver you can
1560 ** enable this and get full hardware tag filtering.
1562 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1565 * Specify the media types supported by this adapter and register
1566 * callbacks to update media and link information
1568 ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
1571 // JFV Add media types later?
1573 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1574 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1576 INIT_DBG_DEV(dev, "end");
1581 ** Allocate and setup the interface queues
/*
 * ixlv_setup_queues
 * Allocate the queue array, then for each queue: init the TX ring
 * (lock, descriptor DMA area with an extra u32 for head write-back,
 * soft state, buf_ring) and the RX ring (lock, descriptor DMA area,
 * soft state).  The trailing loop is the error-unwind path that frees
 * whatever was allocated before the failure.
 */
1584 ixlv_setup_queues(struct ixlv_sc *sc)
1586 device_t dev = sc->dev;
1587 struct ixl_vsi *vsi;
1588 struct ixl_queue *que;
1589 struct tx_ring *txr;
1590 struct rx_ring *rxr;
1592 int error = I40E_SUCCESS;
1595 vsi->back = (void *)sc;
1599 /* Get memory for the station queues */
1601 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
1602 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1603 device_printf(dev, "Unable to allocate queue memory\n");
1608 for (int i = 0; i < vsi->num_queues; i++) {
1609 que = &vsi->queues[i];
1610 que->num_desc = ixlv_ringsz;
1613 /* mark the queue as active */
1614 vsi->active_queues |= (u64)1 << que->me;
1618 txr->tail = I40E_QTX_TAIL1(que->me);
1619 /* Initialize the TX lock */
1620 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
1621 device_get_nameunit(dev), que->me);
1622 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
1624 ** Create the TX descriptor ring, the extra int is
1625 ** added as the location for HEAD WB.
1627 tsize = roundup2((que->num_desc *
1628 sizeof(struct i40e_tx_desc)) +
1629 sizeof(u32), DBA_ALIGN);
1630 if (i40e_allocate_dma_mem(&sc->hw,
1631 &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
1633 "Unable to allocate TX Descriptor memory\n");
1637 txr->base = (struct i40e_tx_desc *)txr->dma.va;
1638 bzero((void *)txr->base, tsize);
1639 /* Now allocate transmit soft structs for the ring */
1640 if (ixl_allocate_tx_data(que)) {
1642 "Critical Failure setting up TX structures\n");
1646 /* Allocate a buf ring */
1647 txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
1648 M_WAITOK, &txr->mtx);
1649 if (txr->br == NULL) {
1651 "Critical Failure setting up TX buf ring\n");
1657 * Next the RX queues...
1659 rsize = roundup2(que->num_desc *
1660 sizeof(union i40e_rx_desc), DBA_ALIGN);
1663 rxr->tail = I40E_QRX_TAIL1(que->me);
1665 /* Initialize the RX side lock */
1666 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
1667 device_get_nameunit(dev), que->me);
1668 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
1670 if (i40e_allocate_dma_mem(&sc->hw,
1671 &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
1673 "Unable to allocate RX Descriptor memory\n");
1677 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
1678 bzero((void *)rxr->base, rsize);
1680 /* Allocate receive soft structs for the ring*/
1681 if (ixl_allocate_rx_data(que)) {
1683 "Critical Failure setting up receive structs\n");
/* Error unwind: release DMA memory for every queue set up so far */
1692 for (int i = 0; i < vsi->num_queues; i++) {
1693 que = &vsi->queues[i];
1697 i40e_free_dma_mem(&sc->hw, &rxr->dma);
1699 i40e_free_dma_mem(&sc->hw, &txr->dma);
1701 free(vsi->queues, M_DEVBUF);
1708 ** This routine is run via an vlan config EVENT,
1709 ** it enables us to use the HW Filter table since
1710 ** we can get the vlan id. This just creates the
1711 ** entry in the soft version of the VFTA, init will
1712 ** repopulate the real table.
/*
 * ixlv_register_vlan
 * VLAN-config event handler: add the tag to the soft VLAN filter list
 * (if not already present) and queue an AQ command to program it.
 * NOTE(review): the matching mtx_lock and the malloc NULL check are
 * among the lines missing from this listing.
 */
1715 ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1717 struct ixl_vsi *vsi = arg;
1718 struct ixlv_sc *sc = vsi->back;
1719 struct ixlv_vlan_filter *v;
1722 if (ifp->if_softc != arg) /* Not our event */
1725 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1728 /* Sanity check - make sure it doesn't already exist */
1729 SLIST_FOREACH(v, sc->vlan_filters, next) {
1730 if (v->vlan == vtag)
1736 v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
1737 SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
1739 v->flags = IXL_FILTER_ADD;
1740 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
1741 IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
1742 mtx_unlock(&sc->mtx);
1747 ** This routine is run via an vlan
1748 ** unconfig EVENT, remove our entry
1749 ** in the soft vfta.
/*
 * ixlv_unregister_vlan
 * VLAN-unconfig event handler: mark the matching soft filter for
 * deletion and queue an AQ command to remove it from hardware.
 */
1752 ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1754 struct ixl_vsi *vsi = arg;
1755 struct ixlv_sc *sc = vsi->back;
1756 struct ixlv_vlan_filter *v;
1759 if (ifp->if_softc != arg)
1762 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1766 SLIST_FOREACH(v, sc->vlan_filters, next) {
1767 if (v->vlan == vtag) {
1768 v->flags = IXL_FILTER_DEL;
1774 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
1775 IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
1776 mtx_unlock(&sc->mtx);
1781 ** Get a new filter and add it to the mac filter list.
/*
 * ixlv_get_mac_filter
 * Allocate a zeroed MAC-filter entry, insert it at the head of the
 * softc's list, and return it (may be NULL on allocation failure —
 * the NULL check visible in the caller, not here).
 */
1783 static struct ixlv_mac_filter *
1784 ixlv_get_mac_filter(struct ixlv_sc *sc)
1786 struct ixlv_mac_filter *f;
1788 f = malloc(sizeof(struct ixlv_mac_filter),
1789 M_DEVBUF, M_NOWAIT | M_ZERO);
1791 SLIST_INSERT_HEAD(sc->mac_filters, f, next);
1797 ** Find the filter with matching MAC address
/*
 * ixlv_find_mac_filter
 * Linear scan of the softc's MAC filter list; returns the matching
 * entry (or NULL when not found, per the missing tail of the listing).
 */
1799 static struct ixlv_mac_filter *
1800 ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
1802 struct ixlv_mac_filter *f;
1805 SLIST_FOREACH(f, sc->mac_filters, next) {
1806 if (cmp_etheraddr(f->macaddr, macaddr)) {
1818 ** Admin Queue interrupt handler
/*
 * ixlv_msix_adminq
 * Interrupt-context handler for the adminq MSI-X vector: read and ack
 * the cause (ICR0), clear PBA via DYN_CTL01, and defer the real work
 * to the ixlv_do_adminq task.
 */
1821 ixlv_msix_adminq(void *arg)
1823 struct ixlv_sc *sc = arg;
1824 struct i40e_hw *hw = &sc->hw;
1827 reg = rd32(hw, I40E_VFINT_ICR01);
1828 mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
1830 reg = rd32(hw, I40E_VFINT_DYN_CTL01);
1831 reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
1832 wr32(hw, I40E_VFINT_DYN_CTL01, reg);
/* Heavy lifting happens in task context, not here */
1835 taskqueue_enqueue(sc->tq, &sc->aq_irq);
/*
 * ixlv_enable_intr
 * Re-arm the adminq interrupt and every per-queue interrupt.
 */
1840 ixlv_enable_intr(struct ixl_vsi *vsi)
1842 struct i40e_hw *hw = vsi->hw;
1843 struct ixl_queue *que = vsi->queues;
1845 ixlv_enable_adminq_irq(hw);
1846 for (int i = 0; i < vsi->num_queues; i++, que++)
1847 ixlv_enable_queue_irq(hw, que->me);
/*
 * ixlv_disable_intr
 * Mask the adminq interrupt and every per-queue interrupt.
 */
1851 ixlv_disable_intr(struct ixl_vsi *vsi)
1853 struct i40e_hw *hw = vsi->hw;
1854 struct ixl_queue *que = vsi->queues;
1856 ixlv_disable_adminq_irq(hw);
1857 for (int i = 0; i < vsi->num_queues; i++, que++)
1858 ixlv_disable_queue_irq(hw, que->me);
/*
 * ixlv_disable_adminq_irq
 * Clear DYN_CTL01 and the ICR0 enable mask, then read VFGEN_RSTAT to
 * flush the posted writes.
 */
1863 ixlv_disable_adminq_irq(struct i40e_hw *hw)
1865 wr32(hw, I40E_VFINT_DYN_CTL01, 0);
1866 wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
1868 rd32(hw, I40E_VFGEN_RSTAT);
/*
 * ixlv_enable_adminq_irq
 * Enable the adminq cause in ICR0 and set INTENA (with the no-ITR
 * index) in DYN_CTL01; the trailing read flushes the posted writes.
 */
1873 ixlv_enable_adminq_irq(struct i40e_hw *hw)
1875 wr32(hw, I40E_VFINT_DYN_CTL01,
1876 I40E_VFINT_DYN_CTL01_INTENA_MASK |
1877 I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1878 wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
1880 rd32(hw, I40E_VFGEN_RSTAT);
/*
 * ixlv_enable_queue_irq
 * Re-arm a single queue's interrupt: set INTENA and clear any pending
 * bit (CLEARPBA) in that queue's DYN_CTLN1 register.
 */
1885 ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
1889 reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1890 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
1891 wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
/*
 * ixlv_disable_queue_irq
 * Mask a single queue's interrupt; the VFGEN_RSTAT read flushes the
 * posted write.
 */
1895 ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
1897 wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
1898 rd32(hw, I40E_VFGEN_RSTAT);
1904 ** Provide a update to the queue RX
1905 ** interrupt moderation value.
/*
 * ixlv_set_queue_rx_itr
 * Adaptive RX interrupt moderation: classify recent traffic into a
 * latency bucket (low/average/bulk) from bytes-per-ITR-tick, pick the
 * matching ITR rate, exponentially smooth it toward the current value,
 * and write it to the queue's RX ITR register.  When dynamic ITR is
 * disabled, just sync the register to the static vsi setting.
 */
1908 ixlv_set_queue_rx_itr(struct ixl_queue *que)
1910 struct ixl_vsi *vsi = que->vsi;
1911 struct i40e_hw *hw = vsi->hw;
1912 struct rx_ring *rxr = &que->rxr;
1918 /* Idle, do nothing */
1919 if (rxr->bytes == 0)
1922 if (ixlv_dynamic_rx_itr) {
/* Normalize byte count by the current ITR interval */
1923 rx_bytes = rxr->bytes/rxr->itr;
1926 /* Adjust latency range */
1927 switch (rxr->latency) {
1928 case IXL_LOW_LATENCY:
1929 if (rx_bytes > 10) {
1930 rx_latency = IXL_AVE_LATENCY;
1931 rx_itr = IXL_ITR_20K;
1934 case IXL_AVE_LATENCY:
1935 if (rx_bytes > 20) {
1936 rx_latency = IXL_BULK_LATENCY;
1937 rx_itr = IXL_ITR_8K;
1938 } else if (rx_bytes <= 10) {
1939 rx_latency = IXL_LOW_LATENCY;
1940 rx_itr = IXL_ITR_100K;
1943 case IXL_BULK_LATENCY:
1944 if (rx_bytes <= 20) {
1945 rx_latency = IXL_AVE_LATENCY;
1946 rx_itr = IXL_ITR_20K;
1951 rxr->latency = rx_latency;
1953 if (rx_itr != rxr->itr) {
1954 /* do an exponential smoothing */
1955 rx_itr = (10 * rx_itr * rxr->itr) /
1956 ((9 * rx_itr) + rxr->itr);
1957 rxr->itr = rx_itr & IXL_MAX_ITR;
1958 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1959 que->me), rxr->itr);
1961 } else { /* We may have have toggled to non-dynamic */
1962 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
1963 vsi->rx_itr_setting = ixlv_rx_itr;
1964 /* Update the hardware if needed */
1965 if (rxr->itr != vsi->rx_itr_setting) {
1966 rxr->itr = vsi->rx_itr_setting;
1967 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1968 que->me), rxr->itr);
1978 ** Provide a update to the queue TX
1979 ** interrupt moderation value.
/*
 * ixlv_set_queue_tx_itr
 * TX twin of ixlv_set_queue_rx_itr: same latency-bucket heuristic and
 * exponential smoothing, applied to the queue's TX ITR register, with
 * a static-setting sync path when dynamic ITR is disabled.
 */
1982 ixlv_set_queue_tx_itr(struct ixl_queue *que)
1984 struct ixl_vsi *vsi = que->vsi;
1985 struct i40e_hw *hw = vsi->hw;
1986 struct tx_ring *txr = &que->txr;
1992 /* Idle, do nothing */
1993 if (txr->bytes == 0)
1996 if (ixlv_dynamic_tx_itr) {
1997 tx_bytes = txr->bytes/txr->itr;
2000 switch (txr->latency) {
2001 case IXL_LOW_LATENCY:
2002 if (tx_bytes > 10) {
2003 tx_latency = IXL_AVE_LATENCY;
2004 tx_itr = IXL_ITR_20K;
2007 case IXL_AVE_LATENCY:
2008 if (tx_bytes > 20) {
2009 tx_latency = IXL_BULK_LATENCY;
2010 tx_itr = IXL_ITR_8K;
2011 } else if (tx_bytes <= 10) {
2012 tx_latency = IXL_LOW_LATENCY;
2013 tx_itr = IXL_ITR_100K;
2016 case IXL_BULK_LATENCY:
2017 if (tx_bytes <= 20) {
2018 tx_latency = IXL_AVE_LATENCY;
2019 tx_itr = IXL_ITR_20K;
2024 txr->latency = tx_latency;
2026 if (tx_itr != txr->itr) {
2027 /* do an exponential smoothing */
2028 tx_itr = (10 * tx_itr * txr->itr) /
2029 ((9 * tx_itr) + txr->itr);
2030 txr->itr = tx_itr & IXL_MAX_ITR;
2031 wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2032 que->me), txr->itr);
2035 } else { /* We may have have toggled to non-dynamic */
2036 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2037 vsi->tx_itr_setting = ixlv_tx_itr;
2038 /* Update the hardware if needed */
2039 if (txr->itr != vsi->tx_itr_setting) {
2040 txr->itr = vsi->tx_itr_setting;
2041 wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2042 que->me), txr->itr);
2053 ** MSIX Interrupt Handlers and Tasklets
/*
 * ixlv_handle_que
 * Taskqueue handler for deferred queue work: clean RX, clean TX under
 * the TX lock, restart transmit if the buf ring has backlog, requeue
 * itself while work remains, and finally re-arm the queue interrupt.
 */
2057 ixlv_handle_que(void *context, int pending)
2059 struct ixl_queue *que = context;
2060 struct ixl_vsi *vsi = que->vsi;
2061 struct i40e_hw *hw = vsi->hw;
2062 struct tx_ring *txr = &que->txr;
2063 struct ifnet *ifp = vsi->ifp;
2066 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2067 more = ixl_rxeof(que, IXL_RX_LIMIT);
2068 mtx_lock(&txr->mtx);
2070 if (!drbr_empty(ifp, txr->br))
2071 ixl_mq_start_locked(ifp, txr);
2072 mtx_unlock(&txr->mtx);
2074 taskqueue_enqueue(que->tq, &que->task);
2079 /* Reenable this interrupt - hmmm */
2080 ixlv_enable_queue_irq(hw, que->me);
2085 /*********************************************************************
2087 * MSIX Queue Interrupt Service routine
2089 **********************************************************************/
/*
 * ixlv_msix_que
 * Per-queue interrupt handler: clean RX and TX, kick the taskqueue if
 * either side has remaining work (or the stack queued packets), update
 * the adaptive ITR values, and re-arm the interrupt.
 */
2091 ixlv_msix_que(void *arg)
2093 struct ixl_queue *que = arg;
2094 struct ixl_vsi *vsi = que->vsi;
2095 struct i40e_hw *hw = vsi->hw;
2096 struct tx_ring *txr = &que->txr;
2097 bool more_tx, more_rx;
2099 /* Spurious interrupts are ignored */
2100 if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
2105 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
2107 mtx_lock(&txr->mtx);
2108 more_tx = ixl_txeof(que);
2110 ** Make certain that if the stack
2111 ** has anything queued the task gets
2112 ** scheduled to handle it.
2114 if (!drbr_empty(vsi->ifp, txr->br))
2116 mtx_unlock(&txr->mtx);
2118 ixlv_set_queue_rx_itr(que);
2119 ixlv_set_queue_tx_itr(que);
2121 if (more_tx || more_rx)
2122 taskqueue_enqueue(que->tq, &que->task);
2124 ixlv_enable_queue_irq(hw, que->me);
2130 /*********************************************************************
2132 * Media Ioctl callback
2134 * This routine is called whenever the user queries the status of
2135 * the interface using ifconfig.
2137 **********************************************************************/
/*
 * ixlv_media_status
 * Refresh link state from the PF, then report IFM_AVALID/IFM_ACTIVE
 * and full-duplex under the softc mutex.  The early unlock path is the
 * link-down case (its condition line is missing from this listing).
 */
2139 ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
2141 struct ixl_vsi *vsi = ifp->if_softc;
2142 struct ixlv_sc *sc = vsi->back;
2144 INIT_DBG_IF(ifp, "begin");
2148 ixlv_update_link_status(sc);
2150 ifmr->ifm_status = IFM_AVALID;
2151 ifmr->ifm_active = IFM_ETHER;
2154 mtx_unlock(&sc->mtx);
2155 INIT_DBG_IF(ifp, "end: link not up");
2159 ifmr->ifm_status |= IFM_ACTIVE;
2160 /* Hardware is always full-duplex */
2161 ifmr->ifm_active |= IFM_FDX;
2162 mtx_unlock(&sc->mtx);
2163 INIT_DBG_IF(ifp, "end");
2167 /*********************************************************************
2169 * Media Ioctl callback
2171 * This routine is called when the user changes speed/duplex using
2172 * media/mediopt option with ifconfig.
2174 **********************************************************************/
/*
 * ixlv_media_change
 * A VF cannot change media; only validate that the requested media
 * type is Ethernet (error return lines missing from this listing).
 */
2176 ixlv_media_change(struct ifnet * ifp)
2178 struct ixl_vsi *vsi = ifp->if_softc;
2179 struct ifmedia *ifm = &vsi->media;
2181 INIT_DBG_IF(ifp, "begin");
2183 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2186 INIT_DBG_IF(ifp, "end");
2191 /*********************************************************************
2192 * Multicast Initialization
2194 * This routine is called by init to reset a fresh state.
2196 **********************************************************************/
/*
 * ixlv_init_multi
 * Mark every in-use multicast MAC filter for deletion and queue one
 * AQ command to flush them from hardware.
 */
2199 ixlv_init_multi(struct ixl_vsi *vsi)
2201 struct ixlv_mac_filter *f;
2202 struct ixlv_sc *sc = vsi->back;
2205 IOCTL_DBG_IF(vsi->ifp, "begin");
2207 /* First clear any multicast filters */
2208 SLIST_FOREACH(f, sc->mac_filters, next) {
2209 if ((f->flags & IXL_FILTER_USED)
2210 && (f->flags & IXL_FILTER_MC)) {
2211 f->flags |= IXL_FILTER_DEL;
2216 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2217 IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2220 IOCTL_DBG_IF(vsi->ifp, "end");
/*
 * ixlv_add_multi
 * Sync the ifnet multicast list into the softc's MAC filter list.
 * First count the addresses; if over MAX_MULTICAST_ADDR, flush
 * multicast filters and request multicast-promiscuous instead.
 * Otherwise add a filter per address and queue one AQ add command.
 */
2224 ixlv_add_multi(struct ixl_vsi *vsi)
2226 struct ifmultiaddr *ifma;
2227 struct ifnet *ifp = vsi->ifp;
2228 struct ixlv_sc *sc = vsi->back;
2231 IOCTL_DBG_IF(ifp, "begin");
2233 if_maddr_rlock(ifp);
2235 ** Get a count, to decide if we
2236 ** simply use multicast promiscuous.
2238 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2239 if (ifma->ifma_addr->sa_family != AF_LINK)
2243 if_maddr_runlock(ifp);
2245 // TODO: Remove -- cannot set promiscuous mode in a VF
2246 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
2247 /* delete all multicast filters */
2248 ixlv_init_multi(vsi);
2249 sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
2250 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2251 IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
2253 IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
/* Second pass: add a soft filter for each link-layer address */
2258 if_maddr_rlock(ifp);
2259 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2260 if (ifma->ifma_addr->sa_family != AF_LINK)
2262 if (!ixlv_add_mac_filter(sc,
2263 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2267 if_maddr_runlock(ifp);
2269 ** Notify AQ task that sw filters need to be
2273 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2274 IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
2277 IOCTL_DBG_IF(ifp, "end");
/*
 * ixlv_del_multi
 * Reverse sync: for each multicast filter in the softc's list, check
 * whether the address is still present on the ifnet; mark stale ones
 * with IXL_FILTER_DEL and queue one AQ delete command.
 */
2281 ixlv_del_multi(struct ixl_vsi *vsi)
2283 struct ixlv_mac_filter *f;
2284 struct ifmultiaddr *ifma;
2285 struct ifnet *ifp = vsi->ifp;
2286 struct ixlv_sc *sc = vsi->back;
2290 IOCTL_DBG_IF(ifp, "begin");
2292 /* Search for removed multicast addresses */
2293 if_maddr_rlock(ifp);
2294 SLIST_FOREACH(f, sc->mac_filters, next) {
2295 if ((f->flags & IXL_FILTER_USED)
2296 && (f->flags & IXL_FILTER_MC)) {
2297 /* check if mac address in filter is in sc's list */
2299 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2300 if (ifma->ifma_addr->sa_family != AF_LINK)
2303 (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2304 if (cmp_etheraddr(f->macaddr, mc_addr)) {
2309 /* if this filter is not in the sc's list, remove it */
2310 if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
2311 f->flags |= IXL_FILTER_DEL;
2313 IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
2314 MAC_FORMAT_ARGS(f->macaddr));
2316 else if (match == FALSE)
2317 IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
2318 MAC_FORMAT_ARGS(f->macaddr));
2321 if_maddr_runlock(ifp);
2324 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2325 IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2328 IOCTL_DBG_IF(ifp, "end");
2331 /*********************************************************************
2334 * This routine checks for link status,updates statistics,
2335 * and runs the watchdog check.
2337 **********************************************************************/
/*
 * ixlv_local_timer
 * Once-per-second callout (core lock held): bail if a PF-initiated
 * reset is in flight, request fresh stats, service the adminq, fire a
 * software interrupt at any queue with outstanding work, and track a
 * per-queue "busy" hang counter.  Only when ALL queues report hung do
 * we fall through to the reset path at the bottom.
 */
2340 ixlv_local_timer(void *arg)
2342 struct ixlv_sc *sc = arg;
2343 struct i40e_hw *hw = &sc->hw;
2344 struct ixl_vsi *vsi = &sc->vsi;
2345 struct ixl_queue *que = vsi->queues;
2346 device_t dev = sc->dev;
2350 IXLV_CORE_LOCK_ASSERT(sc);
2352 /* If Reset is in progress just bail */
2353 if (sc->init_state == IXLV_RESET_PENDING)
2356 /* Check for when PF triggers a VF reset */
2357 val = rd32(hw, I40E_VFGEN_RSTAT) &
2358 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2360 if (val != I40E_VFR_VFACTIVE
2361 && val != I40E_VFR_COMPLETED) {
2362 DDPRINTF(dev, "reset in progress! (%d)", val);
2366 ixlv_request_stats(sc);
2368 /* clean and process any events */
2369 taskqueue_enqueue(sc->tq, &sc->aq_irq);
2372 ** Check status on the queues for a hang
2374 mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
2375 I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK);
2377 for (int i = 0; i < vsi->num_queues; i++,que++) {
2378 /* Any queues with outstanding work get a sw irq */
2380 wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
2382 ** Each time txeof runs without cleaning, but there
2383 ** are uncleaned descriptors it increments busy. If
2384 ** we get to 5 we declare it hung.
2386 if (que->busy == IXL_QUEUE_HUNG) {
2388 /* Mark the queue as inactive */
2389 vsi->active_queues &= ~((u64)1 << que->me);
2392 /* Check if we've come back from hung */
2393 if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
2394 vsi->active_queues |= ((u64)1 << que->me);
2396 if (que->busy >= IXL_MAX_TX_BUSY) {
2397 device_printf(dev,"Warning queue %d "
2398 "appears to be hung!\n", i);
2399 que->busy = IXL_QUEUE_HUNG;
2403 /* Only reset when all queues show hung */
2404 if (hung == vsi->num_queues)
2406 callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
/* All queues hung: flag a reset and reinitialize */
2410 device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
2411 sc->init_state = IXLV_RESET_REQUIRED;
2412 ixlv_init_locked(sc);
2416 ** Note: this routine updates the OS on the link state
2417 ** the real check of the hardware only happens with
2418 ** a link interrupt.
/*
 * ixlv_update_link_status
 * Push the cached link state to the stack: on an up transition report
 * 40G or 10G (only two speeds distinguished here) and raise the ifnet
 * link state; on a down transition lower it.
 */
2421 ixlv_update_link_status(struct ixlv_sc *sc)
2423 struct ixl_vsi *vsi = &sc->vsi;
2424 struct ifnet *ifp = vsi->ifp;
2427 if (vsi->link_active == FALSE) {
2429 if_printf(ifp,"Link is Up, %d Gbps\n",
2430 (sc->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
2431 vsi->link_active = TRUE;
2432 if_link_state_change(ifp, LINK_STATE_UP);
2434 } else { /* Link down */
2435 if (vsi->link_active == TRUE) {
2437 if_printf(ifp,"Link is Down\n");
2438 if_link_state_change(ifp, LINK_STATE_DOWN);
2439 vsi->link_active = FALSE;
2446 /*********************************************************************
2448 * This routine disables all traffic on the adapter by issuing a
2449 * global reset on the MAC and deallocates TX/RX buffers.
2451 **********************************************************************/
/*
 * ixlv_stop
 * Core-locked stop path: flush pending virtchnl commands, ask the PF
 * to disable the queues, then poll the adminq for up to ~100ms waiting
 * for IFF_DRV_RUNNING to clear, and stop the watchdog callout.
 */
2454 ixlv_stop(struct ixlv_sc *sc)
2460 INIT_DBG_IF(ifp, "begin");
2462 IXLV_CORE_LOCK_ASSERT(sc);
2464 ixl_vc_flush(&sc->vc_mgr);
2465 ixlv_disable_queues(sc);
2468 while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
2469 ((ticks - start) < hz/10))
2470 ixlv_do_adminq_locked(sc);
2472 /* Stop the local timer */
2473 callout_stop(&sc->timer);
2475 INIT_DBG_IF(ifp, "end");
2479 /*********************************************************************
2481 * Free all station queue structs.
2483 **********************************************************************/
/*
 * ixlv_free_queues
 * For each queue release TX then RX resources: soft state, descriptor
 * DMA memory, and the ring mutex.  An uninitialized mutex means setup
 * never reached that ring, so it is skipped.  Frees the queue array
 * itself at the end.
 */
2485 ixlv_free_queues(struct ixl_vsi *vsi)
2487 struct ixlv_sc *sc = (struct ixlv_sc *)vsi->back;
2488 struct ixl_queue *que = vsi->queues;
2490 for (int i = 0; i < vsi->num_queues; i++, que++) {
2491 struct tx_ring *txr = &que->txr;
2492 struct rx_ring *rxr = &que->rxr;
2494 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2497 ixl_free_que_tx(que);
2499 i40e_free_dma_mem(&sc->hw, &txr->dma);
2501 IXL_TX_LOCK_DESTROY(txr);
2503 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2506 ixl_free_que_rx(que);
2508 i40e_free_dma_mem(&sc->hw, &rxr->dma);
2510 IXL_RX_LOCK_DESTROY(rxr);
2513 free(vsi->queues, M_DEVBUF);
2518 ** ixlv_config_rss - setup RSS
2520 ** RSS keys and table are cleared on VF reset.
/*
 * ixlv_config_rss
 * Program the VF RSS state: hash key (from the kernel RSS subsystem
 * when compiled in, otherwise a fixed seed), enabled hash types
 * (VFQF_HENA), and the indirection table (VFQF_HLUT) filled round-
 * robin across the VSI's queues.  Skipped entirely for a single queue.
 * NOTE(review): this listing is missing lines (#ifdef RSS markers,
 * declarations, braces); only the loop-bound fix below was changed.
 */
2523 ixlv_config_rss(struct ixlv_sc *sc)
2525 struct i40e_hw *hw = &sc->hw;
2526 struct ixl_vsi *vsi = &sc->vsi;
2528 u64 set_hena = 0, hena;
2531 u32 rss_hash_config;
2532 u32 rss_seed[IXL_KEYSZ];
2534 u32 rss_seed[IXL_KEYSZ] = {0x41b01687,
2535 0x183cfd8c, 0xce880440, 0x580cbc3c,
2536 0x35897377, 0x328b25e1, 0x4fa98922,
2537 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
2540 /* Don't set up RSS if using a single queue */
2541 if (vsi->num_queues == 1) {
2542 wr32(hw, I40E_VFQF_HENA(0), 0);
2543 wr32(hw, I40E_VFQF_HENA(1), 0);
2549 /* Fetch the configured RSS key */
2550 rss_getkey((uint8_t *) &rss_seed);
2552 /* Fill out hash function seed */
/*
 * Fix: the bound was "i <= IXL_KEYSZ", which reads one element past
 * the end of rss_seed[IXL_KEYSZ] (out-of-bounds access, UB) and
 * writes one register past the key.  Valid indices are 0..IXL_KEYSZ-1.
 */
2553 for (i = 0; i < IXL_KEYSZ; i++)
2554 wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
2556 /* Enable PCTYPES for RSS: */
2558 rss_hash_config = rss_gethashconfig();
2559 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2560 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2561 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2562 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2563 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2564 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2565 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2566 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2567 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2568 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2569 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2570 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2571 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2572 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
/* Non-RSS build: enable the full default set of hash types */
2575 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
2576 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
2577 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
2578 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
2579 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
2580 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
2581 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
2582 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
2583 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
2584 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
2585 ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
2587 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
2588 ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
2590 wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
2591 wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
2593 /* Populate the LUT with max no. of queues in round robin fashion */
2594 for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
2595 if (j == vsi->num_queues)
2599 * Fetch the RSS bucket id for the given indirection entry.
2600 * Cap it at the number of configured buckets (which is
2603 que_id = rss_get_indirection_to_bucket(i);
2604 que_id = que_id % vsi->num_queues;
2608 /* lut = 4-byte sliding window of 4 lut entries */
2609 lut = (lut << 8) | (que_id & 0xF);
2610 /* On i = 3, we have 4 entries in lut; write to the register */
2612 wr32(hw, I40E_VFQF_HLUT(i), lut);
2613 DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
2621 ** This routine refreshes vlan filters, called by init
2622 ** it scans the filter table and then updates the AQ
/*
 * ixlv_setup_vlan_filters
 * After (re)init: if any soft VLAN filter is still flagged
 * IXL_FILTER_ADD, queue one AQ command to reprogram the hardware.
 */
2625 ixlv_setup_vlan_filters(struct ixlv_sc *sc)
2627 struct ixl_vsi *vsi = &sc->vsi;
2628 struct ixlv_vlan_filter *f;
2631 if (vsi->num_vlans == 0)
2634 ** Scan the filter table for vlan entries,
2635 ** and if found call for the AQ update.
2637 SLIST_FOREACH(f, sc->vlan_filters, next)
2638 if (f->flags & IXL_FILTER_ADD)
2641 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
2642 IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
2647 ** This routine adds new MAC filters to the sc's list;
2648 ** these are later added in hardware by sending a virtual
/*
 * ixlv_add_mac_filter
 * Add macaddr to the soft filter list (no-op if already present),
 * copying the address and flagging it ADD|USED plus any caller flags;
 * the actual AQ programming happens later.
 */
2652 ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
2654 struct ixlv_mac_filter *f;
2656 /* Does one already exist? */
2657 f = ixlv_find_mac_filter(sc, macaddr);
2659 IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
2660 MAC_FORMAT_ARGS(macaddr));
2664 /* If not, get a new empty filter */
2665 f = ixlv_get_mac_filter(sc);
2667 if_printf(sc->vsi.ifp, "%s: no filters available!!\n",
2672 IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
2673 MAC_FORMAT_ARGS(macaddr));
2675 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2676 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
2682 ** Marks a MAC filter for deletion.
/*
 * ixlv_del_mac_filter
 * Flag the matching soft filter IXL_FILTER_DEL; the not-found error
 * return is among the lines missing from this listing.
 */
2685 ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
2687 struct ixlv_mac_filter *f;
2689 f = ixlv_find_mac_filter(sc, macaddr);
2693 f->flags |= IXL_FILTER_DEL;
2698 ** Tasklet handler for MSIX Adminq interrupts
2699 ** - done outside interrupt context since it might sleep
/*
 * ixlv_do_adminq
 * Taskqueue wrapper: take the softc mutex and run the locked adminq
 * service routine.
 */
2702 ixlv_do_adminq(void *context, int pending)
2704 struct ixlv_sc *sc = context;
2707 ixlv_do_adminq_locked(sc);
2708 mtx_unlock(&sc->mtx);
2713 ixlv_do_adminq_locked(struct ixlv_sc *sc)
2715 struct i40e_hw *hw = &sc->hw;
2716 struct i40e_arq_event_info event;
2717 struct i40e_virtchnl_msg *v_msg;
2718 device_t dev = sc->dev;
2723 IXLV_CORE_LOCK_ASSERT(sc);
2725 event.buf_len = IXL_AQ_BUF_SZ;
2726 event.msg_buf = sc->aq_buffer;
2727 v_msg = (struct i40e_virtchnl_msg *)&event.desc;
2730 ret = i40e_clean_arq_element(hw, &event, &result);
2733 ixlv_vc_completion(sc, v_msg->v_opcode,
2734 v_msg->v_retval, event.msg_buf, event.msg_len);
2736 bzero(event.msg_buf, IXL_AQ_BUF_SZ);
2739 /* check for Admin queue errors */
2740 oldreg = reg = rd32(hw, hw->aq.arq.len);
2741 if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
2742 device_printf(dev, "ARQ VF Error detected\n");
2743 reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
2745 if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
2746 device_printf(dev, "ARQ Overflow Error detected\n");
2747 reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
2749 if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
2750 device_printf(dev, "ARQ Critical Error detected\n");
2751 reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
2754 wr32(hw, hw->aq.arq.len, reg);
2756 oldreg = reg = rd32(hw, hw->aq.asq.len);
2757 if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
2758 device_printf(dev, "ASQ VF Error detected\n");
2759 reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
2761 if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
2762 device_printf(dev, "ASQ Overflow Error detected\n");
2763 reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
2765 if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
2766 device_printf(dev, "ASQ Critical Error detected\n");
2767 reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
2770 wr32(hw, hw->aq.asq.len, reg);
2772 ixlv_enable_adminq_irq(hw);
2776 ixlv_add_sysctls(struct ixlv_sc *sc)
2778 device_t dev = sc->dev;
2779 struct ixl_vsi *vsi = &sc->vsi;
2780 struct i40e_eth_stats *es = &vsi->eth_stats;
2782 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2783 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2784 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2786 struct sysctl_oid *vsi_node, *queue_node;
2787 struct sysctl_oid_list *vsi_list, *queue_list;
2789 #define QUEUE_NAME_LEN 32
2790 char queue_namebuf[QUEUE_NAME_LEN];
2792 struct ixl_queue *queues = vsi->queues;
2793 struct tx_ring *txr;
2794 struct rx_ring *rxr;
2796 /* Driver statistics sysctls */
2797 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2798 CTLFLAG_RD, &sc->watchdog_events,
2799 "Watchdog timeouts");
2800 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2801 CTLFLAG_RD, &sc->admin_irq,
2802 "Admin Queue IRQ Handled");
2804 /* VSI statistics sysctls */
2805 vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
2806 CTLFLAG_RD, NULL, "VSI-specific statistics");
2807 vsi_list = SYSCTL_CHILDREN(vsi_node);
2809 struct ixl_sysctl_info ctls[] =
2811 {&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2812 {&es->rx_unicast, "ucast_pkts_rcvd",
2813 "Unicast Packets Received"},
2814 {&es->rx_multicast, "mcast_pkts_rcvd",
2815 "Multicast Packets Received"},
2816 {&es->rx_broadcast, "bcast_pkts_rcvd",
2817 "Broadcast Packets Received"},
2818 {&es->rx_discards, "rx_discards", "Discarded RX packets"},
2819 {&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"},
2820 {&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2821 {&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2822 {&es->tx_multicast, "mcast_pkts_txd",
2823 "Multicast Packets Transmitted"},
2824 {&es->tx_broadcast, "bcast_pkts_txd",
2825 "Broadcast Packets Transmitted"},
2826 {&es->tx_errors, "tx_errors", "TX packet errors"},
2830 struct ixl_sysctl_info *entry = ctls;
2831 while (entry->stat != 0)
2833 SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name,
2834 CTLFLAG_RD, entry->stat,
2835 entry->description);
2840 for (int q = 0; q < vsi->num_queues; q++) {
2841 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2842 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
2843 CTLFLAG_RD, NULL, "Queue Name");
2844 queue_list = SYSCTL_CHILDREN(queue_node);
2846 txr = &(queues[q].txr);
2847 rxr = &(queues[q].rxr);
2849 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2850 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2851 "m_defrag() failed");
2852 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
2853 CTLFLAG_RD, &(queues[q].dropped_pkts),
2854 "Driver dropped packets");
2855 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
2856 CTLFLAG_RD, &(queues[q].irqs),
2857 "irqs on this queue");
2858 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2859 CTLFLAG_RD, &(queues[q].tso),
2861 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
2862 CTLFLAG_RD, &(queues[q].tx_dma_setup),
2863 "Driver tx dma failure in xmit");
2864 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2865 CTLFLAG_RD, &(txr->no_desc),
2866 "Queue No Descriptor Available");
2867 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2868 CTLFLAG_RD, &(txr->total_packets),
2869 "Queue Packets Transmitted");
2870 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2871 CTLFLAG_RD, &(txr->tx_bytes),
2872 "Queue Bytes Transmitted");
2873 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2874 CTLFLAG_RD, &(rxr->rx_packets),
2875 "Queue Packets Received");
2876 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2877 CTLFLAG_RD, &(rxr->rx_bytes),
2878 "Queue Bytes Received");
2880 /* Examine queue state */
2881 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head",
2882 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2883 sizeof(struct ixl_queue),
2884 ixlv_sysctl_qtx_tail_handler, "IU",
2885 "Queue Transmit Descriptor Tail");
2886 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_head",
2887 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2888 sizeof(struct ixl_queue),
2889 ixlv_sysctl_qrx_tail_handler, "IU",
2890 "Queue Receive Descriptor Tail");
2895 ixlv_init_filters(struct ixlv_sc *sc)
2897 sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
2898 M_DEVBUF, M_NOWAIT | M_ZERO);
2899 SLIST_INIT(sc->mac_filters);
2900 sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
2901 M_DEVBUF, M_NOWAIT | M_ZERO);
2902 SLIST_INIT(sc->vlan_filters);
2907 ixlv_free_filters(struct ixlv_sc *sc)
2909 struct ixlv_mac_filter *f;
2910 struct ixlv_vlan_filter *v;
2912 while (!SLIST_EMPTY(sc->mac_filters)) {
2913 f = SLIST_FIRST(sc->mac_filters);
2914 SLIST_REMOVE_HEAD(sc->mac_filters, next);
2917 while (!SLIST_EMPTY(sc->vlan_filters)) {
2918 v = SLIST_FIRST(sc->vlan_filters);
2919 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
2926 * ixlv_sysctl_qtx_tail_handler
2927 * Retrieves I40E_QTX_TAIL1 value from hardware
2931 ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
2933 struct ixl_queue *que;
2937 que = ((struct ixl_queue *)oidp->oid_arg1);
2940 val = rd32(que->vsi->hw, que->txr.tail);
2941 error = sysctl_handle_int(oidp, &val, 0, req);
2942 if (error || !req->newptr)
2948 * ixlv_sysctl_qrx_tail_handler
2949 * Retrieves I40E_QRX_TAIL1 value from hardware
2953 ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
2955 struct ixl_queue *que;
2959 que = ((struct ixl_queue *)oidp->oid_arg1);
2962 val = rd32(que->vsi->hw, que->rxr.tail);
2963 error = sysctl_handle_int(oidp, &val, 0, req);
2964 if (error || !req->newptr)