/******************************************************************************

  Copyright (c) 2013-2014, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#include "opt_inet.h"
#include "opt_inet6.h"

#include "ixl.h"
#include "ixlv.h"
/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixlv_driver_version[] = "1.1.18";
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixlv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/
static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixlv_strings[] = {
	"Intel(R) Ethernet Connection XL710 VF Driver"
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixlv_probe(device_t);
static int	ixlv_attach(device_t);
static int	ixlv_detach(device_t);
static int	ixlv_shutdown(device_t);
static void	ixlv_init_locked(struct ixlv_sc *);
static int	ixlv_allocate_pci_resources(struct ixlv_sc *);
static void	ixlv_free_pci_resources(struct ixlv_sc *);
static int	ixlv_assign_msix(struct ixlv_sc *);
static int	ixlv_init_msix(struct ixlv_sc *);
static int	ixlv_init_taskqueue(struct ixlv_sc *);
static int	ixlv_setup_queues(struct ixlv_sc *);
static void	ixlv_config_rss(struct ixlv_sc *);
static void	ixlv_stop(struct ixlv_sc *);
static void	ixlv_add_multi(struct ixl_vsi *);
static void	ixlv_del_multi(struct ixl_vsi *);
static void	ixlv_free_queues(struct ixl_vsi *);
static int	ixlv_setup_interface(device_t, struct ixlv_sc *);

static int	ixlv_media_change(struct ifnet *);
static void	ixlv_media_status(struct ifnet *, struct ifmediareq *);

static void	ixlv_local_timer(void *);

static int	ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
static int	ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void	ixlv_init_filters(struct ixlv_sc *);
static void	ixlv_free_filters(struct ixlv_sc *);

static void	ixlv_msix_que(void *);
static void	ixlv_msix_adminq(void *);
static void	ixlv_do_adminq(void *, int);
static void	ixlv_do_adminq_locked(struct ixlv_sc *sc);
static void	ixlv_handle_que(void *, int);
static int	ixlv_reset(struct ixlv_sc *);
static int	ixlv_reset_complete(struct i40e_hw *);
static void	ixlv_set_queue_rx_itr(struct ixl_queue *);
static void	ixlv_set_queue_tx_itr(struct ixl_queue *);
static void	ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
		    enum i40e_status_code);

static void	ixlv_enable_adminq_irq(struct i40e_hw *);
static void	ixlv_disable_adminq_irq(struct i40e_hw *);
static void	ixlv_enable_queue_irq(struct i40e_hw *, int);
static void	ixlv_disable_queue_irq(struct i40e_hw *, int);

static void	ixlv_setup_vlan_filters(struct ixlv_sc *);
static void	ixlv_register_vlan(void *, struct ifnet *, u16);
static void	ixlv_unregister_vlan(void *, struct ifnet *, u16);

static void	ixlv_init_hw(struct ixlv_sc *);
static int	ixlv_setup_vc(struct ixlv_sc *);
static int	ixlv_vf_config(struct ixlv_sc *);

static void	ixlv_cap_txcsum_tso(struct ixl_vsi *,
		    struct ifnet *, int);

static void	ixlv_add_sysctls(struct ixlv_sc *);
static int	ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixlv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixlv_probe),
	DEVMETHOD(device_attach, ixlv_attach),
	DEVMETHOD(device_detach, ixlv_detach),
	DEVMETHOD(device_shutdown, ixlv_shutdown),
	{0, 0}
};

static driver_t ixlv_driver = {
	"ixlv", ixlv_methods, sizeof(struct ixlv_sc),
};

devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);

MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);
/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
    "IXLV driver parameters");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixlv_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixlv_ringsz, 0, "Descriptor Ring Size");

/* Set to zero to auto calculate */
int ixlv_max_queues = 0;
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixlv_max_queues, 0, "Number of Queues");

/*
** Number of entries in Tx queue buf_ring.
** Increasing this will reduce the number of
** errors when transmitting fragmented UDP
** packets.
*/
static int ixlv_txbrsz = DEFAULT_TXBRSZ;
TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
    &ixlv_txbrsz, 0, "TX Buf Ring Size");

/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
**	- default values for static ITR
*/
int ixlv_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixlv_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixlv_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixlv_rx_itr, 0, "RX Interrupt Rate");

int ixlv_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixlv_tx_itr, 0, "TX Interrupt Rate");
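/*
** Illustrative usage (values are examples, not shipped defaults):
** since the above are CTLFLAG_RDTUN tunables, they are normally set
** from /boot/loader.conf before the module is loaded, e.g.:
**
**	hw.ixlv.ringsz=1024
**	hw.ixlv.max_queues=4
**	hw.ixlv.dynamic_rx_itr=1
*/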
/*********************************************************************
 *  Device identification routine
 *
 *  ixlv_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/
static int
ixlv_probe(device_t dev)
{
	ixl_vendor_info_t *ent;

	u16	pci_vendor_id, pci_device_id;
	u16	pci_subvendor_id, pci_subdevice_id;
	char	device_name[256];

	INIT_DEBUGOUT("ixlv_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixlv_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&
		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&
		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(device_name, "%s, Version - %s",
			    ixlv_strings[ent->index],
			    ixlv_driver_version);
			device_set_desc_copy(dev, device_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}
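/*
** Note: per the match logic above, a subvendor or subdevice ID of 0 in
** the vendor info table acts as a wildcard, so the two table entries
** match any XL710 VF regardless of the subsystem IDs the hypervisor
** exposes.
*/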
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
ixlv_attach(device_t dev)
{
	struct ixlv_sc	*sc;
	struct i40e_hw	*hw;
	struct ixl_vsi	*vsi;
	int		error = 0;

	INIT_DBG_DEV(dev, "begin");

	/* Allocate, clear, and link in our primary soft structure */
	sc = device_get_softc(dev);
	sc->dev = sc->osdep.dev = dev;
	hw = &sc->hw;
	vsi = &sc->vsi;
	vsi->dev = dev;

	/* Initialize hw struct */
	ixlv_init_hw(sc);

	/* Allocate filter lists */
	ixlv_init_filters(sc);

	/* Core Lock Init */
	mtx_init(&sc->mtx, device_get_nameunit(dev),
	    "IXL SC Lock", MTX_DEF);

	/* Set up the timer callout */
	callout_init_mtx(&sc->timer, &sc->mtx, 0);

	/* Do PCI setup - map BAR0, etc */
	if (ixlv_allocate_pci_resources(sc)) {
		device_printf(dev, "%s: Allocation of PCI resources failed\n",
		    __func__);
		error = ENXIO;
		goto err_early;
	}

	INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");

	error = i40e_set_mac_type(hw);
	if (error) {
		device_printf(dev, "%s: set_mac_type failed: %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	error = ixlv_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: Device is still being reset\n",
		    __func__);
		goto err_pci_res;
	}

	INIT_DBG_DEV(dev, "VF Device is ready for configuration");

	error = ixlv_setup_vc(sc);
	if (error) {
		device_printf(dev, "%s: Error setting up PF comms, %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	INIT_DBG_DEV(dev, "PF API version verified");

	/* TODO: Figure out why MDD events occur when this reset is removed. */
	/* Need API version before sending reset message */
	error = ixlv_reset(sc);
	if (error) {
		device_printf(dev, "VF reset failed; reload the driver\n");
		goto err_aq;
	}

	INIT_DBG_DEV(dev, "VF reset complete");

	/* Ask for VF config from PF */
	error = ixlv_vf_config(sc);
	if (error) {
		device_printf(dev, "Error getting configuration from PF: %d\n",
		    error);
		goto err_aq;
	}

	INIT_DBG_DEV(dev, "VF config from PF:");
	INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
	    sc->vf_res->num_vsis,
	    sc->vf_res->num_queue_pairs,
	    sc->vf_res->max_vectors,
	    sc->vf_res->max_mtu);
	INIT_DBG_DEV(dev, "Offload flags: %#010x",
	    sc->vf_res->vf_offload_flags);

	// TODO: Move this into ixlv_vf_config?
	/* got VF config message back from PF, now we can parse it */
	for (int i = 0; i < sc->vf_res->num_vsis; i++) {
		if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
			sc->vsi_res = &sc->vf_res->vsi_res[i];
	}
	if (!sc->vsi_res) {
		device_printf(dev, "%s: no LAN VSI found\n", __func__);
		error = EIO;
		goto err_res_buf;
	}

	INIT_DBG_DEV(dev, "Resource Acquisition complete");

	/* If no mac address was assigned just make a random one */
	if (!ixlv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	vsi->id = sc->vsi_res->vsi_id;
	vsi->back = (void *)sc;
	sc->link_up = TRUE;

	/* This allocates the memory and early settings */
	if (ixlv_setup_queues(sc) != 0) {
		device_printf(dev, "%s: setup queues failed!\n",
		    __func__);
		error = EIO;
		goto out;
	}

	/* Setup the stack interface */
	if (ixlv_setup_interface(dev, sc) != 0) {
		device_printf(dev, "%s: setup interface failed!\n",
		    __func__);
		error = EIO;
		goto out;
	}

	INIT_DBG_DEV(dev, "Queue memory and interface setup");

	/* Do queue interrupt setup */
	ixlv_assign_msix(sc);

	/* Start AdminQ taskqueue */
	ixlv_init_taskqueue(sc);

	/* Initialize stats */
	bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
	ixlv_add_sysctls(sc);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

	/* We want AQ enabled early */
	ixlv_enable_adminq_irq(hw);

	/* Set things up to run init */
	sc->init_state = IXLV_INIT_READY;

	ixl_vc_init_mgr(sc, &sc->vc_mgr);

	INIT_DBG_DEV(dev, "end");
	return (error);

out:
	ixlv_free_queues(vsi);
err_res_buf:
	free(sc->vf_res, M_DEVBUF);
err_aq:
	i40e_shutdown_adminq(hw);
err_pci_res:
	ixlv_free_pci_resources(sc);
err_early:
	mtx_destroy(&sc->mtx);
	ixlv_free_filters(sc);
	INIT_DBG_DEV(dev, "end: error %d", error);
	return (error);
}
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
ixlv_detach(device_t dev)
{
	struct ixlv_sc	*sc = device_get_softc(dev);
	struct ixl_vsi	*vsi = &sc->vsi;

	INIT_DBG_DEV(dev, "begin");

	/* Make sure VLANs are not using driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		INIT_DBG_DEV(dev, "end");
		return (EBUSY);
	}

	/* Stop driver */
	ether_ifdetach(vsi->ifp);
	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		mtx_lock(&sc->mtx);
		ixlv_stop(sc);
		mtx_unlock(&sc->mtx);
	}

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	/* Drain the virtchnl command manager */
	callout_drain(&sc->vc_mgr.callout);

	i40e_shutdown_adminq(&sc->hw);
	taskqueue_free(sc->tq);
	if (sc->vf_res != NULL)
		free(sc->vf_res, M_DEVBUF);
	ixlv_free_pci_resources(sc);
	ixlv_free_queues(vsi);
	mtx_destroy(&sc->mtx);
	ixlv_free_filters(sc);

	bus_generic_detach(dev);
	INIT_DBG_DEV(dev, "end");
	return (0);
}
/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixlv_shutdown(device_t dev)
{
	struct ixlv_sc	*sc = device_get_softc(dev);

	INIT_DBG_DEV(dev, "begin");

	mtx_lock(&sc->mtx);
	ixlv_stop(sc);
	mtx_unlock(&sc->mtx);

	INIT_DBG_DEV(dev, "end");
	return (0);
}
/*
 * Configure TXCSUM(IPV6) and TSO(4/6)
 *	- the hardware handles these together so we
 *	  need to tweak them as a pair
 */
static void
ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			if_printf(ifp,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			if_printf(ifp,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			if_printf(ifp,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			if_printf(ifp,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}
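/*
** Illustrative example of the coupling handled above: because TSO
** depends on transmit checksum offload, a command such as
** "ifconfig ixlv0 -txcsum" also clears TSO4, and the KEEP_TSO flags
** record that TSO should be restored when txcsum is re-enabled.
*/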
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixlv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
static int
ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixlv_sc	*sc = vsi->back;
	struct ifreq	*ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int		error = 0;

	switch (command) {
	case SIOCSIFADDR:
		IOCTL_DBG_IF2(ifp, "SIOCSIFADDR (Get/Set Interface Addr)");
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixlv_init(vsi);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
		mtx_lock(&sc->mtx);
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			error = EINVAL;
			IOCTL_DBG_IF(ifp, "mtu too large");
		} else {
			IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", ifp->if_mtu, ifr->ifr_mtu);
			// ERJ: Interestingly enough, these types don't match
			ifp->if_mtu = (u_long)ifr->ifr_mtu;
			vsi->max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			    + ETHER_VLAN_ENCAP_LEN;
			ixlv_init_locked(sc);
		}
		mtx_unlock(&sc->mtx);
		break;
	case SIOCSIFFLAGS:
		IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
		mtx_lock(&sc->mtx);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixlv_init_locked(sc);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixlv_stop(sc);
		sc->if_flags = ifp->if_flags;
		mtx_unlock(&sc->mtx);
		break;
	case SIOCADDMULTI:
		IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			mtx_lock(&sc->mtx);
			ixlv_disable_intr(vsi);
			ixlv_add_multi(vsi);
			ixlv_enable_intr(vsi);
			mtx_unlock(&sc->mtx);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
		if (sc->init_state == IXLV_RUNNING) {
			mtx_lock(&sc->mtx);
			ixlv_disable_intr(vsi);
			ixlv_del_multi(vsi);
			ixlv_enable_intr(vsi);
			mtx_unlock(&sc->mtx);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");

		ixlv_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ixlv_init(vsi);
		}
		VLAN_CAPABILITIES(ifp);

		break;
	}

	default:
		IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
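/*
** Frame size arithmetic used in SIOCSIFMTU above (the addends are the
** standard Ethernet constants): max_frame_size = MTU + ETHER_HDR_LEN
** (14) + ETHER_CRC_LEN (4) + ETHER_VLAN_ENCAP_LEN (4), so a 1500-byte
** MTU yields a 1522-byte maximum frame.
*/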
/*
** To do a reinit on the VF is unfortunately more complicated
** than on a physical device; we must have the PF more or less
** completely recreate our memory, so many things that were
** done only once at attach in traditional drivers now must be
** redone at each reinitialization. This function does that
** 'prelude' so we can then call the normal locked init code.
*/
static int
ixlv_reinit_locked(struct ixlv_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ifnet		*ifp = vsi->ifp;
	struct ixlv_mac_filter	*mf, *mf_temp;
	struct ixlv_vlan_filter	*vf;
	int			error = 0;

	INIT_DBG_IF(ifp, "begin");

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ixlv_stop(sc);

	error = ixlv_reset(sc);

	INIT_DBG_IF(ifp, "VF was reset");

	/* set the state in case we went thru RESET */
	sc->init_state = IXLV_RUNNING;

	/*
	** Resetting the VF drops all filters from hardware;
	** we need to mark them to be re-added in init.
	*/
	SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
		if (mf->flags & IXL_FILTER_DEL) {
			SLIST_REMOVE(sc->mac_filters, mf,
			    ixlv_mac_filter, next);
			free(mf, M_DEVBUF);
		} else
			mf->flags |= IXL_FILTER_ADD;
	}
	if (vsi->num_vlans != 0)
		SLIST_FOREACH(vf, sc->vlan_filters, next)
			vf->flags = IXL_FILTER_ADD;
	else { /* clean any stale filters */
		while (!SLIST_EMPTY(sc->vlan_filters)) {
			vf = SLIST_FIRST(sc->vlan_filters);
			SLIST_REMOVE_HEAD(sc->vlan_filters, next);
			free(vf, M_DEVBUF);
		}
	}

	ixlv_enable_adminq_irq(hw);
	ixl_vc_flush(&sc->vc_mgr);

	INIT_DBG_IF(ifp, "end");
	return (error);
}
static void
ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
    enum i40e_status_code code)
{
	struct ixlv_sc *sc;

	sc = arg;

	/*
	 * Ignore "Adapter Stopped" message as that happens if an ifconfig down
	 * happens while a command is in progress, so we don't print an error
	 * in that case.
	 */
	if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
		if_printf(sc->vsi.ifp,
		    "Error %d waiting for PF to complete operation %d\n",
		    code, cmd->request);
	}
}
static void
ixlv_init_locked(struct ixlv_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	int			error = 0;

	INIT_DBG_IF(ifp, "begin");

	IXLV_CORE_LOCK_ASSERT(sc);

	/* Do a reinit first if an init has already been done */
	if ((sc->init_state == IXLV_RUNNING) ||
	    (sc->init_state == IXLV_RESET_REQUIRED) ||
	    (sc->init_state == IXLV_RESET_PENDING))
		error = ixlv_reinit_locked(sc);
	/* Don't bother with init if we failed reinit */
	if (error)
		goto init_done;

	/* Remove existing MAC filter if new MAC addr is set */
	if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
		error = ixlv_del_mac_filter(sc, hw->mac.addr);
		if (error == 0)
			ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
			    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
			    sc);
	}

	/* Check for an LAA mac address... */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;

	/* Add mac filter for this VF to PF */
	if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
		error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
		if (!error || error == EEXIST)
			ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
			    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
			    sc);
	}

	/* Setup vlan's if needed */
	ixlv_setup_vlan_filters(sc);

	/* Prepare the queues for operation */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		ixl_init_tx_ring(que);

		if (vsi->max_frame_size <= 2048)
			rxr->mbuf_sz = MCLBYTES;
		else
			rxr->mbuf_sz = MJUMPAGESIZE;
		ixl_init_rx_ring(que);
	}

	/* Configure queues */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
	    IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);

	/* Set up RSS */
	ixlv_config_rss(sc);

	/* Map vectors */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
	    IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);

	/* Enable queues */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
	    IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);

	/* Start the local timer */
	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);

	sc->init_state = IXLV_RUNNING;

init_done:
	INIT_DBG_IF(ifp, "end");
	return;
}
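/*
** Note on the flow above: the CONFIGURE/MAP/ENABLE queue requests are
** not executed synchronously; they are queued on the vc_mgr and are
** completed by PF responses arriving on the admin queue, with
** ixl_init_cmd_complete() invoked as each one finishes. This is why
** ixlv_init() below polls for IFF_DRV_RUNNING rather than assuming the
** device is up when ixlv_init_locked() returns.
*/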
/*
** Init entry point for the stack
*/
void
ixlv_init(void *arg)
{
	struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
	struct ixlv_sc *sc = vsi->back;
	int retries = 0;

	mtx_lock(&sc->mtx);
	ixlv_init_locked(sc);
	mtx_unlock(&sc->mtx);

	/* Wait for init_locked to finish */
	while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
	    && ++retries < 100) {
		i40e_msec_delay(10);
	}
	if (retries >= IXLV_AQ_MAX_ERR)
		if_printf(vsi->ifp,
		    "Init failed to complete in allotted time!\n");
}
/*
 * ixlv_attach() helper function; gathers information about
 * the (virtual) hardware for use elsewhere in the driver.
 */
static void
ixlv_init_hw(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);
}
/*
 * ixlv_attach() helper function; initializes the admin queue
 * and attempts to establish contact with the PF by
 * retrying the initial "API version" message several times
 * until the PF responds.
 */
static int
ixlv_setup_vc(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int error = 0, ret_error = 0, asq_retries = 0;
	bool send_api_ver_retried = FALSE;

	/* Need to set these AQ parameters before initializing AQ */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

	for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
		/* Initialize admin queue */
		error = i40e_init_adminq(hw);
		if (error) {
			device_printf(dev, "%s: init_adminq failed: %d\n",
			    __func__, error);
			ret_error = 1;
			continue;
		}

		INIT_DBG_DEV(dev, "Initialized Admin Queue, attempt %d", i+1);

		/* Send VF's API version */
		error = ixlv_send_api_ver(sc);
		if (error) {
			i40e_shutdown_adminq(hw);
			ret_error = 2;
			device_printf(dev, "%s: unable to send api"
			    " version to PF on attempt %d, error %d\n",
			    __func__, i+1, error);
			continue;
		}

		asq_retries = 0;
		while (!i40e_asq_done(hw)) {
			if (++asq_retries > IXLV_AQ_MAX_ERR) {
				i40e_shutdown_adminq(hw);
				DDPRINTF(dev, "Admin Queue timeout "
				    "(waiting for send_api_ver), %d more retries...",
				    IXLV_AQ_MAX_ERR - (i + 1));
				ret_error = 3;
				break;
			}
			i40e_msec_delay(10);
		}
		if (asq_retries > IXLV_AQ_MAX_ERR)
			continue;

		INIT_DBG_DEV(dev, "Sent API version message to PF");

		/* Verify that the VF accepts the PF's API version */
		error = ixlv_verify_api_ver(sc);
		if (error == ETIMEDOUT) {
			if (!send_api_ver_retried) {
				/* Resend message, one more time */
				send_api_ver_retried = TRUE;
				device_printf(dev,
				    "%s: Timeout while verifying API version on first"
				    " try!\n", __func__);
				continue;
			} else {
				device_printf(dev,
				    "%s: Timeout while verifying API version on second"
				    " try!\n", __func__);
				ret_error = 4;
				break;
			}
		}
		if (error) {
			device_printf(dev,
			    "%s: Unable to verify API version,"
			    " error %d\n", __func__, error);
			ret_error = 5;
		}
		break;
	}

	if (ret_error >= 4)
		i40e_shutdown_adminq(hw);
	return (ret_error);
}
/*
 * ixlv_attach() helper function; asks the PF for this VF's
 * configuration, and saves the information if it receives it.
 */
static int
ixlv_vf_config(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int bufsz, error = 0, ret_error = 0;
	int asq_retries, retried = 0;

retry_config:
	error = ixlv_send_vf_config_msg(sc);
	if (error) {
		device_printf(dev,
		    "%s: Unable to send VF config request, attempt %d,"
		    " error %d\n", __func__, retried + 1, error);
		ret_error = 2;
	}

	asq_retries = 0;
	while (!i40e_asq_done(hw)) {
		if (++asq_retries > IXLV_AQ_MAX_ERR) {
			device_printf(dev, "%s: Admin Queue timeout "
			    "(waiting for send_vf_config_msg), attempt %d\n",
			    __func__, retried + 1);
			ret_error = 3;
			goto fail;
		}
		i40e_msec_delay(10);
	}

	INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
	    retried + 1);

	if (!sc->vf_res) {
		bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
		    (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
		sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
		if (sc->vf_res == NULL) {
			device_printf(dev,
			    "%s: Unable to allocate memory for VF configuration"
			    " message from PF on attempt %d\n", __func__, retried + 1);
			ret_error = 1;
			goto fail;
		}
	}

	/* Check for VF config response */
	error = ixlv_get_vf_config(sc);
	if (error == ETIMEDOUT) {
		/* The 1st time we timeout, send the configuration message again */
		if (!retried) {
			retried++;
			goto retry_config;
		}
	}
	if (error) {
		device_printf(dev,
		    "%s: Unable to get VF configuration from PF after %d tries!\n",
		    __func__, retried + 1);
		ret_error = 4;
	}
	goto done;

fail:
	free(sc->vf_res, M_DEVBUF);

done:
	return (ret_error);
}
/*
 * Allocate MSI/X vectors, setup the AQ vector early
 */
static int
ixlv_init_msix(struct ixlv_sc *sc)
{
	device_t dev = sc->dev;
	int rid, want, vectors, queues, available;

	rid = PCIR_BAR(IXL_BAR);
	sc->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!sc->msix_mem) {
		/* May not be enabled */
		device_printf(sc->dev,
		    "Unable to map MSIX table\n");
		goto fail;
	}

	available = pci_msix_count(dev);
	if (available == 0) { /* system has msix disabled */
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, sc->msix_mem);
		sc->msix_mem = NULL;
		goto fail;
	}

	/* Figure out a reasonable auto config value */
	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;

	/* Override with hardcoded value if sane */
	if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues))
		queues = ixlv_max_queues;

	/* Enforce the VF max value */
	if (queues > IXLV_MAX_QUEUES)
		queues = IXLV_MAX_QUEUES;

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for the admin queue.
	*/
	want = queues + 1;
	if (want <= available)	/* Have enough */
		vectors = want;
	else {
		device_printf(sc->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors available but %d wanted!\n",
		    available, want);
		goto fail;
	}

	if (pci_alloc_msix(dev, &vectors) == 0) {
		device_printf(sc->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		sc->msix = vectors;
		sc->vsi.num_queues = queues;
	}

	/*
	** Explicitly set the guest PCI BUSMASTER capability
	** and we must rewrite the ENABLE in the MSIX control
	** register again at this point to cause the host to
	** successfully initialize us.
	*/
	{
		u16 pci_cmd_word;
		int msix_ctrl;
		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	/* Next we need to setup the vector for the Admin Queue */
	rid = 1;	// zero vector + 1
	sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->res == NULL) {
		device_printf(dev, "Unable to allocate"
		    " bus resource: AQ interrupt\n");
		goto fail;
	}
	if (bus_setup_intr(dev, sc->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixlv_msix_adminq, sc, &sc->tag)) {
		sc->res = NULL;
		device_printf(dev, "Failed to register AQ handler");
		goto fail;
	}
	bus_describe_intr(dev, sc->res, sc->tag, "adminq");

	return (vectors);

fail:
	/* The VF driver MUST use MSIX */
	return (0);
}
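/*
** Resulting vector layout (derived from the rid arithmetic above and in
** ixlv_assign_msix() below): MSIX vector 0 (IRQ rid 1) services the
** admin queue, and vectors 1..num_queues (rids 2..num_queues + 1) each
** service one RX/TX queue pair.
*/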
static int
ixlv_allocate_pci_resources(struct ixlv_sc *sc)
{
	int rid;
	device_t dev = sc->dev;

	rid = PCIR_BAR(0);
	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(sc->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	sc->osdep.mem_bus_space_tag =
	    rman_get_bustag(sc->pci_mem);
	sc->osdep.mem_bus_space_handle =
	    rman_get_bushandle(sc->pci_mem);
	sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
	sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
	sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;

	sc->hw.back = &sc->osdep;

	/* Disable adminq interrupts */
	ixlv_disable_adminq_irq(&sc->hw);

	/*
	** Now setup MSI/X, it will return
	** us the number of supported vectors
	*/
	sc->msix = ixlv_init_msix(sc);

	/* We fail without MSIX support */
	if (sc->msix == 0)
		return (ENXIO);

	return (0);
}
static void
ixlv_free_pci_resources(struct ixlv_sc *sc)
{
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = sc->dev;

	/* We may get here before stations are setup */
	if (que == NULL)
		goto early;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		int rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}

early:
	/* Clean the AdminQ interrupt */
	if (sc->tag != NULL) {
		bus_teardown_intr(dev, sc->res, sc->tag);
		sc->tag = NULL;
	}
	if (sc->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);

	pci_release_msi(dev);

	if (sc->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(IXL_BAR), sc->msix_mem);

	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), sc->pci_mem);

	return;
}
/*
 * Create taskqueue and tasklet for Admin Queue interrupts.
 */
static int
ixlv_init_taskqueue(struct ixlv_sc *sc)
{
	int error = 0;

	TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);

	sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->tq);
	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
	    device_get_nameunit(sc->dev));

	return (error);
}
/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers for the VSI queues
 *
 **********************************************************************/
static int
ixlv_assign_msix(struct ixlv_sc *sc)
{
	device_t		dev = sc->dev;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct tx_ring		*txr;
	int			error, rid, vector = 1;

	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
		rid = vector + 1;
		txr = &que->txr;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev, "Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixlv_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register que handler");
			return (error);
		}
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
		/* Bind the vector to a CPU */
		bus_bind_intr(dev, que->res, i);
		que->msix = vector;
		vsi->que_mask |= (u64)(1 << que->msix);
		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
		TASK_INIT(&que->task, 0, ixlv_handle_que, que);
		que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
		    device_get_nameunit(sc->dev));
	}

	return (0);
}
/*
** Requests a VF reset from the PF.
**
** Requires the VF's Admin Queue to be initialized.
*/
static int
ixlv_reset(struct ixlv_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	int		error = 0;

	/* Ask the PF to reset us if we are initiating */
	if (sc->init_state != IXLV_RESET_PENDING)
		ixlv_request_reset(sc);

	i40e_msec_delay(100);
	error = ixlv_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: VF reset failed\n",
		    __func__);
		return (error);
	}

	error = i40e_shutdown_adminq(hw);
	if (error) {
		device_printf(dev, "%s: shutdown_adminq failed: %d\n",
		    __func__, error);
		return (error);
	}

	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "%s: init_adminq failed: %d\n",
		    __func__, error);
		return (error);
	}

	return (0);
}

static int
ixlv_reset_complete(struct i40e_hw *hw)
{
	u32 reg;

	for (int i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_VFGEN_RSTAT) &
		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

		if ((reg == I40E_VFR_VFACTIVE) ||
		    (reg == I40E_VFR_COMPLETED))
			return (0);
		i40e_msec_delay(100);
	}

	return (EBUSY);
}
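/*
** Note: ixlv_reset_complete() polls VFGEN_RSTAT at 100ms intervals for
** up to 100 iterations, so a VF reset is given roughly 10 seconds to
** reach the VFACTIVE or COMPLETED state before EBUSY is returned.
*/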
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static int
ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
{
	struct ifnet		*ifp;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;

	INIT_DBG_DEV(dev, "begin");

	ifp = vsi->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "%s: could not allocate ifnet"
		    " structure!\n", __func__);
		return (-1);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 4000000000;	// ??
	ifp->if_init = ixlv_init;
	ifp->if_softc = vsi;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixlv_ioctl;

#if __FreeBSD_version >= 1100000
	if_setgetcounterfn(ifp, ixl_get_counter);
#endif

	ifp->if_transmit = ixl_mq_start;

	ifp->if_qflush = ixl_qflush;
	ifp->if_snd.ifq_maxlen = que->num_desc - 2;

	ether_ifattach(ifp, sc->hw.mac.addr);

	vsi->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_TSO;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;

	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU
			     |  IFCAP_LRO;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	** Don't turn this on by default: if vlans are
	** created on another pseudo device (e.g. lagg)
	** then vlan events are not passed through, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the ixl driver you can
	** enable this and get full hardware tag filtering.
	*/
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
	    ixlv_media_status);

	// JFV Add media types later?

	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	INIT_DBG_DEV(dev, "end");
	return (0);
}
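/*
** Illustrative: per the comment above, hardware VLAN filtering is
** advertised in if_capabilities but left out of if_capenable by
** default; it can be switched on manually when vlans are configured
** directly on the interface, e.g.:
**
**	ifconfig ixlv0 vlanhwfilter
*/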
/*
** Allocate and setup the interface queues
*/
static int
ixlv_setup_queues(struct ixlv_sc *sc)
{
	device_t		dev = sc->dev;
	struct ixl_vsi		*vsi;
	struct ixl_queue	*que;
	struct tx_ring		*txr;
	struct rx_ring		*rxr;
	int			rsize, tsize;
	int			error = I40E_SUCCESS;

	vsi = &sc->vsi;
	vsi->back = (void *)sc;
	vsi->hw = &sc->hw;
	vsi->num_vlans = 0;

	/* Get memory for the station queues */
	if (!(vsi->queues =
	    (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		error = ENOMEM;
		goto early;
	}

	for (int i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		que->num_desc = ixlv_ringsz;
		que->me = i;
		que->vsi = vsi;
		/* mark the queue as active */
		vsi->active_queues |= (u64)1 << que->me;

		txr = &que->txr;
		txr->que = que;
		txr->tail = I40E_QTX_TAIL1(que->me);
		/* Initialize the TX lock */
		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(dev), que->me);
		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
		/*
		** Create the TX descriptor ring, the extra int is
		** added as the location for HEAD WB.
		*/
		tsize = roundup2((que->num_desc *
		    sizeof(struct i40e_tx_desc)) +
		    sizeof(u32), DBA_ALIGN);
		if (i40e_allocate_dma_mem(&sc->hw,
		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
			device_printf(dev,
			    "Unable to allocate TX Descriptor memory\n");
			error = ENOMEM;
			goto fail;
		}
		txr->base = (struct i40e_tx_desc *)txr->dma.va;
		bzero((void *)txr->base, tsize);
		/* Now allocate transmit soft structs for the ring */
		if (ixl_allocate_tx_data(que)) {
			device_printf(dev,
			    "Critical Failure setting up TX structures\n");
			error = ENOMEM;
			goto fail;
		}
		/* Allocate a buf ring */
		txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
		    M_WAITOK, &txr->mtx);
		if (txr->br == NULL) {
			device_printf(dev,
			    "Critical Failure setting up TX buf ring\n");
			error = ENOMEM;
			goto fail;
		}

		/*
		 * Next the RX queues...
		 */
		rsize = roundup2(que->num_desc *
		    sizeof(union i40e_rx_desc), DBA_ALIGN);
		rxr = &que->rxr;
		rxr->que = que;
		rxr->tail = I40E_QRX_TAIL1(que->me);

		/* Initialize the RX side lock */
		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(dev), que->me);
		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);

		if (i40e_allocate_dma_mem(&sc->hw,
		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
			device_printf(dev,
			    "Unable to allocate RX Descriptor memory\n");
			error = ENOMEM;
			goto fail;
		}
		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
		bzero((void *)rxr->base, rsize);

		/* Allocate receive soft structs for the ring */
		if (ixl_allocate_rx_data(que)) {
			device_printf(dev,
			    "Critical Failure setting up receive structs\n");
			error = ENOMEM;
			goto fail;
		}
	}

	return (0);

fail:
	/* Release DMA memory first; freeing the queue array must come last */
	for (int i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		rxr = &que->rxr;
		txr = &que->txr;
		if (rxr->base)
			i40e_free_dma_mem(&sc->hw, &rxr->dma);
		if (txr->base)
			i40e_free_dma_mem(&sc->hw, &txr->dma);
	}
	free(vsi->queues, M_DEVBUF);

early:
	return (error);
}
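/*
** Ring sizing example (using the hardware descriptor formats): a TX
** descriptor (struct i40e_tx_desc) is 16 bytes, so a 1024-entry ring
** needs 1024 * 16 + 4 bytes (the extra u32 is the HEAD writeback
** location noted above), rounded up to DBA_ALIGN. RX rings are sized
** the same way from union i40e_rx_desc, without the writeback word.
*/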
/*
** This routine is run via a vlan config EVENT;
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA, init will
** repopulate the real table.
*/
static void
ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct ixl_vsi		*vsi = ifp->if_softc;
	struct ixlv_sc		*sc = vsi->back;
	struct ixlv_vlan_filter	*v;

	if (ifp->if_softc != arg)	/* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	/* Sanity check - make sure it doesn't already exist */
	SLIST_FOREACH(v, sc->vlan_filters, next) {
		if (v->vlan == vtag)
			return;
	}

	mtx_lock(&sc->mtx);
	++vsi->num_vlans;
	v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
	v->vlan = vtag;
	v->flags = IXL_FILTER_ADD;
	ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
	    IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
	mtx_unlock(&sc->mtx);
	return;
}
/*
** This routine is run via a vlan
** unconfig EVENT, remove our entry
** in the soft vfta.
*/
static void
ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct ixl_vsi		*vsi = ifp->if_softc;
	struct ixlv_sc		*sc = vsi->back;
	struct ixlv_vlan_filter	*v;
	int			i = 0;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	mtx_lock(&sc->mtx);
	SLIST_FOREACH(v, sc->vlan_filters, next) {
		if (v->vlan == vtag) {
			v->flags = IXL_FILTER_DEL;
			++i;
			--vsi->num_vlans;
		}
	}
	if (i)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
		    IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
	mtx_unlock(&sc->mtx);
	return;
}
/*
** Get a new filter and add it to the mac filter list.
*/
static struct ixlv_mac_filter *
ixlv_get_mac_filter(struct ixlv_sc *sc)
{
	struct ixlv_mac_filter	*f;

	f = malloc(sizeof(struct ixlv_mac_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (f)
		SLIST_INSERT_HEAD(sc->mac_filters, f, next);

	return (f);
}

/*
** Find the filter with matching MAC address
*/
static struct ixlv_mac_filter *
ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
{
	struct ixlv_mac_filter	*f;
	bool			match = FALSE;

	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (cmp_etheraddr(f->macaddr, macaddr)) {
			match = TRUE;
			break;
		}
	}

	if (!match)
		f = NULL;
	return (f);
}
/*
** Admin Queue interrupt handler
*/
static void
ixlv_msix_adminq(void *arg)
{
	struct ixlv_sc	*sc = arg;
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	u32		reg, mask, oldreg;

	reg = rd32(hw, I40E_VFINT_ICR01);
	mask = rd32(hw, I40E_VFINT_ICR0_ENA1);

	reg = rd32(hw, I40E_VFINT_DYN_CTL01);
	reg |= I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_VFINT_DYN_CTL01, reg);

	/* check for Admin queue errors */
	oldreg = reg = rd32(hw, hw->aq.arq.len);
	if (reg & I40E_VF_ARQLEN_ARQVFE_MASK) {
		device_printf(dev, "ARQ VF Error detected\n");
		reg &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
	}
	if (reg & I40E_VF_ARQLEN_ARQOVFL_MASK) {
		device_printf(dev, "ARQ Overflow Error detected\n");
		reg &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
	}
	if (reg & I40E_VF_ARQLEN_ARQCRIT_MASK) {
		device_printf(dev, "ARQ Critical Error detected\n");
		reg &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.arq.len, reg);

	oldreg = reg = rd32(hw, hw->aq.asq.len);
	if (reg & I40E_VF_ATQLEN_ATQVFE_MASK) {
		device_printf(dev, "ASQ VF Error detected\n");
		reg &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
	}
	if (reg & I40E_VF_ATQLEN_ATQOVFL_MASK) {
		device_printf(dev, "ASQ Overflow Error detected\n");
		reg &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
	}
	if (reg & I40E_VF_ATQLEN_ATQCRIT_MASK) {
		device_printf(dev, "ASQ Critical Error detected\n");
		reg &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.asq.len, reg);

	/* re-enable interrupt causes */
	wr32(hw, I40E_VFINT_ICR0_ENA1, mask);
	wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);

	/* schedule task */
	taskqueue_enqueue(sc->tq, &sc->aq_irq);
	return;
}
void
ixlv_enable_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;

	ixlv_enable_adminq_irq(hw);
	for (int i = 0; i < vsi->num_queues; i++, que++)
		ixlv_enable_queue_irq(hw, que->me);
}

void
ixlv_disable_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;

	ixlv_disable_adminq_irq(hw);
	for (int i = 0; i < vsi->num_queues; i++, que++)
		ixlv_disable_queue_irq(hw, que->me);
}

static void
ixlv_disable_adminq_irq(struct i40e_hw *hw)
{
	wr32(hw, I40E_VFINT_DYN_CTL01, 0);
	wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
	/* flush */
	rd32(hw, I40E_VFGEN_RSTAT);
	return;
}

static void
ixlv_enable_adminq_irq(struct i40e_hw *hw)
{
	wr32(hw, I40E_VFINT_DYN_CTL01,
	    I40E_VFINT_DYN_CTL01_INTENA_MASK |
	    I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
	/* flush */
	rd32(hw, I40E_VFGEN_RSTAT);
	return;
}

static void
ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
{
	u32	reg;

	reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
	    I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
	wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
}

static void
ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
{
	wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
	rd32(hw, I40E_VFGEN_RSTAT);
	return;
}
/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
static void
ixlv_set_queue_rx_itr(struct ixl_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct rx_ring	*rxr = &que->rxr;
	u16		rx_itr;
	u16		rx_latency = 0;
	int		rx_bytes;

	/* Idle, do nothing */
	if (rxr->bytes == 0)
		return;

	if (ixlv_dynamic_rx_itr) {
		rx_bytes = rxr->bytes/rxr->itr;
		rx_itr = rxr->itr;

		/* Adjust latency range */
		switch (rxr->latency) {
		case IXL_LOW_LATENCY:
			if (rx_bytes > 10) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (rx_bytes > 20) {
				rx_latency = IXL_BULK_LATENCY;
				rx_itr = IXL_ITR_8K;
			} else if (rx_bytes <= 10) {
				rx_latency = IXL_LOW_LATENCY;
				rx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (rx_bytes <= 20) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		}

		rxr->latency = rx_latency;

		if (rx_itr != rxr->itr) {
			/* do an exponential smoothing */
			rx_itr = (10 * rx_itr * rxr->itr) /
			    ((9 * rx_itr) + rxr->itr);
			rxr->itr = rx_itr & IXL_MAX_ITR;
			wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
			    que->me), rxr->itr);
		}
	} else { /* We may have toggled to non-dynamic */
		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->rx_itr_setting = ixlv_rx_itr;
		/* Update the hardware if needed */
		if (rxr->itr != vsi->rx_itr_setting) {
			rxr->itr = vsi->rx_itr_setting;
			wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
			    que->me), rxr->itr);
		}
	}
	rxr->bytes = 0;
	rxr->packets = 0;
	return;
}
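/*
** Worked example of the smoothing above (illustrative numbers): it is a
** weighted harmonic mean, 10*t*c / (9*t + c), giving the current value
** c nine times the weight of the target t. Moving from a current value
** of 8 toward a target of 20 gives (10*20*8) / ((9*20)+8) = 1600/188,
** roughly 8.5, so the moderation value ramps toward the new target
** instead of jumping to it.
*/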
/*
** Provide an update to the queue TX
** interrupt moderation value.
*/
static void
ixlv_set_queue_tx_itr(struct ixl_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	u16		tx_itr;
	u16		tx_latency = 0;
	int		tx_bytes;

	/* Idle, do nothing */
	if (txr->bytes == 0)
		return;

	if (ixlv_dynamic_tx_itr) {
		tx_bytes = txr->bytes/txr->itr;
		tx_itr = txr->itr;

		/* Adjust latency range */
		switch (txr->latency) {
		case IXL_LOW_LATENCY:
			if (tx_bytes > 10) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (tx_bytes > 20) {
				tx_latency = IXL_BULK_LATENCY;
				tx_itr = IXL_ITR_8K;
			} else if (tx_bytes <= 10) {
				tx_latency = IXL_LOW_LATENCY;
				tx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (tx_bytes <= 20) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		}

		txr->latency = tx_latency;

		if (tx_itr != txr->itr) {
			/* do an exponential smoothing */
			tx_itr = (10 * tx_itr * txr->itr) /
			    ((9 * tx_itr) + txr->itr);
			txr->itr = tx_itr & IXL_MAX_ITR;
			wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
			    que->me), txr->itr);
		}

	} else { /* We may have toggled to non-dynamic */
		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->tx_itr_setting = ixlv_tx_itr;
		/* Update the hardware if needed */
		if (txr->itr != vsi->tx_itr_setting) {
			txr->itr = vsi->tx_itr_setting;
			wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
			    que->me), txr->itr);
		}
	}
	txr->bytes = 0;
	txr->packets = 0;
	return;
}
/*
** MSIX Interrupt Handlers and Tasklets
*/
static void
ixlv_handle_que(void *context, int pending)
{
	struct ixl_queue *que = context;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct tx_ring *txr = &que->txr;
	struct ifnet *ifp = vsi->ifp;
	bool more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);
		mtx_lock(&txr->mtx);
		ixl_txeof(que);
		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		mtx_unlock(&txr->mtx);
		if (more) {
			taskqueue_enqueue(que->tq, &que->task);
			return;
		}
	}

	/* Re-enable this interrupt */
	ixlv_enable_queue_irq(hw, que->me);
	return;
}
/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixlv_msix_que(void *arg)
{
	struct ixl_queue	*que = arg;
	struct ixl_vsi		*vsi = que->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct tx_ring		*txr = &que->txr;
	bool			more_tx, more_rx;

	/* Spurious interrupts are ignored */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	++que->irqs;

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	mtx_lock(&txr->mtx);
	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	mtx_unlock(&txr->mtx);

	ixlv_set_queue_rx_itr(que);
	ixlv_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
	else
		ixlv_enable_queue_irq(hw, que->me);

	return;
}
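/*
** Note on the handler above: when either rxeof or txeof reports
** outstanding work, it defers to the per-queue taskqueue instead of
** re-enabling the interrupt, so a busy ring is drained from task
** context and the MSIX vector is only unmasked once the ring is quiet.
*/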
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixlv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixlv_sc	*sc = vsi->back;

	INIT_DBG_IF(ifp, "begin");

	mtx_lock(&sc->mtx);

	ixlv_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!vsi->link_up) {
		mtx_unlock(&sc->mtx);
		INIT_DBG_IF(ifp, "end: link not up");
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;
	mtx_unlock(&sc->mtx);
	INIT_DBG_IF(ifp, "end");
	return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt options with ifconfig.
 *
 **********************************************************************/
static int
ixlv_media_change(struct ifnet *ifp)
{
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ifmedia *ifm = &vsi->media;

	INIT_DBG_IF(ifp, "begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	INIT_DBG_IF(ifp, "end");
	return (0);
}
/*********************************************************************
 *  Multicast Initialization
 *
 *  This routine is called by init to reset a fresh state.
 *
 **********************************************************************/

static void
ixlv_init_multi(struct ixl_vsi *vsi)
{
	struct ixlv_mac_filter	*f;
	struct ixlv_sc		*sc = vsi->back;
	int			mcnt = 0;

	IOCTL_DBG_IF(vsi->ifp, "begin");

	/* First clear any multicast filters */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if ((f->flags & IXL_FILTER_USED)
		    && (f->flags & IXL_FILTER_MC)) {
			f->flags |= IXL_FILTER_DEL;
			mcnt++;
		}
	}
	if (mcnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
		    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
		    sc);

	IOCTL_DBG_IF(vsi->ifp, "end");
}
static void
ixlv_add_multi(struct ixl_vsi *vsi)
{
	struct ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct ixlv_sc		*sc = vsi->back;
	int			mcnt = 0;

	IOCTL_DBG_IF(ifp, "begin");

	if_maddr_rlock(ifp);
	/*
	** Get a count, to decide if we
	** simply use multicast promiscuous.
	*/
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mcnt++;
	}
	if_maddr_runlock(ifp);

	// TODO: Remove -- cannot set promiscuous mode in a VF
	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete all multicast filters */
		ixlv_init_multi(vsi);
		sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
		    IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
		    sc);
		IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
		return;
	}

	mcnt = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (!ixlv_add_mac_filter(sc,
		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    IXL_FILTER_MC))
			mcnt++;
	}
	if_maddr_runlock(ifp);
	/*
	** Notify AQ task that sw filters need to be
	** added to hw list
	*/
	if (mcnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
		    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
		    sc);

	IOCTL_DBG_IF(ifp, "end");
}
static void
ixlv_del_multi(struct ixl_vsi *vsi)
{
	struct ixlv_mac_filter	*f;
	struct ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct ixlv_sc		*sc = vsi->back;
	int			mcnt = 0;
	bool			match = FALSE;

	IOCTL_DBG_IF(ifp, "begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if ((f->flags & IXL_FILTER_USED)
		    && (f->flags & IXL_FILTER_MC)) {
			/* check if mac address in filter is in sc's list */
			match = FALSE;
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr =
				    (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			/* if this filter is not in the sc's list, remove it */
			if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
				IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
				    MAC_FORMAT_ARGS(f->macaddr));
			}
			else if (match == FALSE)
				IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
				    MAC_FORMAT_ARGS(f->macaddr));
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
		    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
		    sc);

	IOCTL_DBG_IF(ifp, "end");
}
/*********************************************************************
 *
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/
static void
ixlv_local_timer(void *arg)
{
	struct ixlv_sc		*sc = arg;
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = sc->dev;
	int			hung = 0;
	u32			mask, val;

	IXLV_CORE_LOCK_ASSERT(sc);

	/* If Reset is in progress just bail */
	if (sc->init_state == IXLV_RESET_PENDING)
		return;

	/* Check for when PF triggers a VF reset */
	val = rd32(hw, I40E_VFGEN_RSTAT) &
	    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

	if (val != I40E_VFR_VFACTIVE
	    && val != I40E_VFR_COMPLETED) {
		DDPRINTF(dev, "reset in progress! (%d)", val);
		return;
	}

	ixlv_request_stats(sc);

	/* clean and process any events */
	taskqueue_enqueue(sc->tq, &sc->aq_irq);

	/*
	** Check status on the queues for a hang
	*/
	mask = (I40E_VFINT_DYN_CTLN_INTENA_MASK |
	    I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK);

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		/* Any queues with outstanding work get a sw irq */
		if (que->busy)
			wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to IXL_MAX_TX_BUSY we declare it hung.
		*/
		if (que->busy == IXL_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			vsi->active_queues &= ~((u64)1 << que->me);
			continue;
		}
		/* Check if we've come back from hung */
		if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
			vsi->active_queues |= ((u64)1 << que->me);
		if (que->busy >= IXL_MAX_TX_BUSY) {
			device_printf(dev, "Warning queue %d "
			    "appears to be hung!\n", i);
			que->busy = IXL_QUEUE_HUNG;
			++hung;
		}
	}
	/* Only reset when all queues show hung */
	if (hung == vsi->num_queues)
		goto hung;
	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
	return;

hung:
	device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
	sc->init_state = IXLV_RESET_REQUIRED;
	ixlv_init_locked(sc);
}
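/*
** Illustration (not driver code) of the hang check above: suppose
** queue 2 stops cleaning.  Each timer tick txeof finds uncleaned
** descriptors and que->busy climbs 1, 2, ... until it reaches
** IXL_MAX_TX_BUSY; the warning prints once, busy is latched at
** IXL_QUEUE_HUNG, and the queue's bit is dropped from the active set:
**
**	vsi->active_queues &= ~((u64)1 << 2);	// queue 2 inactive
**
** Only when every queue is latched hung does the timer request a
** reinit instead of rescheduling itself.
*/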
/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
void
ixlv_update_link_status(struct ixlv_sc *sc)
{
	struct ixl_vsi	*vsi = &sc->vsi;
	struct ifnet	*ifp = vsi->ifp;
	device_t	dev = sc->dev;

	if (sc->link_up) {
		if (vsi->link_active == FALSE) {
			if (bootverbose)
				device_printf(dev, "Link is Up, %d Gbps\n",
				    (vsi->link_speed == I40E_LINK_SPEED_40GB) ? 40 : 10);
			vsi->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			vsi->link_active = FALSE;
		}
	}
}
/*********************************************************************
 *
 *  This routine stops all traffic on the adapter: it disables the
 *  VF's queues, drains any pending admin queue work, and stops the
 *  local timer.  (A VF cannot issue a global MAC reset; that is the
 *  PF's job.)
 *
 **********************************************************************/
static void
ixlv_stop(struct ixlv_sc *sc)
{
	struct ifnet	*ifp;
	int		start;

	ifp = sc->vsi.ifp;
	INIT_DBG_IF(ifp, "begin");

	IXLV_CORE_LOCK_ASSERT(sc);

	ixl_vc_flush(&sc->vc_mgr);
	ixlv_disable_queues(sc);

	start = ticks;
	while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
	    ((ticks - start) < hz/10))
		ixlv_do_adminq_locked(sc);

	/* Stop the local timer */
	callout_stop(&sc->timer);

	INIT_DBG_IF(ifp, "end");
}
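/*
** Note on the drain loop above: ticks advances hz times per second,
** so (ticks - start) < hz/10 bounds the wait for IFF_DRV_RUNNING to
** clear at roughly 100 ms while admin queue responses are polled.
*/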
/*********************************************************************
 *
 *  Free all station queue structs.
 *
 **********************************************************************/
static void
ixlv_free_queues(struct ixl_vsi *vsi)
{
	struct ixlv_sc		*sc = (struct ixlv_sc *)vsi->back;
	struct ixl_queue	*que = vsi->queues;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		struct rx_ring *rxr = &que->rxr;

		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
			continue;
		IXL_TX_LOCK(txr);
		ixl_free_que_tx(que);
		if (txr->base)
			i40e_free_dma_mem(&sc->hw, &txr->dma);
		IXL_TX_UNLOCK(txr);
		IXL_TX_LOCK_DESTROY(txr);

		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
			continue;
		IXL_RX_LOCK(rxr);
		ixl_free_que_rx(que);
		if (rxr->base)
			i40e_free_dma_mem(&sc->hw, &rxr->dma);
		IXL_RX_UNLOCK(rxr);
		IXL_RX_LOCK_DESTROY(rxr);
	}
	free(vsi->queues, M_DEVBUF);
}
/*
** ixlv_config_rss - setup RSS
**
** RSS keys and table are cleared on VF reset.
*/
static void
ixlv_config_rss(struct ixlv_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	struct ixl_vsi	*vsi = &sc->vsi;
	u32		lut = 0;
	u64		set_hena = 0, hena;
	int		i, j;

	/* set up random bits */
	static const u32 seed[I40E_VFQF_HKEY_MAX_INDEX + 1] = {
	    0x794221b4, 0xbca0c5ab, 0x6cd5ebd9, 0x1ada6127,
	    0x983b3aa1, 0x1c4e71eb, 0x7f6328b2, 0xfcdc0da0,
	    0xc135cafa, 0x7a6f7e2d, 0xe7102d28, 0x163cd12e,
	    0x4954b126 };

	/* Don't set up RSS if using a single queue */
	if (vsi->num_queues == 1) {
		wr32(hw, I40E_VFQF_HENA(0), 0);
		wr32(hw, I40E_VFQF_HENA(1), 0);
		ixl_flush(hw);
		return;
	}

	/* Fill out hash function seed */
	for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
		wr32(hw, I40E_VFQF_HKEY(i), seed[i]);

	/* Enable PCTYPES for RSS: */
	set_hena =
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
	    ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
	    ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
	    ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);

	hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
	    ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
	hena |= set_hena;
	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
		if (j == vsi->num_queues)
			j = 0;
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (j & 0xF);
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3) {
			wr32(hw, I40E_VFQF_HLUT(i), lut);
			DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
		}
	}
	ixl_flush(hw);
}
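/*
** Illustration (not driver code) of the LUT packing above with
** vsi->num_queues == 4: j cycles 0,1,2,3 and each pass shifts the
** accumulated bytes left, so after i = 0..3
**
**	lut = 0x00010203;	// four LUT entries, one queue index per byte
**	wr32(hw, I40E_VFQF_HLUT(3), lut);
**
** i.e. each HLUT register holds four table entries and hash results
** are spread across the queues round robin.
*/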
/*
** This routine refreshes vlan filters, called by init;
** it scans the filter table and then updates the AQ.
*/
static void
ixlv_setup_vlan_filters(struct ixlv_sc *sc)
{
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixlv_vlan_filter	*f;
	int			cnt = 0;

	if (vsi->num_vlans == 0)
		return;
	/*
	** Scan the filter table for vlan entries,
	** and if found call for the AQ update.
	*/
	SLIST_FOREACH(f, sc->vlan_filters, next)
		if (f->flags & IXL_FILTER_ADD)
			cnt++;
	if (cnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
		    IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
}
/*
** This routine adds new MAC filters to the sc's list;
** these are later added in hardware by sending a virtual
** channel message.
*/
static int
ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
{
	struct ixlv_mac_filter	*f;
	device_t		dev = sc->dev;

	/* Does one already exist? */
	f = ixlv_find_mac_filter(sc, macaddr);
	if (f != NULL) {
		IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
		    MAC_FORMAT_ARGS(macaddr));
		return (EEXIST);
	}

	/* If not, get a new empty filter */
	f = ixlv_get_mac_filter(sc);
	if (f == NULL) {
		device_printf(dev, "%s: no filters available!!\n",
		    __func__);
		return (ENOMEM);
	}

	IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
	    MAC_FORMAT_ARGS(macaddr));

	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
	f->flags |= flags;
	return (0);
}
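/*
** Usage sketch (illustration only): callers treat a zero return as
** "newly staged", e.g. in ixlv_add_multi() above:
**
**	if (!ixlv_add_mac_filter(sc, addr, IXL_FILTER_MC))
**		mcnt++;	// one more filter for the AQ task to push
**
** A nonzero return is deliberately non-fatal there: the address is
** either already staged or simply dropped.
*/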
/*
** Marks a MAC filter for deletion.
*/
static int
ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
{
	struct ixlv_mac_filter	*f;

	f = ixlv_find_mac_filter(sc, macaddr);
	if (f == NULL)
		return (ENOENT);

	f->flags |= IXL_FILTER_DEL;
	return (0);
}
/*
** Tasklet handler for MSIX Adminq interrupts
**  - done outside interrupt context since it might sleep
*/
static void
ixlv_do_adminq(void *context, int pending)
{
	struct ixlv_sc	*sc = context;

	mtx_lock(&sc->mtx);
	ixlv_do_adminq_locked(sc);
	mtx_unlock(&sc->mtx);
}
static void
ixlv_do_adminq_locked(struct ixlv_sc *sc)
{
	struct i40e_hw			*hw = &sc->hw;
	struct i40e_arq_event_info	event;
	struct i40e_virtchnl_msg	*v_msg;
	i40e_status			ret;
	u16				result = 0;

	IXLV_CORE_LOCK_ASSERT(sc);

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = sc->aq_buffer;
	v_msg = (struct i40e_virtchnl_msg *)&event.desc;

	do {
		ret = i40e_clean_arq_element(hw, &event, &result);
		if (ret)
			break;
		ixlv_vc_completion(sc, v_msg->v_opcode,
		    v_msg->v_retval, event.msg_buf, event.msg_len);
		if (result != 0)
			bzero(event.msg_buf, IXL_AQ_BUF_SZ);
	} while (result);

	ixlv_enable_adminq_irq(hw);
}
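/*
** Note on the loop above: i40e_clean_arq_element() returns nonzero
** once the admin receive queue is empty and reports the number of
** still-pending elements through 'result', so the do/while drains
** every outstanding virtchnl message before the IRQ is re-enabled.
*/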
static void
ixlv_add_sysctls(struct ixlv_sc *sc)
{
	device_t dev = sc->dev;
	struct ixl_vsi *vsi = &sc->vsi;
	struct i40e_eth_stats *es = &vsi->eth_stats;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);

	struct sysctl_oid *vsi_node, *queue_node;
	struct sysctl_oid_list *vsi_list, *queue_list;

#define QUEUE_NAME_LEN 32
	char queue_namebuf[QUEUE_NAME_LEN];

	struct ixl_queue *queues = vsi->queues;
	struct tx_ring *txr;
	struct rx_ring *rxr;

	/* Driver statistics sysctls */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &sc->watchdog_events,
	    "Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
	    CTLFLAG_RD, &sc->admin_irq,
	    "Admin Queue IRQ Handled");

	/* VSI statistics sysctls */
	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
	    CTLFLAG_RD, NULL, "VSI-specific statistics");
	vsi_list = SYSCTL_CHILDREN(vsi_node);

	struct ixl_sysctl_info ctls[] =
	{
		{&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
		{&es->rx_unicast, "ucast_pkts_rcvd",
		    "Unicast Packets Received"},
		{&es->rx_multicast, "mcast_pkts_rcvd",
		    "Multicast Packets Received"},
		{&es->rx_broadcast, "bcast_pkts_rcvd",
		    "Broadcast Packets Received"},
		{&es->rx_discards, "rx_discards", "Discarded RX packets"},
		{&es->rx_unknown_protocol, "rx_unknown_proto",
		    "RX unknown protocol packets"},
		{&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
		{&es->tx_unicast, "ucast_pkts_txd",
		    "Unicast Packets Transmitted"},
		{&es->tx_multicast, "mcast_pkts_txd",
		    "Multicast Packets Transmitted"},
		{&es->tx_broadcast, "bcast_pkts_txd",
		    "Broadcast Packets Transmitted"},
		{&es->tx_errors, "tx_errors", "TX packet errors"},
		/* required last entry */
		{0, 0, 0}
	};

	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
		    CTLFLAG_RD, entry->stat,
		    entry->description);
		entry++;
	}

	/* Queue statistics sysctls */
	for (int q = 0; q < vsi->num_queues; q++) {
		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO,
		    queue_namebuf, CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		txr = &(queues[q].txr);
		rxr = &(queues[q].rxr);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
		    CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
		    "m_defrag() failed");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
		    CTLFLAG_RD, &(queues[q].dropped_pkts),
		    "Driver dropped packets");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
		    CTLFLAG_RD, &(queues[q].irqs),
		    "irqs on this queue");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
		    CTLFLAG_RD, &(queues[q].tso),
		    "TSO");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
		    CTLFLAG_RD, &(queues[q].tx_dma_setup),
		    "Driver tx dma failure in xmit");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
		    CTLFLAG_RD, &(txr->no_desc),
		    "Queue No Descriptor Available");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
		    CTLFLAG_RD, &(txr->total_packets),
		    "Queue Packets Transmitted");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
		    CTLFLAG_RD, &(txr->tx_bytes),
		    "Queue Bytes Transmitted");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
		    CTLFLAG_RD, &(rxr->rx_packets),
		    "Queue Packets Received");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
		    CTLFLAG_RD, &(rxr->rx_bytes),
		    "Queue Bytes Received");

		/* Examine queue state */
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
		    CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
		    sizeof(struct ixl_queue),
		    ixlv_sysctl_qtx_tail_handler, "IU",
		    "Queue Transmit Descriptor Tail");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
		    CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
		    sizeof(struct ixl_queue),
		    ixlv_sysctl_qrx_tail_handler, "IU",
		    "Queue Receive Descriptor Tail");
	}
}
static void
ixlv_init_filters(struct ixlv_sc *sc)
{
	sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INIT(sc->mac_filters);
	sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INIT(sc->vlan_filters);
}
static void
ixlv_free_filters(struct ixlv_sc *sc)
{
	struct ixlv_mac_filter	*f;
	struct ixlv_vlan_filter	*v;

	while (!SLIST_EMPTY(sc->mac_filters)) {
		f = SLIST_FIRST(sc->mac_filters);
		SLIST_REMOVE_HEAD(sc->mac_filters, next);
		free(f, M_DEVBUF);
	}
	while (!SLIST_EMPTY(sc->vlan_filters)) {
		v = SLIST_FIRST(sc->vlan_filters);
		SLIST_REMOVE_HEAD(sc->vlan_filters, next);
		free(v, M_DEVBUF);
	}
}
/**
 * ixlv_sysctl_qtx_tail_handler
 * Retrieves I40E_QTX_TAIL1 value from hardware
 * for a sysctl.
 */
static int
ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_queue	*que;
	int			error;
	u32			val;

	que = ((struct ixl_queue *)oidp->oid_arg1);
	if (!que)
		return (0);

	val = rd32(que->vsi->hw, que->txr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}
/**
 * ixlv_sysctl_qrx_tail_handler
 * Retrieves I40E_QRX_TAIL1 value from hardware
 * for a sysctl.
 */
static int
ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_queue	*que;
	int			error;
	u32			val;

	que = ((struct ixl_queue *)oidp->oid_arg1);
	if (!que)
		return (0);

	val = rd32(que->vsi->hw, que->rxr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}
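/*
** Both handlers are effectively read-only: sysctl_handle_int() copies
** the register value out to the caller, and since the OIDs are created
** with CTLFLAG_RD no new value is ever supplied, so the !req->newptr
** branch is the one normally taken.
*/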