/******************************************************************************

  Copyright (c) 2013-2014, Intel Corporation

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#include "ixl.h"
#include "ixlv.h"

#ifdef RSS
#include <net/rss_config.h>
#endif
/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixlv_driver_version[] = "1.2.1";
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixlv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixlv_strings[] = {
	"Intel(R) Ethernet Connection XL710 VF Driver"
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixlv_probe(device_t);
static int	ixlv_attach(device_t);
static int	ixlv_detach(device_t);
static int	ixlv_shutdown(device_t);
static void	ixlv_init_locked(struct ixlv_sc *);
static int	ixlv_allocate_pci_resources(struct ixlv_sc *);
static void	ixlv_free_pci_resources(struct ixlv_sc *);
static int	ixlv_assign_msix(struct ixlv_sc *);
static int	ixlv_init_msix(struct ixlv_sc *);
static int	ixlv_init_taskqueue(struct ixlv_sc *);
static int	ixlv_setup_queues(struct ixlv_sc *);
static void	ixlv_config_rss(struct ixlv_sc *);
static void	ixlv_stop(struct ixlv_sc *);
static void	ixlv_add_multi(struct ixl_vsi *);
static void	ixlv_del_multi(struct ixl_vsi *);
static void	ixlv_free_queues(struct ixl_vsi *);
static int	ixlv_setup_interface(device_t, struct ixlv_sc *);

static int	ixlv_media_change(struct ifnet *);
static void	ixlv_media_status(struct ifnet *, struct ifmediareq *);

static void	ixlv_local_timer(void *);

static int	ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
static int	ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void	ixlv_init_filters(struct ixlv_sc *);
static void	ixlv_free_filters(struct ixlv_sc *);

static void	ixlv_msix_que(void *);
static void	ixlv_msix_adminq(void *);
static void	ixlv_do_adminq(void *, int);
static void	ixlv_do_adminq_locked(struct ixlv_sc *sc);
static void	ixlv_handle_que(void *, int);
static int	ixlv_reset(struct ixlv_sc *);
static int	ixlv_reset_complete(struct i40e_hw *);
static void	ixlv_set_queue_rx_itr(struct ixl_queue *);
static void	ixlv_set_queue_tx_itr(struct ixl_queue *);
static void	ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
		    enum i40e_status_code);

static void	ixlv_enable_adminq_irq(struct i40e_hw *);
static void	ixlv_disable_adminq_irq(struct i40e_hw *);
static void	ixlv_enable_queue_irq(struct i40e_hw *, int);
static void	ixlv_disable_queue_irq(struct i40e_hw *, int);

static void	ixlv_setup_vlan_filters(struct ixlv_sc *);
static void	ixlv_register_vlan(void *, struct ifnet *, u16);
static void	ixlv_unregister_vlan(void *, struct ifnet *, u16);

static void	ixlv_init_hw(struct ixlv_sc *);
static int	ixlv_setup_vc(struct ixlv_sc *);
static int	ixlv_vf_config(struct ixlv_sc *);

static void	ixlv_cap_txcsum_tso(struct ixl_vsi *,
		    struct ifnet *, int);

static void	ixlv_add_sysctls(struct ixlv_sc *);
static int	ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/
static device_method_t ixlv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixlv_probe),
	DEVMETHOD(device_attach, ixlv_attach),
	DEVMETHOD(device_detach, ixlv_detach),
	DEVMETHOD(device_shutdown, ixlv_shutdown),
	DEVMETHOD_END
};

static driver_t ixlv_driver = {
	"ixlv", ixlv_methods, sizeof(struct ixlv_sc),
};
devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);

MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);
/*
** TUNABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
    "IXLV driver parameters");
/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixlv_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixlv_ringsz, 0, "Descriptor Ring Size");
/* Set to zero to auto calculate */
int ixlv_max_queues = 0;
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixlv_max_queues, 0, "Number of Queues");
/*
** Number of entries in Tx queue buf_ring.
** Increasing this will reduce the number of
** errors when transmitting fragmented UDP
** datagrams.
*/
static int ixlv_txbrsz = DEFAULT_TXBRSZ;
TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
    &ixlv_txbrsz, 0, "TX Buf Ring Size");
/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
**	- default values for static ITR
*/
int ixlv_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixlv_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixlv_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixlv_rx_itr, 0, "RX Interrupt Rate");

int ixlv_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixlv_tx_itr, 0, "TX Interrupt Rate");
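/*
** Usage sketch (not part of the original source): since these are
** CTLFLAG_RDTUN sysctls, they are set at boot from /boot/loader.conf
** and read back with sysctl(8), e.g.:
**
**	hw.ixlv.ringsz=2048
**	hw.ixlv.max_queues=4
**	hw.ixlv.dynamic_rx_itr=1
**
** The values shown above are illustrative only.
*/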
/*********************************************************************
 *  Device identification routine
 *
 *  ixlv_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/
static int
ixlv_probe(device_t dev)
{
	ixl_vendor_info_t *ent;

	u16	pci_vendor_id, pci_device_id;
	u16	pci_subvendor_id, pci_subdevice_id;
	char	device_name[256];

	INIT_DEBUGOUT("ixlv_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixlv_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&
		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&
		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(device_name, "%s, Version - %s",
			    ixlv_strings[ent->index],
			    ixlv_driver_version);
			device_set_desc_copy(dev, device_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
ixlv_attach(device_t dev)
{
	struct ixlv_sc	*sc;
	struct i40e_hw	*hw;
	struct ixl_vsi	*vsi;
	int		error = 0;

	INIT_DBG_DEV(dev, "begin");

	/* Allocate, clear, and link in our primary soft structure */
	sc = device_get_softc(dev);
	sc->dev = sc->osdep.dev = dev;
	hw = &sc->hw;
	vsi = &sc->vsi;

	/* Initialize hw struct */
	ixlv_init_hw(sc);

	/* Allocate filter lists */
	ixlv_init_filters(sc);

	/* Core Lock Init */
	mtx_init(&sc->mtx, device_get_nameunit(dev),
	    "IXL SC Lock", MTX_DEF);

	/* Set up the timer callout */
	callout_init_mtx(&sc->timer, &sc->mtx, 0);

	/* Do PCI setup - map BAR0, etc */
	if (ixlv_allocate_pci_resources(sc)) {
		device_printf(dev, "%s: Allocation of PCI resources failed\n",
		    __func__);
		error = ENXIO;
		goto err_early;
	}

	INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");

	error = i40e_set_mac_type(hw);
	if (error) {
		device_printf(dev, "%s: set_mac_type failed: %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	error = ixlv_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: Device is still being reset\n",
		    __func__);
		goto err_pci_res;
	}

	INIT_DBG_DEV(dev, "VF Device is ready for configuration");

	error = ixlv_setup_vc(sc);
	if (error) {
		device_printf(dev, "%s: Error setting up PF comms, %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	INIT_DBG_DEV(dev, "PF API version verified");

	/* TODO: Figure out why MDD events occur when this reset is removed. */
	/* Need API version before sending reset message */
	error = ixlv_reset(sc);
	if (error) {
		device_printf(dev, "VF reset failed; reload the driver\n");
		goto err_aq;
	}

	INIT_DBG_DEV(dev, "VF reset complete");

	/* Ask for VF config from PF */
	error = ixlv_vf_config(sc);
	if (error) {
		device_printf(dev, "Error getting configuration from PF: %d\n",
		    error);
		goto err_aq;
	}

	INIT_DBG_DEV(dev, "VF config from PF:");
	INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
	    sc->vf_res->num_vsis,
	    sc->vf_res->num_queue_pairs,
	    sc->vf_res->max_vectors,
	    sc->vf_res->max_mtu);
	INIT_DBG_DEV(dev, "Offload flags: %#010x",
	    sc->vf_res->vf_offload_flags);

	// TODO: Move this into ixlv_vf_config?
	/* got VF config message back from PF, now we can parse it */
	for (int i = 0; i < sc->vf_res->num_vsis; i++) {
		if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
			sc->vsi_res = &sc->vf_res->vsi_res[i];
	}
	if (!sc->vsi_res) {
		device_printf(dev, "%s: no LAN VSI found\n", __func__);
		error = EIO;
		goto err_res_buf;
	}

	INIT_DBG_DEV(dev, "Resource Acquisition complete");

	/* If no mac address was assigned just make a random one */
	if (!ixlv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		addr[0] &= 0xFE;	/* clear the multicast bit */
		addr[0] |= 0x02;	/* set the locally-administered bit */
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	vsi->id = sc->vsi_res->vsi_id;
	vsi->back = (void *)sc;

	/* This allocates the memory and early settings */
	if (ixlv_setup_queues(sc) != 0) {
		device_printf(dev, "%s: setup queues failed!\n",
		    __func__);
		error = EIO;
		goto out;
	}

	/* Setup the stack interface */
	if (ixlv_setup_interface(dev, sc) != 0) {
		device_printf(dev, "%s: setup interface failed!\n",
		    __func__);
		error = EIO;
		goto out;
	}

	INIT_DBG_DEV(dev, "Queue memory and interface setup");

	/* Do queue interrupt setup */
	ixlv_assign_msix(sc);

	/* Start AdminQ taskqueue */
	ixlv_init_taskqueue(sc);

	/* Initialize stats */
	bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
	ixlv_add_sysctls(sc);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

	/* We want AQ enabled early */
	ixlv_enable_adminq_irq(hw);

	/* Set things up to run init */
	sc->init_state = IXLV_INIT_READY;

	ixl_vc_init_mgr(sc, &sc->vc_mgr);

	INIT_DBG_DEV(dev, "end");
	return (error);

out:
	ixlv_free_queues(vsi);
err_res_buf:
	free(sc->vf_res, M_DEVBUF);
err_aq:
	i40e_shutdown_adminq(hw);
err_pci_res:
	ixlv_free_pci_resources(sc);
err_early:
	mtx_destroy(&sc->mtx);
	ixlv_free_filters(sc);
	INIT_DBG_DEV(dev, "end: error %d", error);
	return (error);
}
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
ixlv_detach(device_t dev)
{
	struct ixlv_sc	*sc = device_get_softc(dev);
	struct ixl_vsi	*vsi = &sc->vsi;

	INIT_DBG_DEV(dev, "begin");

	/* Make sure VLANS are not using driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		INIT_DBG_DEV(dev, "end");
		return (EBUSY);
	}

	/* Stop driver */
	ether_ifdetach(vsi->ifp);
	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		mtx_lock(&sc->mtx);
		ixlv_stop(sc);
		mtx_unlock(&sc->mtx);
	}

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	/* Drain VC mgr */
	callout_drain(&sc->vc_mgr.callout);

	i40e_shutdown_adminq(&sc->hw);
	taskqueue_free(sc->tq);

	free(sc->vf_res, M_DEVBUF);
	ixlv_free_pci_resources(sc);
	ixlv_free_queues(vsi);
	mtx_destroy(&sc->mtx);
	ixlv_free_filters(sc);

	bus_generic_detach(dev);
	INIT_DBG_DEV(dev, "end");
	return (0);
}
/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/
static int
ixlv_shutdown(device_t dev)
{
	struct ixlv_sc	*sc = device_get_softc(dev);

	INIT_DBG_DEV(dev, "begin");

	mtx_lock(&sc->mtx);
	ixlv_stop(sc);
	mtx_unlock(&sc->mtx);

	INIT_DBG_DEV(dev, "end");
	return (0);
}
/*
 * Configure TXCSUM(IPV6) and TSO(4/6)
 *	- the hardware handles these together so we
 *	  need to tweak them as a pair
 */
static void
ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			if_printf(ifp,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			if_printf(ifp,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}
	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			if_printf(ifp,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			if_printf(ifp,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixlv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
static int
ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixlv_sc	*sc = vsi->back;
	struct ifreq	*ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int		error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixlv_init(vsi);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
		mtx_lock(&sc->mtx);
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			error = EINVAL;
			IOCTL_DBG_IF(ifp, "mtu too large");
		} else {
			IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", ifp->if_mtu, ifr->ifr_mtu);
			// ERJ: Interestingly enough, these types don't match
			ifp->if_mtu = (u_long)ifr->ifr_mtu;
			vsi->max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			    + ETHER_VLAN_ENCAP_LEN;
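			/*
			 * Worked example: with the default MTU of 1500 this
			 * yields 1500 + 14 (Ethernet header) + 4 (CRC)
			 * + 4 (VLAN tag) = 1522 bytes.
			 */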
			ixlv_init_locked(sc);
		}
		mtx_unlock(&sc->mtx);
		break;
	case SIOCSIFFLAGS:
		IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
		mtx_lock(&sc->mtx);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixlv_init_locked(sc);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixlv_stop(sc);
		sc->if_flags = ifp->if_flags;
		mtx_unlock(&sc->mtx);
		break;
	case SIOCADDMULTI:
		IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			mtx_lock(&sc->mtx);
			ixlv_disable_intr(vsi);
			ixlv_add_multi(vsi);
			ixlv_enable_intr(vsi);
			mtx_unlock(&sc->mtx);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
		if (sc->init_state == IXLV_RUNNING) {
			mtx_lock(&sc->mtx);
			ixlv_disable_intr(vsi);
			ixlv_del_multi(vsi);
			ixlv_enable_intr(vsi);
			mtx_unlock(&sc->mtx);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");

		ixlv_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ixlv_init(vsi);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*
** To do a reinit on the VF is unfortunately more complicated
** than on a physical device; we must have the PF more or less
** completely recreate our memory, so many things that were
** done only once at attach in traditional drivers now must be
** redone at each reinitialization. This function does that
** 'prelude' so we can then call the normal locked init code.
*/
int
ixlv_reinit_locked(struct ixlv_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ifnet		*ifp = vsi->ifp;
	struct ixlv_mac_filter	*mf, *mf_temp;
	struct ixlv_vlan_filter	*vf;
	int			error = 0;

	INIT_DBG_IF(ifp, "begin");

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ixlv_stop(sc);

	error = ixlv_reset(sc);

	INIT_DBG_IF(ifp, "VF was reset");

	/* set the state in case we went thru RESET */
	sc->init_state = IXLV_RUNNING;

	/*
	** Resetting the VF drops all filters from hardware;
	** we need to mark them to be re-added in init.
	*/
	SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
		if (mf->flags & IXL_FILTER_DEL) {
			SLIST_REMOVE(sc->mac_filters, mf,
			    ixlv_mac_filter, next);
			free(mf, M_DEVBUF);
		} else
			mf->flags |= IXL_FILTER_ADD;
	}
	if (vsi->num_vlans != 0)
		SLIST_FOREACH(vf, sc->vlan_filters, next)
			vf->flags = IXL_FILTER_ADD;
	else { /* clean any stale filters */
		while (!SLIST_EMPTY(sc->vlan_filters)) {
			vf = SLIST_FIRST(sc->vlan_filters);
			SLIST_REMOVE_HEAD(sc->vlan_filters, next);
			free(vf, M_DEVBUF);
		}
	}

	ixlv_enable_adminq_irq(hw);
	ixl_vc_flush(&sc->vc_mgr);

	INIT_DBG_IF(ifp, "end");
	return (error);
}
static void
ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
	enum i40e_status_code code)
{
	struct ixlv_sc *sc;

	sc = arg;

	/*
	 * Ignore "Adapter Stopped" message as that happens if an ifconfig down
	 * happens while a command is in progress, so we don't print an error
	 * in that case.
	 */
	if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
		if_printf(sc->vsi.ifp,
		    "Error %d waiting for PF to complete operation %d\n",
		    code, cmd->request);
	}
}
static void
ixlv_init_locked(struct ixlv_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	int			error = 0;

	INIT_DBG_IF(ifp, "begin");

	IXLV_CORE_LOCK_ASSERT(sc);

	/* Do a reinit first if an init has already been done */
	if ((sc->init_state == IXLV_RUNNING) ||
	    (sc->init_state == IXLV_RESET_REQUIRED) ||
	    (sc->init_state == IXLV_RESET_PENDING))
		error = ixlv_reinit_locked(sc);
	/* Don't bother with init if we failed reinit */
	if (error)
		goto init_done;

	/* Remove existing MAC filter if new MAC addr is set */
	if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
		error = ixlv_del_mac_filter(sc, hw->mac.addr);
		if (error == 0)
			ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
			    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
			    sc);
	}

	/* Check for an LAA mac address... */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;

	/* Add mac filter for this VF to PF */
	if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
		error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
		if (!error || error == EEXIST)
			ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
			    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
			    sc);
	}

	/* Setup vlan's if needed */
	ixlv_setup_vlan_filters(sc);

	/* Prepare the queues for operation */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		ixl_init_tx_ring(que);

		if (vsi->max_frame_size <= 2048)
			rxr->mbuf_sz = MCLBYTES;
		else
			rxr->mbuf_sz = MJUMPAGESIZE;
		ixl_init_rx_ring(que);
	}

	/* Configure queues */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
	    IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);

	/* Set up RSS */
	ixlv_config_rss(sc);

	/* Map vectors */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
	    IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);

	/* Enable queues */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
	    IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);

	/* Start the local timer */
	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);

	sc->init_state = IXLV_RUNNING;

init_done:
	INIT_DBG_IF(ifp, "end");
	return;
}
/*
** Init entry point for the stack
*/
void
ixlv_init(void *arg)
{
	struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
	struct ixlv_sc *sc = vsi->back;
	int retries = 0;

	mtx_lock(&sc->mtx);
	ixlv_init_locked(sc);
	mtx_unlock(&sc->mtx);

	/* Wait for init_locked to finish */
	while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
	    && ++retries < IXLV_AQ_MAX_ERR) {
		i40e_msec_delay(25);
	}
	if (retries >= IXLV_AQ_MAX_ERR)
		if_printf(vsi->ifp,
		    "Init failed to complete in allotted time!\n");
}
/*
 * ixlv_attach() helper function; gathers information about
 * the (virtual) hardware for use elsewhere in the driver.
 */
static void
ixlv_init_hw(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);
}
/*
 * ixlv_attach() helper function; initializes the admin queue
 * and attempts to establish contact with the PF by
 * retrying the initial "API version" message several times
 * or until the PF responds.
 */
static int
ixlv_setup_vc(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int error = 0, ret_error = 0, asq_retries = 0;
	bool send_api_ver_retried = FALSE;

	/* Need to set these AQ parameters before initializing AQ */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

	for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
		/* Initialize admin queue */
		error = i40e_init_adminq(hw);
		if (error) {
			device_printf(dev, "%s: init_adminq failed: %d\n",
			    __func__, error);
			ret_error = 1;
			continue;
		}

		INIT_DBG_DEV(dev, "Initialized Admin Queue, attempt %d", i+1);

		/* Send VF's API version */
		error = ixlv_send_api_ver(sc);
		if (error) {
			i40e_shutdown_adminq(hw);
			ret_error = 2;
			device_printf(dev, "%s: unable to send api"
			    " version to PF on attempt %d, error %d\n",
			    __func__, i+1, error);
			continue;
		}

		asq_retries = 0;
		while (!i40e_asq_done(hw)) {
			if (++asq_retries > IXLV_AQ_MAX_ERR) {
				i40e_shutdown_adminq(hw);
				DDPRINTF(dev, "Admin Queue timeout "
				    "(waiting for send_api_ver), %d more retries...",
				    IXLV_AQ_MAX_ERR - (i + 1));
				ret_error = 3;
				break;
			}
			i40e_msec_delay(10);
		}
		if (asq_retries > IXLV_AQ_MAX_ERR)
			continue;

		INIT_DBG_DEV(dev, "Sent API version message to PF");

		/* Verify that the VF accepts the PF's API version */
		error = ixlv_verify_api_ver(sc);
		if (error == ETIMEDOUT) {
			if (!send_api_ver_retried) {
				/* Resend message, one more time */
				send_api_ver_retried = TRUE;
				device_printf(dev,
				    "%s: Timeout while verifying API version on first"
				    " try!\n", __func__);
				continue;
			} else {
				device_printf(dev,
				    "%s: Timeout while verifying API version on second"
				    " try!\n", __func__);
				ret_error = 4;
				break;
			}
		}
		if (error) {
			device_printf(dev,
			    "%s: Unable to verify API version,"
			    " error %d\n", __func__, error);
			ret_error = 5;
		}
		break;
	}

	if (ret_error >= 4)
		i40e_shutdown_adminq(hw);
	return (ret_error);
}
/*
 * ixlv_attach() helper function; asks the PF for this VF's
 * configuration, and saves the information if it receives it.
 */
static int
ixlv_vf_config(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int bufsz, error = 0, ret_error = 0;
	int asq_retries, retried = 0;

retry_config:
	error = ixlv_send_vf_config_msg(sc);
	if (error) {
		device_printf(dev,
		    "%s: Unable to send VF config request, attempt %d,"
		    " error %d\n", __func__, retried + 1, error);
		ret_error = 2;
	}

	asq_retries = 0;
	while (!i40e_asq_done(hw)) {
		if (++asq_retries > IXLV_AQ_MAX_ERR) {
			device_printf(dev, "%s: Admin Queue timeout "
			    "(waiting for send_vf_config_msg), attempt %d\n",
			    __func__, retried + 1);
			ret_error = 3;
			goto fail;
		}
		i40e_msec_delay(10);
	}

	INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
	    retried + 1);

	/* Allocate space to hold the PF's response */
	bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
	    (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
	sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
	if (sc->vf_res == NULL) {
		device_printf(dev,
		    "%s: Unable to allocate memory for VF configuration"
		    " message from PF on attempt %d\n", __func__, retried + 1);
		ret_error = 9;
		goto fail;
	}

	/* Check for VF config response */
	error = ixlv_get_vf_config(sc);
	if (error == ETIMEDOUT) {
		/* The 1st time we timeout, send the configuration message again */
		if (!retried) {
			retried++;
			goto retry_config;
		}
	}
	if (error) {
		device_printf(dev,
		    "%s: Unable to get VF configuration from PF after %d tries!\n",
		    __func__, retried + 1);
		ret_error = 4;
	}
	goto done;

fail:
	free(sc->vf_res, M_DEVBUF);
	sc->vf_res = NULL;

done:
	return (ret_error);
}
/*
 * Allocate MSI/X vectors, setup the AQ vector early
 */
static int
ixlv_init_msix(struct ixlv_sc *sc)
{
	device_t dev = sc->dev;
	int rid, want, vectors, queues, available;

	rid = PCIR_BAR(IXL_BAR);
	sc->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!sc->msix_mem) {
		/* May not be enabled */
		device_printf(sc->dev,
		    "Unable to map MSIX table\n");
		goto fail;
	}

	available = pci_msix_count(dev);
	if (available == 0) { /* system has msix disabled */
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, sc->msix_mem);
		sc->msix_mem = NULL;
		goto fail;
	}

	/* Figure out a reasonable auto config value */
	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;

	/* Override with hardcoded value if sane */
	if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues))
		queues = ixlv_max_queues;
#ifdef RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif
	/* Enforce the VF max value */
	if (queues > IXLV_MAX_QUEUES)
		queues = IXLV_MAX_QUEUES;

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for the admin queue.
	*/
	want = queues + 1;
	if (want <= available)	/* Have enough */
		vectors = want;
	else {
		device_printf(sc->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors available but %d wanted!\n",
		    available, want);
		goto fail;
	}

#ifdef RSS
	/*
	 * If we're doing RSS, the number of queues needs to
	 * match the number of RSS buckets that are configured.
	 *
	 * + If there's more queues than RSS buckets, we'll end
	 *   up with queues that get no traffic.
	 *
	 * + If there's more RSS buckets than queues, we'll end
	 *   up having multiple RSS buckets map to the same queue,
	 *   so there'll be some contention.
	 */
	if (queues != rss_getnumbuckets()) {
		device_printf(dev,
		    "%s: queues (%d) != RSS buckets (%d)"
		    "; performance will be impacted.\n",
		    __func__, queues, rss_getnumbuckets());
	}
#endif

	if (pci_alloc_msix(dev, &vectors) == 0) {
		device_printf(sc->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		sc->msix = vectors;
		sc->vsi.num_queues = queues;
	}

	/*
	** Explicitly set the guest PCI BUSMASTER capability
	** and we must rewrite the ENABLE in the MSIX control
	** register again at this point to cause the host to
	** successfully initialize us.
	*/
	{
		u16 pci_cmd_word;
		int msix_ctrl;
		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	/* Next we need to setup the vector for the Admin Queue */
	rid = 1;	// zero vector + 1
	sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->res == NULL) {
		device_printf(dev, "Unable to allocate"
		    " bus resource: AQ interrupt\n");
		goto fail;
	}
	if (bus_setup_intr(dev, sc->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixlv_msix_adminq, sc, &sc->tag)) {
		sc->res = NULL;
		device_printf(dev, "Failed to register AQ handler");
		goto fail;
	}
	bus_describe_intr(dev, sc->res, sc->tag, "adminq");

	return (vectors);

fail:
	/* The VF driver MUST use MSIX */
	return (0);
}
static int
ixlv_allocate_pci_resources(struct ixlv_sc *sc)
{
	int		rid;
	device_t	dev = sc->dev;

	rid = PCIR_BAR(0);
	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(sc->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	sc->osdep.mem_bus_space_tag =
	    rman_get_bustag(sc->pci_mem);
	sc->osdep.mem_bus_space_handle =
	    rman_get_bushandle(sc->pci_mem);
	sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
	sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
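	/*
	 * Note: hw_addr below does not hold a CPU virtual address; it
	 * stores the address of the bus-space handle, and all register
	 * access goes through the driver's osdep rd32()/wr32() wrappers.
	 */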
	sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;

	sc->hw.back = &sc->osdep;

	/* Disable adminq interrupts */
	ixlv_disable_adminq_irq(&sc->hw);

	/*
	** Now setup MSI/X, it will return
	** us the number of supported vectors
	*/
	sc->msix = ixlv_init_msix(sc);

	/* We fail without MSIX support */
	if (sc->msix == 0)
		return (ENXIO);

	return (0);
}
static void
ixlv_free_pci_resources(struct ixlv_sc *sc)
{
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = sc->dev;

	/* We may get here before stations are setup */
	if (que == NULL)
		goto early;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		int rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}

early:
	/* Clean the AdminQ interrupt */
	if (sc->tag != NULL) {
		bus_teardown_intr(dev, sc->res, sc->tag);
		sc->tag = NULL;
	}
	if (sc->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);

	pci_release_msi(dev);

	if (sc->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(IXL_BAR), sc->msix_mem);

	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), sc->pci_mem);

	return;
}
/*
 * Create taskqueue and tasklet for Admin Queue interrupts.
 */
static int
ixlv_init_taskqueue(struct ixlv_sc *sc)
{
	int error = 0;

	TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);

	sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->tq);
	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
	    device_get_nameunit(sc->dev));

	return (error);
}
/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers for the VSI queues
 *
 **********************************************************************/
static int
ixlv_assign_msix(struct ixlv_sc *sc)
{
	device_t		dev = sc->dev;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct tx_ring		*txr;
	int			error, rid, vector = 1;

	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
		int cpu_id = i;
		rid = vector + 1;
		txr = &que->txr;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev,"Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixlv_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register que handler");
			return (error);
		}
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
		/* Bind the vector to a CPU */
#ifdef RSS
		cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
		bus_bind_intr(dev, que->res, cpu_id);
		que->msix = vector;
		vsi->que_mask |= (u64)(1 << que->msix);
		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
		TASK_INIT(&que->task, 0, ixlv_handle_que, que);
		que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
		taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
		    cpu_id, "%s (bucket %d)",
		    device_get_nameunit(dev), cpu_id);
#else
		taskqueue_start_threads(&que->tq, 1, PI_NET,
		    "%s que", device_get_nameunit(dev));
#endif
	}

	return (0);
}
/*
** Requests a VF reset from the PF.
**
** Requires the VF's Admin Queue to be initialized.
*/
static int
ixlv_reset(struct ixlv_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	int		error = 0;

	/* Ask the PF to reset us if we are initiating */
	if (sc->init_state != IXLV_RESET_PENDING)
		ixlv_request_reset(sc);

	i40e_msec_delay(100);
	error = ixlv_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: VF reset failed\n",
		    __func__);
		return (error);
	}

	error = i40e_shutdown_adminq(hw);
	if (error) {
		device_printf(dev, "%s: shutdown_adminq failed: %d\n",
		    __func__, error);
		return (error);
	}

	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "%s: init_adminq failed: %d\n",
		    __func__, error);
		return (error);
	}

	return (error);
}
static int
ixlv_reset_complete(struct i40e_hw *hw)
{
	u32 reg;

	for (int i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_VFGEN_RSTAT) &
		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

		if ((reg == I40E_VFR_VFACTIVE) ||
		    (reg == I40E_VFR_COMPLETED))
			return (0);
		i40e_msec_delay(100);
	}

	return (EBUSY);
}
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static int
ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
{
	struct ifnet		*ifp;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;

	INIT_DBG_DEV(dev, "begin");

	ifp = vsi->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "%s: could not allocate ifnet"
		    " structure!\n", __func__);
		return (-1);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 4000000000;  // ??
	ifp->if_init = ixlv_init;
	ifp->if_softc = vsi;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixlv_ioctl;

#if __FreeBSD_version >= 1100000
	if_setgetcounterfn(ifp, ixl_get_counter);
#endif

	ifp->if_transmit = ixl_mq_start;

	ifp->if_qflush = ixl_qflush;
	ifp->if_snd.ifq_maxlen = que->num_desc - 2;

	ether_ifattach(ifp, sc->hw.mac.addr);

	vsi->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_TSO;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;

	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU
			     |  IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	** Don't turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the ixl driver you can
	** enable this and get full hardware tag filtering.
	*/
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
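	/*
	 * When filtering is wanted, the capability can be toggled per
	 * interface at runtime with ifconfig(8), e.g.
	 * "ifconfig ixlv0 vlanhwfilter" (interface name illustrative).
	 */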
	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
	    ixlv_media_status);

	// JFV Add media types later?

	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	INIT_DBG_DEV(dev, "end");
	return (0);
}
/*
** Allocate and setup the interface queues
*/
static int
ixlv_setup_queues(struct ixlv_sc *sc)
{
	device_t		dev = sc->dev;
	struct ixl_vsi		*vsi;
	struct ixl_queue	*que;
	struct tx_ring		*txr;
	struct rx_ring		*rxr;
	int			rsize, tsize;
	int			error = I40E_SUCCESS;

	vsi = &sc->vsi;
	vsi->back = (void *)sc;
	vsi->hw = &sc->hw;
	vsi->num_vlans = 0;

	/* Get memory for the station queues */
	if (!(vsi->queues =
	    (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		error = ENOMEM;
		goto early;
	}

	for (int i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		que->num_desc = ixlv_ringsz;
		que->me = i;
		que->vsi = vsi;
		/* mark the queue as active */
		vsi->active_queues |= (u64)1 << que->me;

		txr = &que->txr;
		txr->que = que;
		txr->tail = I40E_QTX_TAIL1(que->me);
		/* Initialize the TX lock */
		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(dev), que->me);
		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
		/*
		** Create the TX descriptor ring, the extra int is
		** added as the location for HEAD WB.
		*/
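		/*
		** Sizing sketch (illustrative): assuming the default of
		** 1024 descriptors and the 16-byte i40e_tx_desc, this is
		** 1024 * 16 + 4 bytes for the head write-back slot,
		** rounded up to a DBA_ALIGN boundary.
		*/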
		tsize = roundup2((que->num_desc *
		    sizeof(struct i40e_tx_desc)) +
		    sizeof(u32), DBA_ALIGN);
		if (i40e_allocate_dma_mem(&sc->hw,
		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
			device_printf(dev,
			    "Unable to allocate TX Descriptor memory\n");
			error = ENOMEM;
			goto fail;
		}
		txr->base = (struct i40e_tx_desc *)txr->dma.va;
		bzero((void *)txr->base, tsize);
		/* Now allocate transmit soft structs for the ring */
		if (ixl_allocate_tx_data(que)) {
			device_printf(dev,
			    "Critical Failure setting up TX structures\n");
			error = ENOMEM;
			goto fail;
		}
		/* Allocate a buf ring */
		txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
		    M_WAITOK, &txr->mtx);
		if (txr->br == NULL) {
			device_printf(dev,
			    "Critical Failure setting up TX buf ring\n");
			error = ENOMEM;
			goto fail;
		}

		/*
		 * Next the RX queues...
		 */
		rsize = roundup2(que->num_desc *
		    sizeof(union i40e_rx_desc), DBA_ALIGN);
		rxr = &que->rxr;
		rxr->que = que;
		rxr->tail = I40E_QRX_TAIL1(que->me);

		/* Initialize the RX side lock */
		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(dev), que->me);
		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);

		if (i40e_allocate_dma_mem(&sc->hw,
		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
			device_printf(dev,
			    "Unable to allocate RX Descriptor memory\n");
			error = ENOMEM;
			goto fail;
		}
		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
		bzero((void *)rxr->base, rsize);

		/* Allocate receive soft structs for the ring */
		if (ixl_allocate_rx_data(que)) {
			device_printf(dev,
			    "Critical Failure setting up receive structs\n");
			error = ENOMEM;
			goto fail;
		}
	}

	return (0);

fail:
	for (int i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		rxr = &que->rxr;
		txr = &que->txr;
		if (rxr->base)
			i40e_free_dma_mem(&sc->hw, &rxr->dma);
		if (txr->base)
			i40e_free_dma_mem(&sc->hw, &txr->dma);
	}
	free(vsi->queues, M_DEVBUF);

early:
	return (error);
}
/*
** This routine is run via a vlan config EVENT,
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA, init will
** repopulate the real table.
*/
static void
ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct ixl_vsi		*vsi = ifp->if_softc;
	struct ixlv_sc		*sc = vsi->back;
	struct ixlv_vlan_filter	*v;

	if (ifp->if_softc != arg)	/* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;
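	/*
	 * VID 0 carries 802.1p priority-tagged frames and VID 4095 is
	 * reserved by IEEE 802.1Q, hence both are rejected above.
	 */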
	/* Sanity check - make sure it doesn't already exist */
	SLIST_FOREACH(v, sc->vlan_filters, next) {
		if (v->vlan == vtag)
			return;
	}

	mtx_lock(&sc->mtx);
	++vsi->num_vlans;
	v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (v == NULL) {	/* defensive: M_NOWAIT allocations can fail */
		--vsi->num_vlans;
		mtx_unlock(&sc->mtx);
		return;
	}
	SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
	v->vlan = vtag;
	v->flags = IXL_FILTER_ADD;
	ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
	    IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
	mtx_unlock(&sc->mtx);
	return;
}
/*
** This routine is run via a vlan
** unconfig EVENT, remove our entry
** in the soft vfta.
*/
static void
ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct ixl_vsi		*vsi = ifp->if_softc;
	struct ixlv_sc		*sc = vsi->back;
	struct ixlv_vlan_filter	*v;
	int			i = 0;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	mtx_lock(&sc->mtx);
	SLIST_FOREACH(v, sc->vlan_filters, next) {
		if (v->vlan == vtag) {
			v->flags = IXL_FILTER_DEL;
			++i;
			--vsi->num_vlans;
		}
	}
	if (i)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
		    IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
	mtx_unlock(&sc->mtx);
	return;
}
/*
** Get a new filter and add it to the mac filter list.
*/
static struct ixlv_mac_filter *
ixlv_get_mac_filter(struct ixlv_sc *sc)
{
	struct ixlv_mac_filter	*f;

	f = malloc(sizeof(struct ixlv_mac_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (f)
		SLIST_INSERT_HEAD(sc->mac_filters, f, next);

	return (f);
}
/*
** Find the filter with matching MAC address
*/
static struct ixlv_mac_filter *
ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
{
	struct ixlv_mac_filter	*f;
	bool			match = FALSE;

	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (cmp_etheraddr(f->macaddr, macaddr)) {
			match = TRUE;
			break;
		}
	}

	if (!match)
		f = NULL;
	return (f);
}
/*
** Admin Queue interrupt handler
*/
static void
ixlv_msix_adminq(void *arg)
{
	struct ixlv_sc	*sc = arg;
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	u32		reg, mask, oldreg;

	reg = rd32(hw, I40E_VFINT_ICR01);
	mask = rd32(hw, I40E_VFINT_ICR0_ENA1);

	reg = rd32(hw, I40E_VFINT_DYN_CTL01);
	reg |= I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_VFINT_DYN_CTL01, reg);

	/* check for Admin queue errors */
	oldreg = reg = rd32(hw, hw->aq.arq.len);
	if (reg & I40E_VF_ARQLEN_ARQVFE_MASK) {
		device_printf(dev, "ARQ VF Error detected\n");
		reg &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
	}
	if (reg & I40E_VF_ARQLEN_ARQOVFL_MASK) {
		device_printf(dev, "ARQ Overflow Error detected\n");
		reg &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
	}
	if (reg & I40E_VF_ARQLEN_ARQCRIT_MASK) {
		device_printf(dev, "ARQ Critical Error detected\n");
		reg &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.arq.len, reg);

	oldreg = reg = rd32(hw, hw->aq.asq.len);
	if (reg & I40E_VF_ATQLEN_ATQVFE_MASK) {
		device_printf(dev, "ASQ VF Error detected\n");
		reg &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
	}
	if (reg & I40E_VF_ATQLEN_ATQOVFL_MASK) {
		device_printf(dev, "ASQ Overflow Error detected\n");
		reg &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
	}
	if (reg & I40E_VF_ATQLEN_ATQCRIT_MASK) {
		device_printf(dev, "ASQ Critical Error detected\n");
		reg &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.asq.len, reg);

	/* re-enable interrupt causes */
	wr32(hw, I40E_VFINT_ICR0_ENA1, mask);
	wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);

	/* schedule task */
	taskqueue_enqueue(sc->tq, &sc->aq_irq);
	return;
}
void
ixlv_enable_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;

	ixlv_enable_adminq_irq(hw);
	for (int i = 0; i < vsi->num_queues; i++, que++)
		ixlv_enable_queue_irq(hw, que->me);
}
void
ixlv_disable_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;

	ixlv_disable_adminq_irq(hw);
	for (int i = 0; i < vsi->num_queues; i++, que++)
		ixlv_disable_queue_irq(hw, que->me);
}
static void
ixlv_disable_adminq_irq(struct i40e_hw *hw)
{
	wr32(hw, I40E_VFINT_DYN_CTL01, 0);
	wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
	/* flush */
	rd32(hw, I40E_VFGEN_RSTAT);
	return;
}
static void
ixlv_enable_adminq_irq(struct i40e_hw *hw)
{
	wr32(hw, I40E_VFINT_DYN_CTL01,
	    I40E_VFINT_DYN_CTL01_INTENA_MASK |
	    I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
	/* flush */
	rd32(hw, I40E_VFGEN_RSTAT);
	return;
}
static void
ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
{
	u32		reg;

	reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
	    I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
	wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
}
static void
ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
{
	wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
	rd32(hw, I40E_VFGEN_RSTAT);
	return;
}
/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
static void
ixlv_set_queue_rx_itr(struct ixl_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct rx_ring	*rxr = &que->rxr;
	u16		rx_itr;
	u16		rx_latency = 0;
	int		rx_bytes;

	/* Idle, do nothing */
	if (rxr->bytes == 0)
		return;

	if (ixlv_dynamic_rx_itr) {
		rx_bytes = rxr->bytes/rxr->itr;
		rx_itr = rxr->itr;

		/* Adjust latency range */
		switch (rxr->latency) {
		case IXL_LOW_LATENCY:
			if (rx_bytes > 10) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (rx_bytes > 20) {
				rx_latency = IXL_BULK_LATENCY;
				rx_itr = IXL_ITR_8K;
			} else if (rx_bytes <= 10) {
				rx_latency = IXL_LOW_LATENCY;
				rx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (rx_bytes <= 20) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		}

		rxr->latency = rx_latency;

		if (rx_itr != rxr->itr) {
			/* do an exponential smoothing */
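			/*
			 * The blend below is an EWMA on the interrupt
			 * rate (1/ITR): 1/new = (9*target + current) /
			 * (10*target*current), i.e. the new rate is
			 * 0.9*current_rate + 0.1*target_rate.
			 */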
			rx_itr = (10 * rx_itr * rxr->itr) /
			    ((9 * rx_itr) + rxr->itr);
			rxr->itr = rx_itr & IXL_MAX_ITR;
			wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
			    que->me), rxr->itr);
		}
	} else { /* We may have toggled to non-dynamic */
		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->rx_itr_setting = ixlv_rx_itr;
		/* Update the hardware if needed */
		if (rxr->itr != vsi->rx_itr_setting) {
			rxr->itr = vsi->rx_itr_setting;
			wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
			    que->me), rxr->itr);
		}
	}
	rxr->bytes = 0;
	rxr->packets = 0;
	return;
}
/*
** Provide an update to the queue TX
** interrupt moderation value.
*/
static void
ixlv_set_queue_tx_itr(struct ixl_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	u16		tx_itr;
	u16		tx_latency = 0;
	int		tx_bytes;

	/* Idle, do nothing */
	if (txr->bytes == 0)
		return;

	if (ixlv_dynamic_tx_itr) {
		tx_bytes = txr->bytes/txr->itr;
		tx_itr = txr->itr;

		switch (txr->latency) {
		case IXL_LOW_LATENCY:
			if (tx_bytes > 10) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (tx_bytes > 20) {
				tx_latency = IXL_BULK_LATENCY;
				tx_itr = IXL_ITR_8K;
			} else if (tx_bytes <= 10) {
				tx_latency = IXL_LOW_LATENCY;
				tx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (tx_bytes <= 20) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		}

		txr->latency = tx_latency;

		if (tx_itr != txr->itr) {
			/* do an exponential smoothing */
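			/* (Same rate-domain EWMA as in the RX path above.) */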
			tx_itr = (10 * tx_itr * txr->itr) /
			    ((9 * tx_itr) + txr->itr);
			txr->itr = tx_itr & IXL_MAX_ITR;
			wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
			    que->me), txr->itr);
		}

	} else { /* We may have toggled to non-dynamic */
		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->tx_itr_setting = ixlv_tx_itr;
		/* Update the hardware if needed */
		if (txr->itr != vsi->tx_itr_setting) {
			txr->itr = vsi->tx_itr_setting;
			wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
			    que->me), txr->itr);
		}
	}
	txr->bytes = 0;
	txr->packets = 0;
	return;
}
/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/
static void
ixlv_handle_que(void *context, int pending)
{
	struct ixl_queue *que = context;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw  *hw = vsi->hw;
	struct tx_ring  *txr = &que->txr;
	struct ifnet    *ifp = vsi->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);
		mtx_lock(&txr->mtx);
		ixl_txeof(que);
		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		mtx_unlock(&txr->mtx);
		if (more) {
			taskqueue_enqueue(que->tq, &que->task);
			return;
		}
	}

	/* Reenable this interrupt - hmmm */
	ixlv_enable_queue_irq(hw, que->me);
	return;
}
/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixlv_msix_que(void *arg)
{
	struct ixl_queue	*que = arg;
	struct ixl_vsi		*vsi = que->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct tx_ring		*txr = &que->txr;
	bool			more_tx, more_rx;

	/* Spurious interrupts are ignored */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	++que->irqs;

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	mtx_lock(&txr->mtx);
	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	mtx_unlock(&txr->mtx);

	ixlv_set_queue_rx_itr(que);
	ixlv_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
	else
		ixlv_enable_queue_irq(hw, que->me);

	return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixlv_sc	*sc = vsi->back;

	INIT_DBG_IF(ifp, "begin");

	mtx_lock(&sc->mtx);

	ixlv_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!vsi->link_up) {
		mtx_unlock(&sc->mtx);
		INIT_DBG_IF(ifp, "end: link not up");
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;
	mtx_unlock(&sc->mtx);
	INIT_DBG_IF(ifp, "end");
	return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixlv_media_change(struct ifnet * ifp)
{
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ifmedia *ifm = &vsi->media;

	INIT_DBG_IF(ifp, "begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	INIT_DBG_IF(ifp, "end");
	return (0);
}
/*********************************************************************
 *  Multicast Initialization
 *
 *  This routine is called by init to reset a fresh state.
 *
 **********************************************************************/
static void
ixlv_init_multi(struct ixl_vsi *vsi)
{
	struct ixlv_mac_filter	*f;
	struct ixlv_sc		*sc = vsi->back;
	int			mcnt = 0;

	IOCTL_DBG_IF(vsi->ifp, "begin");

	/* First clear any multicast filters */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if ((f->flags & IXL_FILTER_USED)
		    && (f->flags & IXL_FILTER_MC)) {
			f->flags |= IXL_FILTER_DEL;
			mcnt++;
		}
	}
	if (mcnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
		    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
		    sc);

	IOCTL_DBG_IF(vsi->ifp, "end");
}
static void
ixlv_add_multi(struct ixl_vsi *vsi)
{
	struct ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct ixlv_sc		*sc = vsi->back;
	int			mcnt = 0;

	IOCTL_DBG_IF(ifp, "begin");

	if_maddr_rlock(ifp);
	/*
	** Get a count, to decide if we
	** simply use multicast promiscuous.
	*/
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mcnt++;
	}
	if_maddr_runlock(ifp);

	// TODO: Remove -- cannot set promiscuous mode in a VF
	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete all multicast filters */
		ixlv_init_multi(vsi);
		sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
		    IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
		    sc);
		IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
		return;
	}

	mcnt = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (!ixlv_add_mac_filter(sc,
		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    IXL_FILTER_MC))
			mcnt++;
	}
	if_maddr_runlock(ifp);
	/*
	** Notify AQ task that sw filters need to be
	** added to hw list
	*/
	if (mcnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
		    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
		    sc);

	IOCTL_DBG_IF(ifp, "end");
}
static void
ixlv_del_multi(struct ixl_vsi *vsi)
{
	struct ixlv_mac_filter	*f;
	struct ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct ixlv_sc		*sc = vsi->back;
	int			mcnt = 0;
	bool			match = FALSE;
	u8			*mc_addr;

	IOCTL_DBG_IF(ifp, "begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if ((f->flags & IXL_FILTER_USED)
		    && (f->flags & IXL_FILTER_MC)) {
			/* check if mac address in filter is in sc's list */
			match = FALSE;
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				mc_addr =
				    (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			/* if this filter is not in the sc's list, remove it */
			if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
				IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
				    MAC_FORMAT_ARGS(f->macaddr));
			}
			else if (match == FALSE)
				IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
				    MAC_FORMAT_ARGS(f->macaddr));
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
		    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
		    sc);

	IOCTL_DBG_IF(ifp, "end");
}
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/
static void
ixlv_local_timer(void *arg)
{
	struct ixlv_sc		*sc = arg;
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = sc->dev;
	int			hung = 0;
	u32			mask, val;

	IXLV_CORE_LOCK_ASSERT(sc);

	/* If Reset is in progress just bail */
	if (sc->init_state == IXLV_RESET_PENDING)
		return;

	/* Check for when PF triggers a VF reset */
	val = rd32(hw, I40E_VFGEN_RSTAT) &
	    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

	if (val != I40E_VFR_VFACTIVE
	    && val != I40E_VFR_COMPLETED) {
		DDPRINTF(dev, "reset in progress! (%d)", val);
		return;
	}

	ixlv_request_stats(sc);

	/* clean and process any events */
	taskqueue_enqueue(sc->tq, &sc->aq_irq);

	/*
	** Check status on the queues for a hang
	*/
	mask = (I40E_VFINT_DYN_CTLN_INTENA_MASK |
	    I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK);

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		/* Any queues with outstanding work get a sw irq */
		if (que->busy)
			wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to 5 we declare it hung.
		*/
		if (que->busy == IXL_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			vsi->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
				vsi->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXL_MAX_TX_BUSY) {
			device_printf(dev, "Warning queue %d "
			    "appears to be hung!\n", i);
			que->busy = IXL_QUEUE_HUNG;
			++hung;
		}
	}
	/* Only reset when all queues show hung */
	if (hung == vsi->num_queues)
		goto hung;

	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
	return;

hung:
	device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
	sc->init_state = IXLV_RESET_REQUIRED;
	ixlv_init_locked(sc);
}
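
/*
** Hang handling above is deliberately conservative: a single hung
** queue is only masked out of active_queues and poked with a software
** interrupt, while the heavyweight reinit through IXLV_RESET_REQUIRED
** is taken only when every queue reports hung.
*/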

/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
void
ixlv_update_link_status(struct ixlv_sc *sc)
{
	struct ixl_vsi	*vsi = &sc->vsi;
	struct ifnet	*ifp = vsi->ifp;
	device_t	dev = sc->dev;

	if (sc->link_up) {
		if (vsi->link_active == FALSE) {
			device_printf(dev, "Link is Up, %d Gbps\n",
			    (vsi->link_speed == I40E_LINK_SPEED_40GB) ? 40 : 10);
			vsi->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			vsi->link_active = FALSE;
		}
	}
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/
static void
ixlv_stop(struct ixlv_sc *sc)
{
	struct ifnet	*ifp;
	int		start;

	ifp = sc->vsi.ifp;
	INIT_DBG_IF(ifp, "begin");

	IXLV_CORE_LOCK_ASSERT(sc);

	ixl_vc_flush(&sc->vc_mgr);
	ixlv_disable_queues(sc);

	start = ticks;
	while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
	    ((ticks - start) < hz/10))
		ixlv_do_adminq_locked(sc);

	/* Stop the local timer */
	callout_stop(&sc->timer);

	INIT_DBG_IF(ifp, "end");
}
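
/*
** The adminq drain above is a bounded wait: it polls for at most
** hz/10 ticks (roughly 100 ms) for the PF's queue-disable response
** to clear IFF_DRV_RUNNING, so ixlv_stop() cannot spin forever while
** holding the core lock.
*/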

/*********************************************************************
 *
 *  Free all station queue structs.
 *
 **********************************************************************/
static void
ixlv_free_queues(struct ixl_vsi *vsi)
{
	struct ixlv_sc		*sc = (struct ixlv_sc *)vsi->back;
	struct ixl_queue	*que = vsi->queues;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		struct rx_ring *rxr = &que->rxr;

		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
			continue;
		IXL_TX_LOCK(txr);
		ixl_free_que_tx(que);
		if (txr->base)
			i40e_free_dma_mem(&sc->hw, &txr->dma);
		IXL_TX_UNLOCK(txr);
		IXL_TX_LOCK_DESTROY(txr);

		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
			continue;
		IXL_RX_LOCK(rxr);
		ixl_free_que_rx(que);
		if (rxr->base)
			i40e_free_dma_mem(&sc->hw, &rxr->dma);
		IXL_RX_UNLOCK(rxr);
		IXL_RX_LOCK_DESTROY(rxr);
	}
	free(vsi->queues, M_DEVBUF);
}
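
/*
** The mtx_initialized() checks make this routine safe on a partially
** constructed VSI (e.g. an attach that failed midway). Note that an
** uninitialized TX mutex also skips the RX teardown for that queue,
** which assumes both rings are always set up together.
*/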

/*
** ixlv_config_rss - setup RSS
**
** RSS keys and table are cleared on VF reset.
*/
static void
ixlv_config_rss(struct ixlv_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	struct ixl_vsi	*vsi = &sc->vsi;
	u32		lut = 0;
	u64		set_hena = 0, hena;
	int		i, j, que_id;
#ifdef RSS
	u32		rss_hash_config;
	u32		rss_seed[IXL_KEYSZ];
#else
	u32		rss_seed[IXL_KEYSZ] = {0x41b01687,
			    0x183cfd8c, 0xce880440, 0x580cbc3c,
			    0x35897377, 0x328b25e1, 0x4fa98922,
			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
#endif

	/* Don't set up RSS if using a single queue */
	if (vsi->num_queues == 1) {
		wr32(hw, I40E_VFQF_HENA(0), 0);
		wr32(hw, I40E_VFQF_HENA(1), 0);
		ixl_flush(hw);
		return;
	}

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *)&rss_seed);
#endif
	/* Fill out hash function seed */
	for (i = 0; i < IXL_KEYSZ; i++)
		wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);

	/* Enable PCTYPES for RSS: */
#ifdef RSS
	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	set_hena =
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
	    ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
	    ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
	    ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
#endif
	hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
	    ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
	hena |= set_hena;
	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
		if (j == vsi->num_queues)
			j = 0;
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues).
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % vsi->num_queues;
#else
		que_id = j;
#endif
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (que_id & 0xF);
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3) {
			wr32(hw, I40E_VFQF_HLUT(i), lut);
			DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
		}
	}
	ixl_flush(hw);
}
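
/*
** Worked example of the LUT packing above (non-RSS path, four
** queues): que_id cycles 0, 1, 2, 3, and each iteration shifts the
** previous entries left by one byte, so after i = 3 lut holds
** 0x00010203 and a single 32-bit write programs four indirection
** entries at once.
*/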

/*
** This routine refreshes vlan filters; called by init,
** it scans the filter table and then updates the AQ.
*/
static void
ixlv_setup_vlan_filters(struct ixlv_sc *sc)
{
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixlv_vlan_filter	*f;
	int			cnt = 0;

	if (vsi->num_vlans == 0)
		return;
	/*
	** Scan the filter table for vlan entries,
	** and if found call for the AQ update.
	*/
	SLIST_FOREACH(f, sc->vlan_filters, next)
		if (f->flags & IXL_FILTER_ADD)
			cnt++;
	if (cnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
		    IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
}

/*
** This routine adds new MAC filters to the sc's list;
** these are later added in hardware by sending a virtual
** channel message.
*/
static int
ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
{
	struct ixlv_mac_filter	*f;
	device_t		dev = sc->dev;

	/* Does one already exist? */
	f = ixlv_find_mac_filter(sc, macaddr);
	if (f != NULL) {
		IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
		    MAC_FORMAT_ARGS(macaddr));
		return (EEXIST);
	}

	/* If not, get a new empty filter */
	f = ixlv_get_mac_filter(sc);
	if (f == NULL) {
		device_printf(dev, "%s: no filters available!!\n",
		    __func__);
		return (ENOMEM);
	}

	IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
	    MAC_FORMAT_ARGS(macaddr));

	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
	f->flags |= flags;
	return (0);
}
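
/*
** Callers rely on the return convention above: 0 means a filter was
** newly marked for add, which is why ixlv_add_multi() counts
** successes with if (!ixlv_add_mac_filter(...)) before kicking the
** AQ task.
*/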

/*
** Marks a MAC filter for deletion.
*/
static int
ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
{
	struct ixlv_mac_filter	*f;

	f = ixlv_find_mac_filter(sc, macaddr);
	if (f == NULL)
		return (ENOENT);

	f->flags |= IXL_FILTER_DEL;
	return (0);
}

/*
** Tasklet handler for MSIX Adminq interrupts
**  - done outside interrupt context since it might sleep
*/
static void
ixlv_do_adminq(void *context, int pending)
{
	struct ixlv_sc	*sc = context;

	mtx_lock(&sc->mtx);
	ixlv_do_adminq_locked(sc);
	mtx_unlock(&sc->mtx);
}

static void
ixlv_do_adminq_locked(struct ixlv_sc *sc)
{
	struct i40e_hw			*hw = &sc->hw;
	struct i40e_arq_event_info	event;
	struct i40e_virtchnl_msg	*v_msg;
	i40e_status			ret;
	u16				result = 0;

	IXLV_CORE_LOCK_ASSERT(sc);

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = sc->aq_buffer;
	v_msg = (struct i40e_virtchnl_msg *)&event.desc;

	do {
		ret = i40e_clean_arq_element(hw, &event, &result);
		if (ret)
			break;
		ixlv_vc_completion(sc, v_msg->v_opcode,
		    v_msg->v_retval, event.msg_buf, event.msg_len);
		if (result != 0)
			bzero(event.msg_buf, IXL_AQ_BUF_SZ);
	} while (result);

	ixlv_enable_adminq_irq(hw);
}
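
/*
** The loop above drains the admin receive queue: 'result' is the
** count of events still pending after each i40e_clean_arq_element()
** call, so iteration stops once the ARQ is empty or a clean fails,
** and the admin queue interrupt is re-armed on the way out.
*/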

static void
ixlv_add_sysctls(struct ixlv_sc *sc)
{
	device_t dev = sc->dev;
	struct ixl_vsi *vsi = &sc->vsi;
	struct i40e_eth_stats *es = &vsi->eth_stats;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);

	struct sysctl_oid *vsi_node, *queue_node;
	struct sysctl_oid_list *vsi_list, *queue_list;

#define QUEUE_NAME_LEN 32
	char queue_namebuf[QUEUE_NAME_LEN];

	struct ixl_queue *queues = vsi->queues;
	struct tx_ring *txr;
	struct rx_ring *rxr;

	/* Driver statistics sysctls */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &sc->watchdog_events,
	    "Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
	    CTLFLAG_RD, &sc->admin_irq,
	    "Admin Queue IRQ Handled");

	/* VSI statistics sysctls */
	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
	    CTLFLAG_RD, NULL, "VSI-specific statistics");
	vsi_list = SYSCTL_CHILDREN(vsi_node);

	struct ixl_sysctl_info ctls[] =
	{
		{&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
		{&es->rx_unicast, "ucast_pkts_rcvd",
		    "Unicast Packets Received"},
		{&es->rx_multicast, "mcast_pkts_rcvd",
		    "Multicast Packets Received"},
		{&es->rx_broadcast, "bcast_pkts_rcvd",
		    "Broadcast Packets Received"},
		{&es->rx_discards, "rx_discards", "Discarded RX packets"},
		{&es->rx_unknown_protocol, "rx_unknown_proto",
		    "RX unknown protocol packets"},
		{&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
		{&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
		{&es->tx_multicast, "mcast_pkts_txd",
		    "Multicast Packets Transmitted"},
		{&es->tx_broadcast, "bcast_pkts_txd",
		    "Broadcast Packets Transmitted"},
		{&es->tx_errors, "tx_errors", "TX packet errors"},
		/* end */
		{0, 0, 0}
	};

	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
		    CTLFLAG_RD, entry->stat,
		    entry->description);
		entry++;
	}

	/* Queue sysctls */
	for (int q = 0; q < vsi->num_queues; q++) {
		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO,
		    queue_namebuf, CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		txr = &(queues[q].txr);
		rxr = &(queues[q].rxr);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
		    CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
		    "m_defrag() failed");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
		    CTLFLAG_RD, &(queues[q].dropped_pkts),
		    "Driver dropped packets");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
		    CTLFLAG_RD, &(queues[q].irqs),
		    "irqs on this queue");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
		    CTLFLAG_RD, &(queues[q].tso),
		    "TSO");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
		    CTLFLAG_RD, &(queues[q].tx_dma_setup),
		    "Driver tx dma failure in xmit");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
		    CTLFLAG_RD, &(txr->no_desc),
		    "Queue No Descriptor Available");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
		    CTLFLAG_RD, &(txr->total_packets),
		    "Queue Packets Transmitted");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
		    CTLFLAG_RD, &(txr->tx_bytes),
		    "Queue Bytes Transmitted");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
		    CTLFLAG_RD, &(rxr->rx_packets),
		    "Queue Packets Received");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
		    CTLFLAG_RD, &(rxr->rx_bytes),
		    "Queue Bytes Received");

		/* Examine queue state */
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head",
		    CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
		    sizeof(struct ixl_queue),
		    ixlv_sysctl_qtx_tail_handler, "IU",
		    "Queue Transmit Descriptor Tail");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_head",
		    CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
		    sizeof(struct ixl_queue),
		    ixlv_sysctl_qrx_tail_handler, "IU",
		    "Queue Receive Descriptor Tail");
	}
}

static void
ixlv_init_filters(struct ixlv_sc *sc)
{
	sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INIT(sc->mac_filters);
	sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INIT(sc->vlan_filters);
}
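
/*
** Both allocations above use M_NOWAIT and the results are not
** checked; if either malloc failed, SLIST_INIT() would dereference
** NULL. The attach path implicitly assumes these small allocations
** succeed.
*/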

static void
ixlv_free_filters(struct ixlv_sc *sc)
{
	struct ixlv_mac_filter	*f;
	struct ixlv_vlan_filter	*v;

	while (!SLIST_EMPTY(sc->mac_filters)) {
		f = SLIST_FIRST(sc->mac_filters);
		SLIST_REMOVE_HEAD(sc->mac_filters, next);
		free(f, M_DEVBUF);
	}
	while (!SLIST_EMPTY(sc->vlan_filters)) {
		v = SLIST_FIRST(sc->vlan_filters);
		SLIST_REMOVE_HEAD(sc->vlan_filters, next);
		free(v, M_DEVBUF);
	}
}

/**
 * ixlv_sysctl_qtx_tail_handler
 * Retrieves I40E_QTX_TAIL1 value from hardware
 * for the sysctl.
 */
static int
ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_queue *que;
	int error;
	u32 val;

	que = ((struct ixl_queue *)oidp->oid_arg1);
	if (!que)
		return (0);

	val = rd32(que->vsi->hw, que->txr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}

/**
 * ixlv_sysctl_qrx_tail_handler
 * Retrieves I40E_QRX_TAIL1 value from hardware
 * for the sysctl.
 */
static int
ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_queue *que;
	int error;
	u32 val;

	que = ((struct ixl_queue *)oidp->oid_arg1);
	if (!que)
		return (0);

	val = rd32(que->vsi->hw, que->rxr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}