/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#ifndef IXL_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixl.h"
#include "ixlv.h"

#ifdef RSS
#include <net/rss_config.h>
#endif
/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixlv_driver_version[] = "1.2.11-k";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixlv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixlv_strings[] = {
	"Intel(R) Ethernet Connection XL710 VF Driver"
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixlv_probe(device_t);
static int	ixlv_attach(device_t);
static int	ixlv_detach(device_t);
static int	ixlv_shutdown(device_t);
static void	ixlv_init_locked(struct ixlv_sc *);
static int	ixlv_allocate_pci_resources(struct ixlv_sc *);
static void	ixlv_free_pci_resources(struct ixlv_sc *);
static int	ixlv_assign_msix(struct ixlv_sc *);
static int	ixlv_init_msix(struct ixlv_sc *);
static int	ixlv_init_taskqueue(struct ixlv_sc *);
static int	ixlv_setup_queues(struct ixlv_sc *);
static void	ixlv_config_rss(struct ixlv_sc *);
static void	ixlv_stop(struct ixlv_sc *);
static void	ixlv_add_multi(struct ixl_vsi *);
static void	ixlv_del_multi(struct ixl_vsi *);
static void	ixlv_free_queues(struct ixl_vsi *);
static int	ixlv_setup_interface(device_t, struct ixlv_sc *);

static int	ixlv_media_change(struct ifnet *);
static void	ixlv_media_status(struct ifnet *, struct ifmediareq *);

static void	ixlv_local_timer(void *);

static int	ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
static int	ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void	ixlv_init_filters(struct ixlv_sc *);
static void	ixlv_free_filters(struct ixlv_sc *);

static void	ixlv_msix_que(void *);
static void	ixlv_msix_adminq(void *);
static void	ixlv_do_adminq(void *, int);
static void	ixlv_do_adminq_locked(struct ixlv_sc *sc);
static void	ixlv_handle_que(void *, int);
static int	ixlv_reset(struct ixlv_sc *);
static int	ixlv_reset_complete(struct i40e_hw *);
static void	ixlv_set_queue_rx_itr(struct ixl_queue *);
static void	ixlv_set_queue_tx_itr(struct ixl_queue *);
static void	ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
		    enum i40e_status_code);

static void	ixlv_enable_adminq_irq(struct i40e_hw *);
static void	ixlv_disable_adminq_irq(struct i40e_hw *);
static void	ixlv_enable_queue_irq(struct i40e_hw *, int);
static void	ixlv_disable_queue_irq(struct i40e_hw *, int);

static void	ixlv_setup_vlan_filters(struct ixlv_sc *);
static void	ixlv_register_vlan(void *, struct ifnet *, u16);
static void	ixlv_unregister_vlan(void *, struct ifnet *, u16);

static void	ixlv_init_hw(struct ixlv_sc *);
static int	ixlv_setup_vc(struct ixlv_sc *);
static int	ixlv_vf_config(struct ixlv_sc *);

static void	ixlv_cap_txcsum_tso(struct ixl_vsi *,
		    struct ifnet *, int);

static void	ixlv_add_sysctls(struct ixlv_sc *);
static int	ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixlv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixlv_probe),
	DEVMETHOD(device_attach, ixlv_attach),
	DEVMETHOD(device_detach, ixlv_detach),
	DEVMETHOD(device_shutdown, ixlv_shutdown),
	{0, 0}
};

static driver_t ixlv_driver = {
	"ixlv", ixlv_methods, sizeof(struct ixlv_sc),
};

devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);

MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);
/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
    "IXLV driver parameters");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixlv_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixlv_ringsz, 0, "Descriptor Ring Size");

/* Set to zero to auto calculate */
int ixlv_max_queues = 0;
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixlv_max_queues, 0, "Number of Queues");

/*
** Number of entries in Tx queue buf_ring.
** Increasing this will reduce the number of
** errors when transmitting fragmented UDP
** packets.
*/
static int ixlv_txbrsz = DEFAULT_TXBRSZ;
TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
    &ixlv_txbrsz, 0, "TX Buf Ring Size");

/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
**	- default values for static ITR
*/
int ixlv_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixlv_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixlv_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixlv_rx_itr, 0, "RX Interrupt Rate");

int ixlv_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixlv_tx_itr, 0, "TX Interrupt Rate");
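
/*
** All of the above are also boot-time tunables; an illustrative
** loader.conf(5) fragment (the values here are examples only,
** not recommendations):
**
**	hw.ixlv.ringsz="2048"
**	hw.ixlv.max_queues="4"
**	hw.ixlv.dynamic_rx_itr="1"
*/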
/*********************************************************************
 *  Device identification routine
 *
 *  ixlv_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixlv_probe(device_t dev)
{
	ixl_vendor_info_t *ent;

	u16	pci_vendor_id, pci_device_id;
	u16	pci_subvendor_id, pci_subdevice_id;
	char	device_name[256];

	INIT_DEBUGOUT("ixlv_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixlv_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&
		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&
		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(device_name, "%s, Version - %s",
			    ixlv_strings[ent->index],
			    ixlv_driver_version);
			device_set_desc_copy(dev, device_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_attach(device_t dev)
{
	struct ixlv_sc	*sc;
	struct i40e_hw	*hw;
	struct ixl_vsi	*vsi;
	int		error = 0;

	INIT_DBG_DEV(dev, "begin");

	/* Allocate, clear, and link in our primary soft structure */
	sc = device_get_softc(dev);
	sc->dev = sc->osdep.dev = dev;
	hw = &sc->hw;
	vsi = &sc->vsi;
	vsi->dev = dev;

	/* Initialize hw struct */
	ixlv_init_hw(sc);

	/* Allocate filter lists */
	ixlv_init_filters(sc);

	/* Core Lock Init */
	mtx_init(&sc->mtx, device_get_nameunit(dev),
	    "IXL SC Lock", MTX_DEF);

	/* Set up the timer callout */
	callout_init_mtx(&sc->timer, &sc->mtx, 0);
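	/* (callout_init_mtx ties the callout to sc->mtx, so the
	** ixlv_local_timer handler runs with that lock held) */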
	/* Do PCI setup - map BAR0, etc */
	if (ixlv_allocate_pci_resources(sc)) {
		device_printf(dev, "%s: Allocation of PCI resources failed\n",
		    __func__);
		error = ENXIO;
		goto err_early;
	}

	INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");

	error = i40e_set_mac_type(hw);
	if (error) {
		device_printf(dev, "%s: set_mac_type failed: %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	error = ixlv_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: Device is still being reset\n",
		    __func__);
		goto err_pci_res;
	}

	INIT_DBG_DEV(dev, "VF Device is ready for configuration");

	error = ixlv_setup_vc(sc);
	if (error) {
		device_printf(dev, "%s: Error setting up PF comms, %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	INIT_DBG_DEV(dev, "PF API version verified");

	/* Need API version before sending reset message */
	error = ixlv_reset(sc);
	if (error) {
		device_printf(dev, "VF reset failed; reload the driver\n");
		goto err_aq;
	}

	INIT_DBG_DEV(dev, "VF reset complete");

	/* Ask for VF config from PF */
	error = ixlv_vf_config(sc);
	if (error) {
		device_printf(dev, "Error getting configuration from PF: %d\n",
		    error);
		goto err_aq;
	}

	INIT_DBG_DEV(dev, "VF config from PF:");
	INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
	    sc->vf_res->num_vsis,
	    sc->vf_res->num_queue_pairs,
	    sc->vf_res->max_vectors,
	    sc->vf_res->max_mtu);
	INIT_DBG_DEV(dev, "Offload flags: %#010x",
	    sc->vf_res->vf_offload_flags);

	/* got VF config message back from PF, now we can parse it */
	for (int i = 0; i < sc->vf_res->num_vsis; i++) {
		if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
			sc->vsi_res = &sc->vf_res->vsi_res[i];
	}
	if (!sc->vsi_res) {
		device_printf(dev, "%s: no LAN VSI found\n", __func__);
		error = EIO;
		goto err_res_buf;
	}

	INIT_DBG_DEV(dev, "Resource Acquisition complete");

	/* If no mac address was assigned just make a random one */
	if (!ixlv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	vsi->id = sc->vsi_res->vsi_id;
	vsi->back = (void *)sc;
	sc->link_up = TRUE;

	/* This allocates the memory and early settings */
	if (ixlv_setup_queues(sc) != 0) {
		device_printf(dev, "%s: setup queues failed!\n",
		    __func__);
		error = EIO;
		goto out;
	}

	/* Setup the stack interface */
	if (ixlv_setup_interface(dev, sc) != 0) {
		device_printf(dev, "%s: setup interface failed!\n",
		    __func__);
		error = EIO;
		goto out;
	}

	INIT_DBG_DEV(dev, "Queue memory and interface setup");

	/* Do queue interrupt setup */
	ixlv_assign_msix(sc);

	/* Start AdminQ taskqueue */
	ixlv_init_taskqueue(sc);

	/* Initialize stats */
	bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
	ixlv_add_sysctls(sc);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

	/* We want AQ enabled early */
	ixlv_enable_adminq_irq(hw);

	/* Set things up to run init */
	sc->init_state = IXLV_INIT_READY;

	ixl_vc_init_mgr(sc, &sc->vc_mgr);

	INIT_DBG_DEV(dev, "end");
	return (error);

out:
	ixlv_free_queues(vsi);
err_res_buf:
	free(sc->vf_res, M_DEVBUF);
err_aq:
	i40e_shutdown_adminq(hw);
err_pci_res:
	ixlv_free_pci_resources(sc);
err_early:
	mtx_destroy(&sc->mtx);
	ixlv_free_filters(sc);
	INIT_DBG_DEV(dev, "end: error %d", error);
	return (error);
}
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_detach(device_t dev)
{
	struct ixlv_sc	*sc = device_get_softc(dev);
	struct ixl_vsi	*vsi = &sc->vsi;

	INIT_DBG_DEV(dev, "begin");

	/* Make sure VLANS are not using driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		if_printf(vsi->ifp, "Vlan in use, detach first\n");
		INIT_DBG_DEV(dev, "end");
		return (EBUSY);
	}

	/* Stop driver */
	ether_ifdetach(vsi->ifp);
	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		mtx_lock(&sc->mtx);
		ixlv_stop(sc);
		mtx_unlock(&sc->mtx);
	}

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	/* Drain VC mgr */
	callout_drain(&sc->vc_mgr.callout);

	i40e_shutdown_adminq(&sc->hw);
	taskqueue_free(sc->tq);
	if_free(vsi->ifp);
	free(sc->vf_res, M_DEVBUF);
	ixlv_free_pci_resources(sc);
	ixlv_free_queues(vsi);
	mtx_destroy(&sc->mtx);
	ixlv_free_filters(sc);

	bus_generic_detach(dev);
	INIT_DBG_DEV(dev, "end");
	return (0);
}
/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixlv_shutdown(device_t dev)
{
	struct ixlv_sc	*sc = device_get_softc(dev);

	INIT_DBG_DEV(dev, "begin");

	mtx_lock(&sc->mtx);
	ixlv_stop(sc);
	mtx_unlock(&sc->mtx);

	INIT_DBG_DEV(dev, "end");
	return (0);
}
/*
 * Configure TXCSUM(IPV6) and TSO(4/6)
 *	- the hardware handles these together so we
 *	  need to tweak them as a pair
 */
static void
ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			if_printf(ifp,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			if_printf(ifp,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			if_printf(ifp,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			if_printf(ifp,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixlv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixlv_sc	*sc = vsi->back;
	struct ifreq	*ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int		error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixlv_init(vsi);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
		mtx_lock(&sc->mtx);
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			error = EINVAL;
			IOCTL_DBG_IF(ifp, "mtu too large");
		} else {
			IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", (u_long)ifp->if_mtu, ifr->ifr_mtu);
			// ERJ: Interestingly enough, these types don't match
			ifp->if_mtu = (u_long)ifr->ifr_mtu;
			vsi->max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			    + ETHER_VLAN_ENCAP_LEN;
			ixlv_init_locked(sc);
		}
		mtx_unlock(&sc->mtx);
		break;
	case SIOCSIFFLAGS:
		IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
		mtx_lock(&sc->mtx);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixlv_init_locked(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			ixlv_stop(sc);
		sc->if_flags = ifp->if_flags;
		mtx_unlock(&sc->mtx);
		break;
	case SIOCADDMULTI:
		IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			mtx_lock(&sc->mtx);
			ixlv_disable_intr(vsi);
			ixlv_add_multi(vsi);
			ixlv_enable_intr(vsi);
			mtx_unlock(&sc->mtx);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
		if (sc->init_state == IXLV_RUNNING) {
			mtx_lock(&sc->mtx);
			ixlv_disable_intr(vsi);
			ixlv_del_multi(vsi);
			ixlv_enable_intr(vsi);
			mtx_unlock(&sc->mtx);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");

		ixlv_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ixlv_init(vsi);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}
	default:
		IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*
** To do a reinit on the VF is unfortunately more complicated
** than on a physical device; we must have the PF more or less
** completely recreate our memory, so many things that were
** done only once at attach in traditional drivers now must be
** redone at each reinitialization. This function does that
** 'prelude' so we can then call the normal locked init code.
*/
static int
ixlv_reinit_locked(struct ixlv_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ifnet		*ifp = vsi->ifp;
	struct ixlv_mac_filter	*mf, *mf_temp;
	struct ixlv_vlan_filter	*vf;
	int			error = 0;

	INIT_DBG_IF(ifp, "begin");

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ixlv_stop(sc);

	error = ixlv_reset(sc);

	INIT_DBG_IF(ifp, "VF was reset");

	/* set the state in case we went thru RESET */
	sc->init_state = IXLV_RUNNING;

	/*
	** Resetting the VF drops all filters from hardware;
	** we need to mark them to be re-added in init.
	*/
	SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
		if (mf->flags & IXL_FILTER_DEL) {
			SLIST_REMOVE(sc->mac_filters, mf,
			    ixlv_mac_filter, next);
			free(mf, M_DEVBUF);
		} else
			mf->flags |= IXL_FILTER_ADD;
	}
	if (vsi->num_vlans != 0)
		SLIST_FOREACH(vf, sc->vlan_filters, next)
			vf->flags = IXL_FILTER_ADD;
	else { /* clean any stale filters */
		while (!SLIST_EMPTY(sc->vlan_filters)) {
			vf = SLIST_FIRST(sc->vlan_filters);
			SLIST_REMOVE_HEAD(sc->vlan_filters, next);
			free(vf, M_DEVBUF);
		}
	}

	ixlv_enable_adminq_irq(hw);
	ixl_vc_flush(&sc->vc_mgr);

	INIT_DBG_IF(ifp, "end");
	return (error);
}
static void
ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
    enum i40e_status_code code)
{
	struct ixlv_sc *sc;

	sc = arg;

	/*
	 * Ignore "Adapter Stopped" message as that happens if an ifconfig down
	 * happens while a command is in progress, so we don't print an error
	 * in that case.
	 */
	if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
		if_printf(sc->vsi.ifp,
		    "Error %d waiting for PF to complete operation %d\n",
		    code, cmd->request);
	}
}
static void
ixlv_init_locked(struct ixlv_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	int			error = 0;

	INIT_DBG_IF(ifp, "begin");

	IXLV_CORE_LOCK_ASSERT(sc);

	/* Do a reinit first if an init has already been done */
	if ((sc->init_state == IXLV_RUNNING) ||
	    (sc->init_state == IXLV_RESET_REQUIRED) ||
	    (sc->init_state == IXLV_RESET_PENDING))
		error = ixlv_reinit_locked(sc);
	/* Don't bother with init if we failed reinit */
	if (error)
		goto init_done;

	/* Remove existing MAC filter if new MAC addr is set */
	if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
		error = ixlv_del_mac_filter(sc, hw->mac.addr);
		if (error == 0)
			ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
			    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
			    sc);
	}

	/* Check for an LAA mac address... */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;

	/* Add mac filter for this VF to PF */
	if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
		error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
		if (!error || error == EEXIST)
			ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
			    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
			    sc);
	}

	/* Setup vlan's if needed */
	ixlv_setup_vlan_filters(sc);

	/* Prepare the queues for operation */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		ixl_init_tx_ring(que);

		if (vsi->max_frame_size <= MCLBYTES)
			rxr->mbuf_sz = MCLBYTES;
		else
			rxr->mbuf_sz = MJUMPAGESIZE;
		ixl_init_rx_ring(que);
	}

	/* Configure queues */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
	    IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);

	/* Set up RSS */
	ixlv_config_rss(sc);

	/* Map vectors */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
	    IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);

	/* Enable queues */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
	    IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);

	/* Start the local timer */
	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);

	sc->init_state = IXLV_RUNNING;

init_done:
	INIT_DBG_IF(ifp, "end");
	return;
}
/*
** Init entry point for the stack
*/
void
ixlv_init(void *arg)
{
	struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
	struct ixlv_sc *sc = vsi->back;
	int retries = 0;

	mtx_lock(&sc->mtx);
	ixlv_init_locked(sc);
	mtx_unlock(&sc->mtx);

	/* Wait for init_locked to finish */
	while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
	    && ++retries < IXLV_AQ_MAX_ERR) {
		i40e_msec_delay(25);
	}
	if (retries >= IXLV_AQ_MAX_ERR)
		if_printf(vsi->ifp,
		    "Init failed to complete in allotted time!\n");
}
/*
 * ixlv_attach() helper function; gathers information about
 * the (virtual) hardware for use elsewhere in the driver.
 */
static void
ixlv_init_hw(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);
}
/*
 * ixlv_attach() helper function; initializes the admin queue
 * and attempts to establish contact with the PF by
 * retrying the initial "API version" message several times
 * or until the PF responds.
 */
static int
ixlv_setup_vc(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int error = 0, ret_error = 0, asq_retries = 0;
	bool send_api_ver_retried = 0;

	/* Need to set these AQ parameters before initializing AQ */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

	for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
		/* Initialize admin queue */
		error = i40e_init_adminq(hw);
		if (error) {
			device_printf(dev, "%s: init_adminq failed: %d\n",
			    __func__, error);
			ret_error = 1;
			continue;
		}

		INIT_DBG_DEV(dev, "Initialized Admin Queue; starting"
		    " send_api_ver attempt %d", i+1);

retry_send:
		/* Send VF's API version */
		error = ixlv_send_api_ver(sc);
		if (error) {
			i40e_shutdown_adminq(hw);
			ret_error = 2;
			device_printf(dev, "%s: unable to send api"
			    " version to PF on attempt %d, error %d\n",
			    __func__, i+1, error);
		}

		asq_retries = 0;
		while (!i40e_asq_done(hw)) {
			if (++asq_retries > IXLV_AQ_MAX_ERR) {
				i40e_shutdown_adminq(hw);
				DDPRINTF(dev, "Admin Queue timeout "
				    "(waiting for send_api_ver), %d more retries...",
				    IXLV_AQ_MAX_ERR - (i + 1));
				ret_error = 3;
				break;
			}
			i40e_msec_delay(10);
		}
		if (asq_retries > IXLV_AQ_MAX_ERR)
			continue;

		INIT_DBG_DEV(dev, "Sent API version message to PF");

		/* Verify that the VF accepts the PF's API version */
		error = ixlv_verify_api_ver(sc);
		if (error == ETIMEDOUT) {
			if (!send_api_ver_retried) {
				/* Resend message, one more time */
				send_api_ver_retried++;
				device_printf(dev,
				    "%s: Timeout while verifying API version on first"
				    " try!\n", __func__);
				goto retry_send;
			} else {
				device_printf(dev,
				    "%s: Timeout while verifying API version on second"
				    " try!\n", __func__);
				ret_error = 4;
				break;
			}
		}
		if (error) {
			device_printf(dev,
			    "%s: Unable to verify API version,"
			    " error %d\n", __func__, error);
			ret_error = 5;
		}
		break;
	}

	if (ret_error >= 4)
		i40e_shutdown_adminq(hw);
	return (ret_error);
}
/*
 * ixlv_attach() helper function; asks the PF for this VF's
 * configuration, and saves the information if it receives it.
 */
static int
ixlv_vf_config(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int bufsz, error = 0, ret_error = 0;
	int asq_retries, retried = 0;

retry_config:
	error = ixlv_send_vf_config_msg(sc);
	if (error) {
		device_printf(dev,
		    "%s: Unable to send VF config request, attempt %d,"
		    " error %d\n", __func__, retried + 1, error);
		ret_error = 2;
	}

	asq_retries = 0;
	while (!i40e_asq_done(hw)) {
		if (++asq_retries > IXLV_AQ_MAX_ERR) {
			device_printf(dev, "%s: Admin Queue timeout "
			    "(waiting for send_vf_config_msg), attempt %d\n",
			    __func__, retried + 1);
			ret_error = 3;
			goto fail;
		}
		i40e_msec_delay(10);
	}

	INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
	    retried + 1);

	if (!sc->vf_res) {
		bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
		    (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
		sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
		if (!sc->vf_res) {
			device_printf(dev,
			    "%s: Unable to allocate memory for VF configuration"
			    " message from PF on attempt %d\n", __func__, retried + 1);
			ret_error = 9;
			goto fail;
		}
	}

	/* Check for VF config response */
	error = ixlv_get_vf_config(sc);
	if (error == ETIMEDOUT) {
		/* The 1st time we timeout, send the configuration message again */
		if (!retried) {
			retried++;
			goto retry_config;
		}
		device_printf(dev,
		    "%s: ixlv_get_vf_config() timed out waiting for a response\n",
		    __func__);
	}
	if (error) {
		device_printf(dev,
		    "%s: Unable to get VF configuration from PF after %d tries!\n",
		    __func__, retried + 1);
		ret_error = 4;
	}
	goto done;

fail:
	free(sc->vf_res, M_DEVBUF);
	sc->vf_res = NULL;

done:
	return (ret_error);
}
/*
 * Allocate MSI/X vectors, setup the AQ vector early
 */
static int
ixlv_init_msix(struct ixlv_sc *sc)
{
	device_t dev = sc->dev;
	int rid, want, vectors, queues, available;

	rid = PCIR_BAR(IXL_BAR);
	sc->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!sc->msix_mem) {
		/* May not be enabled */
		device_printf(sc->dev,
		    "Unable to map MSIX table\n");
		goto fail;
	}

	available = pci_msix_count(dev);
	if (available == 0) { /* system has msix disabled */
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, sc->msix_mem);
		sc->msix_mem = NULL;
		goto fail;
	}

	/* Figure out a reasonable auto config value */
	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;

	/* Override with hardcoded value if sane */
	if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues))
		queues = ixlv_max_queues;
#ifdef RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif
	/* Enforce the VF max value */
	if (queues > IXLV_MAX_QUEUES)
		queues = IXLV_MAX_QUEUES;

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for the admin queue.
	*/
	want = queues + 1;
	if (want <= available)	/* Have enough */
		vectors = want;
	else {
		device_printf(sc->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors available but %d wanted!\n",
		    available, want);
		goto fail;
	}

#ifdef RSS
	/*
	 * If we're doing RSS, the number of queues needs to
	 * match the number of RSS buckets that are configured.
	 *
	 * + If there's more queues than RSS buckets, we'll end
	 *   up with queues that get no traffic.
	 *
	 * + If there's more RSS buckets than queues, we'll end
	 *   up having multiple RSS buckets map to the same queue,
	 *   so there'll be some contention.
	 */
	if (queues != rss_getnumbuckets()) {
		device_printf(dev,
		    "%s: queues (%d) != RSS buckets (%d)"
		    "; performance will be impacted.\n",
		    __func__, queues, rss_getnumbuckets());
	}
#endif

	if (pci_alloc_msix(dev, &vectors) == 0) {
		device_printf(sc->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		sc->msix = vectors;
		sc->vsi.num_queues = queues;
	}

	/*
	** Explicitly set the guest PCI BUSMASTER capability
	** and we must rewrite the ENABLE in the MSIX control
	** register again at this point to cause the host to
	** successfully initialize us.
	*/
	{
		u16 pci_cmd_word;
		int msix_ctrl;

		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	/* Next we need to setup the vector for the Admin Queue */
	rid = 1;	// zero vector + 1
	sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->res == NULL) {
		device_printf(dev, "Unable to allocate"
		    " bus resource: AQ interrupt\n");
		goto fail;
	}
	if (bus_setup_intr(dev, sc->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixlv_msix_adminq, sc, &sc->tag)) {
		sc->res = NULL;
		device_printf(dev, "Failed to register AQ handler");
		goto fail;
	}
	bus_describe_intr(dev, sc->res, sc->tag, "adminq");

	return (vectors);

fail:
	/* The VF driver MUST use MSIX */
	return (0);
}
static int
ixlv_allocate_pci_resources(struct ixlv_sc *sc)
{
	int		rid;
	device_t	dev = sc->dev;

	rid = PCIR_BAR(0);
	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(sc->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	sc->osdep.mem_bus_space_tag =
	    rman_get_bustag(sc->pci_mem);
	sc->osdep.mem_bus_space_handle =
	    rman_get_bushandle(sc->pci_mem);
	sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
	sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
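	/*
	** Note: the bus-space tag/handle saved in osdep above are what
	** the shared code's rd32()/wr32() wrappers use for register
	** access; hw_addr below just records the handle rather than a
	** directly dereferenceable mapped address.
	*/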
	sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;

	sc->hw.back = &sc->osdep;

	/* Disable adminq interrupts */
	ixlv_disable_adminq_irq(&sc->hw);

	/*
	** Now setup MSI/X, it will return
	** us the number of supported vectors
	*/
	sc->msix = ixlv_init_msix(sc);

	/* We fail without MSIX support */
	if (sc->msix == 0)
		return (ENXIO);

	return (0);
}
static void
ixlv_free_pci_resources(struct ixlv_sc *sc)
{
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = sc->dev;

	/* We may get here before stations are setup */
	if (que == NULL)
		goto early;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		int rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}

early:
	/* Clean the AdminQ interrupt */
	if (sc->tag != NULL) {
		bus_teardown_intr(dev, sc->res, sc->tag);
		sc->tag = NULL;
	}
	if (sc->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);

	pci_release_msi(dev);

	if (sc->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(IXL_BAR), sc->msix_mem);

	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), sc->pci_mem);

	return;
}
/*********************************************************************
 *
 *  Create taskqueue and tasklet for Admin Queue interrupts.
 *
 **********************************************************************/
static int
ixlv_init_taskqueue(struct ixlv_sc *sc)
{
	int error = 0;

	TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);

	sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->tq);
	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
	    device_get_nameunit(sc->dev));

	return (error);
}
/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers for the VSI queues
 *
 **********************************************************************/
static int
ixlv_assign_msix(struct ixlv_sc *sc)
{
	device_t		dev = sc->dev;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct tx_ring		*txr;
	int			error, rid, vector = 1;
#ifdef RSS
	cpuset_t		cpu_mask;
#endif

	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
		int cpu_id = i;
		rid = vector + 1;
		txr = &que->txr;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev, "Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixlv_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register que handler");
			return (error);
		}
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
		/* Bind the vector to a CPU */
#ifdef RSS
		cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
		bus_bind_intr(dev, que->res, cpu_id);
		que->msix = vector;
		vsi->que_mask |= (u64)(1 << que->msix);
		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
		TASK_INIT(&que->task, 0, ixlv_handle_que, que);
		que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
		CPU_SETOF(cpu_id, &cpu_mask);
		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
		    &cpu_mask, "%s (bucket %d)",
		    device_get_nameunit(dev), cpu_id);
#else
		taskqueue_start_threads(&que->tq, 1, PI_NET,
		    "%s que", device_get_nameunit(dev));
#endif
	}

	return (0);
}
/*
** Requests a VF reset from the PF.
**
** Requires the VF's Admin Queue to be initialized.
*/
static int
ixlv_reset(struct ixlv_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	int		error = 0;

	/* Ask the PF to reset us if we are initiating */
	if (sc->init_state != IXLV_RESET_PENDING)
		ixlv_request_reset(sc);

	i40e_msec_delay(100);
	error = ixlv_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: VF reset failed\n",
		    __func__);
		return (error);
	}

	error = i40e_shutdown_adminq(hw);
	if (error) {
		device_printf(dev, "%s: shutdown_adminq failed: %d\n",
		    __func__, error);
		return (error);
	}

	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "%s: init_adminq failed: %d\n",
		    __func__, error);
		return (error);
	}

	return (0);
}
static int
ixlv_reset_complete(struct i40e_hw *hw)
{
	u32 reg;
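
	/* Poll for up to 10 seconds (100 iterations x 100ms delay) */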
	for (int i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_VFGEN_RSTAT) &
		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

		if ((reg == I40E_VFR_VFACTIVE) ||
		    (reg == I40E_VFR_COMPLETED))
			return (0);
		i40e_msec_delay(100);
	}

	return (EBUSY);
}
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static int
ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
{
	struct ifnet		*ifp;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;

	INIT_DBG_DEV(dev, "begin");

	ifp = vsi->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "%s: could not allocate ifnet"
		    " structure!\n", __func__);
		return (-1);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 4000000000;	// ??
	ifp->if_init = ixlv_init;
	ifp->if_softc = vsi;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixlv_ioctl;

#if __FreeBSD_version >= 1100000
	if_setgetcounterfn(ifp, ixl_get_counter);
#endif

	ifp->if_transmit = ixl_mq_start;

	ifp->if_qflush = ixl_qflush;
	ifp->if_snd.ifq_maxlen = que->num_desc - 2;

	ether_ifattach(ifp, sc->hw.mac.addr);

	vsi->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_TSO;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;

	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU
			     |  IFCAP_VLAN_HWCSUM
			     |  IFCAP_LRO;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	** Don't turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the ixl driver you can
	** enable this and get full hardware tag filtering.
	*/
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
	    ixlv_media_status);

	// JFV Add media types later?

	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	INIT_DBG_DEV(dev, "end");
	return (0);
}
/*
** Allocate and setup the interface queues
*/
static int
ixlv_setup_queues(struct ixlv_sc *sc)
{
	device_t		dev = sc->dev;
	struct ixl_vsi		*vsi;
	struct ixl_queue	*que;
	struct tx_ring		*txr;
	struct rx_ring		*rxr;
	int			rsize, tsize;
	int			error = I40E_SUCCESS;

	vsi = &sc->vsi;
	vsi->back = (void *)sc;
	vsi->hw = &sc->hw;
	vsi->num_vlans = 0;

	/* Get memory for the station queues */
	if (!(vsi->queues =
	    (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		error = ENOMEM;
		goto early;
	}

	for (int i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		que->num_desc = ixlv_ringsz;
		que->me = i;
		que->vsi = vsi;
		/* mark the queue as active */
		vsi->active_queues |= (u64)1 << que->me;

		txr = &que->txr;
		txr->que = que;
		txr->tail = I40E_QTX_TAIL1(que->me);
		/* Initialize the TX lock */
		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(dev), que->me);
		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
		/*
		** Create the TX descriptor ring, the extra int is
		** added as the location for HEAD WB.
		*/
		tsize = roundup2((que->num_desc *
		    sizeof(struct i40e_tx_desc)) +
		    sizeof(u32), DBA_ALIGN);
		if (i40e_allocate_dma_mem(&sc->hw,
		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
			device_printf(dev,
			    "Unable to allocate TX Descriptor memory\n");
			error = ENOMEM;
			goto fail;
		}
		txr->base = (struct i40e_tx_desc *)txr->dma.va;
		bzero((void *)txr->base, tsize);
		/* Now allocate transmit soft structs for the ring */
		if (ixl_allocate_tx_data(que)) {
			device_printf(dev,
			    "Critical Failure setting up TX structures\n");
			error = ENOMEM;
			goto fail;
		}
		/* Allocate a buf ring */
		txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
		    M_WAITOK, &txr->mtx);
		if (txr->br == NULL) {
			device_printf(dev,
			    "Critical Failure setting up TX buf ring\n");
			error = ENOMEM;
			goto fail;
		}

		/*
		 * Next the RX queues...
		 */
		rsize = roundup2(que->num_desc *
		    sizeof(union i40e_rx_desc), DBA_ALIGN);
		rxr = &que->rxr;
		rxr->que = que;
		rxr->tail = I40E_QRX_TAIL1(que->me);

		/* Initialize the RX side lock */
		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(dev), que->me);
		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);

		if (i40e_allocate_dma_mem(&sc->hw,
		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
			device_printf(dev,
			    "Unable to allocate RX Descriptor memory\n");
			error = ENOMEM;
			goto fail;
		}
		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
		bzero((void *)rxr->base, rsize);

		/* Allocate receive soft structs for the ring */
		if (ixl_allocate_rx_data(que)) {
			device_printf(dev,
			    "Critical Failure setting up receive structs\n");
			error = ENOMEM;
			goto fail;
		}
	}

	return (0);

fail:
	for (int i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		rxr = &que->rxr;
		txr = &que->txr;
		if (rxr->base)
			i40e_free_dma_mem(&sc->hw, &rxr->dma);
		if (txr->base)
			i40e_free_dma_mem(&sc->hw, &txr->dma);
	}
	free(vsi->queues, M_DEVBUF);

early:
	return (error);
}
/*
** This routine is run via a vlan config EVENT; it enables us
** to use the HW Filter table since we can get the vlan id.
** This just creates the entry in the soft version of the
** VFTA, init will repopulate the real table.
*/
static void
ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct ixl_vsi		*vsi = arg;
	struct ixlv_sc		*sc = vsi->back;
	struct ixlv_vlan_filter	*v;

	if (ifp->if_softc != arg)	/* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	/* Sanity check - make sure it doesn't already exist */
	SLIST_FOREACH(v, sc->vlan_filters, next) {
		if (v->vlan == vtag)
			return;
	}

	mtx_lock(&sc->mtx);
	++vsi->num_vlans;
	v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
	v->vlan = vtag;
	v->flags = IXL_FILTER_ADD;
	ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
	    IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
	mtx_unlock(&sc->mtx);
	return;
}
/*
** This routine is run via a vlan
** unconfig EVENT; remove our entry
** in the soft vfta.
*/
static void
ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct ixl_vsi		*vsi = arg;
	struct ixlv_sc		*sc = vsi->back;
	struct ixlv_vlan_filter	*v;
	int			i = 0;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	mtx_lock(&sc->mtx);
	SLIST_FOREACH(v, sc->vlan_filters, next) {
		if (v->vlan == vtag) {
			v->flags = IXL_FILTER_DEL;
			++i;
			--vsi->num_vlans;
		}
	}
	if (i)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
		    IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
	mtx_unlock(&sc->mtx);
	return;
}
/*
** Get a new filter and add it to the mac filter list.
*/
static struct ixlv_mac_filter *
ixlv_get_mac_filter(struct ixlv_sc *sc)
{
	struct ixlv_mac_filter	*f;

	f = malloc(sizeof(struct ixlv_mac_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (f)
		SLIST_INSERT_HEAD(sc->mac_filters, f, next);

	return (f);
}
/*
** Find the filter with matching MAC address
*/
static struct ixlv_mac_filter *
ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
{
	struct ixlv_mac_filter	*f;
	bool			match = FALSE;

	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (cmp_etheraddr(f->macaddr, macaddr)) {
			match = TRUE;
			break;
		}
	}

	if (match == FALSE)
		return (NULL);

	return (f);
}
/*
** Admin Queue interrupt handler
*/
static void
ixlv_msix_adminq(void *arg)
{
	struct ixlv_sc	*sc = arg;
	struct i40e_hw	*hw = &sc->hw;
	u32		reg, mask;

	reg = rd32(hw, I40E_VFINT_ICR01);
	mask = rd32(hw, I40E_VFINT_ICR0_ENA1);

	reg = rd32(hw, I40E_VFINT_DYN_CTL01);
	reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
	wr32(hw, I40E_VFINT_DYN_CTL01, reg);

	/* schedule task */
	taskqueue_enqueue(sc->tq, &sc->aq_irq);
	return;
}
void
ixlv_enable_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;

	ixlv_enable_adminq_irq(hw);
	for (int i = 0; i < vsi->num_queues; i++, que++)
		ixlv_enable_queue_irq(hw, que->me);
}

void
ixlv_disable_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;

	ixlv_disable_adminq_irq(hw);
	for (int i = 0; i < vsi->num_queues; i++, que++)
		ixlv_disable_queue_irq(hw, que->me);
}
static void
ixlv_disable_adminq_irq(struct i40e_hw *hw)
{
	wr32(hw, I40E_VFINT_DYN_CTL01, 0);
	wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
	/* flush */
	rd32(hw, I40E_VFGEN_RSTAT);
	return;
}

static void
ixlv_enable_adminq_irq(struct i40e_hw *hw)
{
	wr32(hw, I40E_VFINT_DYN_CTL01,
	    I40E_VFINT_DYN_CTL01_INTENA_MASK |
	    I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
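	/* (writing all-ones to the ITR_INDX field selects the "no ITR"
	** index, so the admin queue interrupt is not rate-limited) */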
	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
	/* flush */
	rd32(hw, I40E_VFGEN_RSTAT);
	return;
}

static void
ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
{
	u32		reg;

	reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
	    I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
	wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
}

static void
ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
{
	wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
	rd32(hw, I40E_VFGEN_RSTAT);
	return;
}
/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
static void
ixlv_set_queue_rx_itr(struct ixl_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct rx_ring	*rxr = &que->rxr;
	u16		rx_itr;
	u16		rx_latency = 0;
	int		rx_bytes;

	/* Idle, do nothing */
	if (rxr->bytes == 0)
		return;

	if (ixlv_dynamic_rx_itr) {
		rx_bytes = rxr->bytes/rxr->itr;
		rx_itr = rxr->itr;

		/* Adjust latency range */
		switch (rxr->latency) {
		case IXL_LOW_LATENCY:
			if (rx_bytes > 10) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (rx_bytes > 20) {
				rx_latency = IXL_BULK_LATENCY;
				rx_itr = IXL_ITR_8K;
			} else if (rx_bytes <= 10) {
				rx_latency = IXL_LOW_LATENCY;
				rx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (rx_bytes <= 20) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		}

		rxr->latency = rx_latency;

		if (rx_itr != rxr->itr) {
			/* do an exponential smoothing */
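			/* (a weighted harmonic mean: the previous ITR gets
			** 9x the weight of the new target, so the rate
			** only moves gradually between samples) */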
			rx_itr = (10 * rx_itr * rxr->itr) /
			    ((9 * rx_itr) + rxr->itr);
			rxr->itr = rx_itr & IXL_MAX_ITR;
			wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
			    que->me), rxr->itr);
		}
	} else { /* We may have toggled to non-dynamic */
		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->rx_itr_setting = ixlv_rx_itr;
		/* Update the hardware if needed */
		if (rxr->itr != vsi->rx_itr_setting) {
			rxr->itr = vsi->rx_itr_setting;
			wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
			    que->me), rxr->itr);
		}
	}
	return;
}
/*
** Provide an update to the queue TX
** interrupt moderation value.
*/
static void
ixlv_set_queue_tx_itr(struct ixl_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	u16		tx_itr;
	u16		tx_latency = 0;
	int		tx_bytes;

	/* Idle, do nothing */
	if (txr->bytes == 0)
		return;

	if (ixlv_dynamic_tx_itr) {
		tx_bytes = txr->bytes/txr->itr;
		tx_itr = txr->itr;

		switch (txr->latency) {
		case IXL_LOW_LATENCY:
			if (tx_bytes > 10) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (tx_bytes > 20) {
				tx_latency = IXL_BULK_LATENCY;
				tx_itr = IXL_ITR_8K;
			} else if (tx_bytes <= 10) {
				tx_latency = IXL_LOW_LATENCY;
				tx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (tx_bytes <= 20) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		}

		txr->latency = tx_latency;

		if (tx_itr != txr->itr) {
			/* do an exponential smoothing */
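			/* (same 9:1 weighted blend as the RX path above) */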
			tx_itr = (10 * tx_itr * txr->itr) /
			    ((9 * tx_itr) + txr->itr);
			txr->itr = tx_itr & IXL_MAX_ITR;
			wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
			    que->me), txr->itr);
		}
	} else { /* We may have toggled to non-dynamic */
		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->tx_itr_setting = ixlv_tx_itr;
		/* Update the hardware if needed */
		if (txr->itr != vsi->tx_itr_setting) {
			txr->itr = vsi->tx_itr_setting;
			wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
			    que->me), txr->itr);
		}
	}
	return;
}
/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/
static void
ixlv_handle_que(void *context, int pending)
{
	struct ixl_queue	*que = context;
	struct ixl_vsi		*vsi = que->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct tx_ring		*txr = &que->txr;
	struct ifnet		*ifp = vsi->ifp;
	bool			more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);
		mtx_lock(&txr->mtx);
		ixl_txeof(que);
		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		mtx_unlock(&txr->mtx);
		if (more) {
			taskqueue_enqueue(que->tq, &que->task);
			return;
		}
	}

	/* Reenable this interrupt - hmmm */
	ixlv_enable_queue_irq(hw, que->me);
	return;
}
/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixlv_msix_que(void *arg)
{
	struct ixl_queue	*que = arg;
	struct ixl_vsi		*vsi = que->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct tx_ring		*txr = &que->txr;
	bool			more_tx, more_rx;

	/* Spurious interrupts are ignored */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	++que->irqs;

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	mtx_lock(&txr->mtx);
	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	mtx_unlock(&txr->mtx);

	ixlv_set_queue_rx_itr(que);
	ixlv_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
	else
		ixlv_enable_queue_irq(hw, que->me);

	return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixlv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixlv_sc	*sc = vsi->back;

	INIT_DBG_IF(ifp, "begin");

	mtx_lock(&sc->mtx);

	ixlv_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_up) {
		mtx_unlock(&sc->mtx);
		INIT_DBG_IF(ifp, "end: link not up");
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;
	mtx_unlock(&sc->mtx);
	INIT_DBG_IF(ifp, "end");
	return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixlv_media_change(struct ifnet *ifp)
{
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ifmedia *ifm = &vsi->media;

	INIT_DBG_IF(ifp, "begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	INIT_DBG_IF(ifp, "end");
	return (0);
}
/*********************************************************************
 *  Multicast Initialization
 *
 *  This routine is called by init to reset a fresh state.
 *
 **********************************************************************/
static void
ixlv_init_multi(struct ixl_vsi *vsi)
{
	struct ixlv_mac_filter	*f;
	struct ixlv_sc		*sc = vsi->back;
	int			mcnt = 0;

	IOCTL_DBG_IF(vsi->ifp, "begin");

	/* First clear any multicast filters */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if ((f->flags & IXL_FILTER_USED)
		    && (f->flags & IXL_FILTER_MC)) {
			f->flags |= IXL_FILTER_DEL;
			mcnt++;
		}
	}
	if (mcnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
		    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
		    sc);

	IOCTL_DBG_IF(vsi->ifp, "end");
}
static void
ixlv_add_multi(struct ixl_vsi *vsi)
{
	struct ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct ixlv_sc		*sc = vsi->back;
	int			mcnt = 0;

	IOCTL_DBG_IF(ifp, "begin");

	if_maddr_rlock(ifp);
	/*
	** Get a count, to decide if we
	** simply use multicast promiscuous.
	*/
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mcnt++;
	}
	if_maddr_runlock(ifp);

	// TODO: Remove -- cannot set promiscuous mode in a VF
	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete all multicast filters */
		ixlv_init_multi(vsi);
		sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
		    IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
		    sc);
		IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
		return;
	}

	mcnt = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (!ixlv_add_mac_filter(sc,
		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    IXL_FILTER_MC))
			mcnt++;
	}
	if_maddr_runlock(ifp);
	/*
	** Notify AQ task that sw filters need to be
	** added to hw list
	*/
	if (mcnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
		    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
		    sc);

	IOCTL_DBG_IF(ifp, "end");
}
static void
ixlv_del_multi(struct ixl_vsi *vsi)
{
	struct ixlv_mac_filter	*f;
	struct ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct ixlv_sc		*sc = vsi->back;
	int			mcnt = 0;
	bool			match = FALSE;

	IOCTL_DBG_IF(ifp, "begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if ((f->flags & IXL_FILTER_USED)
		    && (f->flags & IXL_FILTER_MC)) {
			/* check if mac address in filter is in sc's list */
			match = FALSE;
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr =
				    (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			/* if this filter is not in the sc's list, remove it */
			if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
				IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
				    MAC_FORMAT_ARGS(f->macaddr));
			}
			else if (match == FALSE)
				IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
				    MAC_FORMAT_ARGS(f->macaddr));
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
		    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
		    sc);

	IOCTL_DBG_IF(ifp, "end");
}
2339 /*********************************************************************
2342 * This routine checks for link status,updates statistics,
2343 * and runs the watchdog check.
2345 **********************************************************************/
static void
ixlv_local_timer(void *arg)
{
	struct ixlv_sc *sc = arg;
	struct i40e_hw *hw = &sc->hw;
	struct ixl_vsi *vsi = &sc->vsi;
	struct ixl_queue *que = vsi->queues;
	device_t dev = sc->dev;
	int hung = 0;
	u32 mask, val;

	IXLV_CORE_LOCK_ASSERT(sc);

	/* If Reset is in progress just bail */
	if (sc->init_state == IXLV_RESET_PENDING)
		return;

	/* Check for when PF triggers a VF reset */
	val = rd32(hw, I40E_VFGEN_RSTAT) &
	    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

	if (val != I40E_VFR_VFACTIVE
	    && val != I40E_VFR_COMPLETED) {
		DDPRINTF(dev, "reset in progress! (%d)", val);
		return;
	}

	ixlv_request_stats(sc);

	/* clean and process any events */
	taskqueue_enqueue(sc->tq, &sc->aq_irq);

	/*
	** Check status on the queues for a hang
	*/
	mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
	    I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK);

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		/* Any queues with outstanding work get a sw irq */
		if (que->busy)
			wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to 5 we declare it hung.
		*/
		if (que->busy == IXL_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			vsi->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
				vsi->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXL_MAX_TX_BUSY) {
			device_printf(dev, "Warning queue %d "
			    "appears to be hung!\n", i);
			que->busy = IXL_QUEUE_HUNG;
			++hung;
		}
	}
	/* Only reset when all queues show hung */
	if (hung == vsi->num_queues)
		goto hung;

	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
	return;

hung:
	device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
	sc->init_state = IXLV_RESET_REQUIRED;
	ixlv_init_locked(sc);
}
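
/*
** The hang detection above is a simple counting scheme: each timer tick
** in which txeof finds uncleaned descriptors but cleans none bumps
** que->busy; at IXL_MAX_TX_BUSY the queue is flagged IXL_QUEUE_HUNG and
** dropped from active_queues. A full reinit is requested only when
** every queue reports hung, so one stalled queue cannot reset the
** whole VF by itself.
*/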
/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
void
ixlv_update_link_status(struct ixlv_sc *sc)
{
	struct ixl_vsi *vsi = &sc->vsi;
	struct ifnet *ifp = vsi->ifp;

	if (sc->link_up) {
		if (vsi->link_active == FALSE) {
			if (bootverbose)
				if_printf(ifp, "Link is Up, %d Gbps\n",
				    (sc->link_speed == I40E_LINK_SPEED_40GB) ? 40 : 10);
			vsi->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			if (bootverbose)
				if_printf(ifp, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			vsi->link_active = FALSE;
		}
	}
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/
static void
ixlv_stop(struct ixlv_sc *sc)
{
	struct ifnet *ifp;
	int start;

	ifp = sc->vsi.ifp;
	INIT_DBG_IF(ifp, "begin");

	IXLV_CORE_LOCK_ASSERT(sc);

	ixl_vc_flush(&sc->vc_mgr);
	ixlv_disable_queues(sc);

	/* Drain admin queue events until the queues report stopped,
	** but give up after hz/10 ticks */
	start = ticks;
	while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
	    ((ticks - start) < hz/10))
		ixlv_do_adminq_locked(sc);

	/* Stop the local timer */
	callout_stop(&sc->timer);

	INIT_DBG_IF(ifp, "end");
}
/*********************************************************************
 *
 *  Free all station queue structs.
 *
 **********************************************************************/
static void
ixlv_free_queues(struct ixl_vsi *vsi)
{
	struct ixlv_sc *sc = (struct ixlv_sc *)vsi->back;
	struct ixl_queue *que = vsi->queues;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		struct rx_ring *rxr = &que->rxr;

		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
			continue;
		IXL_TX_LOCK(txr);
		ixl_free_que_tx(que);
		if (txr->base)
			i40e_free_dma_mem(&sc->hw, &txr->dma);
		IXL_TX_UNLOCK(txr);
		IXL_TX_LOCK_DESTROY(txr);

		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
			continue;
		IXL_RX_LOCK(rxr);
		ixl_free_que_rx(que);
		if (rxr->base)
			i40e_free_dma_mem(&sc->hw, &rxr->dma);
		IXL_RX_UNLOCK(rxr);
		IXL_RX_LOCK_DESTROY(rxr);
	}
	free(vsi->queues, M_DEVBUF);
}
/*
** ixlv_config_rss - setup RSS
**
** RSS keys and table are cleared on VF reset.
*/
static void
ixlv_config_rss(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	struct ixl_vsi *vsi = &sc->vsi;
	u32 lut = 0;
	u64 set_hena = 0, hena;
	int i, j, que_id;
#ifdef RSS
	u32 rss_hash_config;
	u32 rss_seed[IXL_KEYSZ];
#else
	u32 rss_seed[IXL_KEYSZ] = {0x41b01687,
	    0x183cfd8c, 0xce880440, 0x580cbc3c,
	    0x35897377, 0x328b25e1, 0x4fa98922,
	    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
#endif

	/* Don't set up RSS if using a single queue */
	if (vsi->num_queues == 1) {
		wr32(hw, I40E_VFQF_HENA(0), 0);
		wr32(hw, I40E_VFQF_HENA(1), 0);
		ixl_flush(hw);
		return;
	}

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *)&rss_seed);
#endif
	/* Fill out hash function seed (IXL_KEYSZ 32-bit words) */
	for (i = 0; i < IXL_KEYSZ; i++)
		wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);

	/* Enable PCTYPES for RSS: */
#ifdef RSS
	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	set_hena =
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
	    ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
	    ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
	    ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
#endif
	hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
	    ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
	hena |= set_hena;
	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));

	// TODO: Fix -- only 3,7,11,15 are filled out, instead of all 16 registers
	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
		if (j == vsi->num_queues)
			j = 0;
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues.)
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % vsi->num_queues;
#else
		que_id = j;
#endif
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (que_id & 0xF);
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3) {
			wr32(hw, I40E_VFQF_HLUT(i), lut);
			DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
		}
	}
	ixl_flush(hw);
}
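
/*
** Illustrative sketch, not part of the original driver: how four
** consecutive LUT entries pack into one 32-bit HLUT register through
** the "(lut << 8) | (que_id & 0xF)" sliding window used above. The
** oldest entry lands in the most significant byte, so with
** num_queues = 4 the first register written holds 0x00010203. The
** helper name is hypothetical and exists only for illustration.
*/
static inline u32
ixlv_hlut_pack_sketch(u8 q0, u8 q1, u8 q2, u8 q3)
{
	u32 lut = 0;

	lut = (lut << 8) | (q0 & 0xF);	/* 0x000000<q0> */
	lut = (lut << 8) | (q1 & 0xF);	/* 0x0000<q0><q1> */
	lut = (lut << 8) | (q2 & 0xF);	/* 0x00<q0><q1><q2> */
	lut = (lut << 8) | (q3 & 0xF);	/* 0x<q0><q1><q2><q3> */
	return (lut);
}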
/*
** This routine refreshes vlan filters; called by init,
** it scans the filter table and then updates the AQ.
*/
static void
ixlv_setup_vlan_filters(struct ixlv_sc *sc)
{
	struct ixl_vsi *vsi = &sc->vsi;
	struct ixlv_vlan_filter *f;
	int cnt = 0;

	if (vsi->num_vlans == 0)
		return;
	/*
	** Scan the filter table for vlan entries,
	** and if found call for the AQ update.
	*/
	SLIST_FOREACH(f, sc->vlan_filters, next)
		if (f->flags & IXL_FILTER_ADD)
			cnt++;
	if (cnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
		    IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
}
/*
** This routine adds new MAC filters to the sc's list;
** these are later added in hardware by sending a virtual
** channel message.
*/
static int
ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
{
	struct ixlv_mac_filter *f;

	/* Does one already exist? */
	f = ixlv_find_mac_filter(sc, macaddr);
	if (f != NULL) {
		IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
		    MAC_FORMAT_ARGS(macaddr));
		return (EEXIST);
	}

	/* If not, get a new empty filter */
	f = ixlv_get_mac_filter(sc);
	if (f == NULL) {
		if_printf(sc->vsi.ifp, "%s: no filters available!!\n",
		    __func__);
		return (ENOMEM);
	}

	IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
	    MAC_FORMAT_ARGS(macaddr));

	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
	f->flags |= flags;
	return (0);
}
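
/*
** Usage sketch (illustrative, not from the original source): callers
** pass extra flags so the AQ task can classify entries when building
** the virtchnl message; ixlv_add_multi() above does exactly this with
** IXL_FILTER_MC:
**
**	if (!ixlv_add_mac_filter(sc, mc_addr, IXL_FILTER_MC))
**		mcnt++;		/* 0 return == newly marked for add */
*/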
/*
** Marks a MAC filter for deletion.
*/
static int
ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
{
	struct ixlv_mac_filter *f;

	f = ixlv_find_mac_filter(sc, macaddr);
	if (f == NULL)
		return (ENOENT);

	f->flags |= IXL_FILTER_DEL;
	return (0);
}
/*
** Tasklet handler for MSIX Adminq interrupts
**  - done outside interrupt context since it might sleep
*/
static void
ixlv_do_adminq(void *context, int pending)
{
	struct ixlv_sc *sc = context;

	mtx_lock(&sc->mtx);
	ixlv_do_adminq_locked(sc);
	mtx_unlock(&sc->mtx);
}
static void
ixlv_do_adminq_locked(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	struct i40e_arq_event_info event;
	struct i40e_virtchnl_msg *v_msg;
	device_t dev = sc->dev;
	u16 result = 0;
	u32 reg, oldreg;
	i40e_status ret;

	IXLV_CORE_LOCK_ASSERT(sc);

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = sc->aq_buffer;
	v_msg = (struct i40e_virtchnl_msg *)&event.desc;

	do {
		ret = i40e_clean_arq_element(hw, &event, &result);
		if (ret)
			break;
		ixlv_vc_completion(sc, v_msg->v_opcode,
		    v_msg->v_retval, event.msg_buf, event.msg_len);
		if (result != 0)
			bzero(event.msg_buf, IXL_AQ_BUF_SZ);
	} while (result);

	/* check for Admin queue errors */
	oldreg = reg = rd32(hw, hw->aq.arq.len);
	if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
		device_printf(dev, "ARQ VF Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
	}
	if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
		device_printf(dev, "ARQ Overflow Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
	}
	if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
		device_printf(dev, "ARQ Critical Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.arq.len, reg);

	oldreg = reg = rd32(hw, hw->aq.asq.len);
	if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
		device_printf(dev, "ASQ VF Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
	}
	if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
		device_printf(dev, "ASQ Overflow Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
	}
	if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
		device_printf(dev, "ASQ Critical Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.asq.len, reg);

	ixlv_enable_adminq_irq(hw);
}
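
/*
** Note on the error handling above: the ARQ/ASQ length registers carry
** the VF-error, overflow, and critical-error status bits alongside the
** queue length. The handler reads each register once, clears any set
** error bits in its local copy, and writes the register back only when
** a bit actually changed, sparing a register write in the common
** error-free case.
*/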
static void
ixlv_add_sysctls(struct ixlv_sc *sc)
{
	device_t dev = sc->dev;
	struct ixl_vsi *vsi = &sc->vsi;
	struct i40e_eth_stats *es = &vsi->eth_stats;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);

	struct sysctl_oid *vsi_node, *queue_node;
	struct sysctl_oid_list *vsi_list, *queue_list;

#define QUEUE_NAME_LEN 32
	char queue_namebuf[QUEUE_NAME_LEN];

	struct ixl_queue *queues = vsi->queues;
	struct tx_ring *txr;
	struct rx_ring *rxr;

	/* Driver statistics sysctls */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &sc->watchdog_events,
	    "Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
	    CTLFLAG_RD, &sc->admin_irq,
	    "Admin Queue IRQ Handled");

	/* VSI statistics sysctls */
	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
	    CTLFLAG_RD, NULL, "VSI-specific statistics");
	vsi_list = SYSCTL_CHILDREN(vsi_node);

	struct ixl_sysctl_info ctls[] =
	{
		{&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
		{&es->rx_unicast, "ucast_pkts_rcvd",
		    "Unicast Packets Received"},
		{&es->rx_multicast, "mcast_pkts_rcvd",
		    "Multicast Packets Received"},
		{&es->rx_broadcast, "bcast_pkts_rcvd",
		    "Broadcast Packets Received"},
		{&es->rx_discards, "rx_discards", "Discarded RX packets"},
		{&es->rx_unknown_protocol, "rx_unknown_proto",
		    "RX unknown protocol packets"},
		{&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
		{&es->tx_unicast, "ucast_pkts_txd",
		    "Unicast Packets Transmitted"},
		{&es->tx_multicast, "mcast_pkts_txd",
		    "Multicast Packets Transmitted"},
		{&es->tx_broadcast, "bcast_pkts_txd",
		    "Broadcast Packets Transmitted"},
		{&es->tx_errors, "tx_errors", "TX packet errors"},
		/* end */
		{0, 0, 0}
	};
	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != NULL)
	{
		SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name,
		    CTLFLAG_RD, entry->stat,
		    entry->description);
		entry++;
	}
	/* Per-queue statistics sysctls */
	for (int q = 0; q < vsi->num_queues; q++) {
		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO,
		    queue_namebuf, CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		txr = &(queues[q].txr);
		rxr = &(queues[q].rxr);

		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
		    CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
		    "m_defrag() failed");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
		    CTLFLAG_RD, &(queues[q].dropped_pkts),
		    "Driver dropped packets");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
		    CTLFLAG_RD, &(queues[q].irqs),
		    "irqs on this queue");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
		    CTLFLAG_RD, &(queues[q].tso),
		    "TSO");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
		    CTLFLAG_RD, &(queues[q].tx_dma_setup),
		    "Driver tx dma failure in xmit");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
		    CTLFLAG_RD, &(txr->no_desc),
		    "Queue No Descriptor Available");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
		    CTLFLAG_RD, &(txr->total_packets),
		    "Queue Packets Transmitted");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
		    CTLFLAG_RD, &(txr->tx_bytes),
		    "Queue Bytes Transmitted");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
		    CTLFLAG_RD, &(rxr->rx_packets),
		    "Queue Packets Received");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
		    CTLFLAG_RD, &(rxr->rx_bytes),
		    "Queue Bytes Received");

		/* Examine queue state */
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
		    CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
		    sizeof(struct ixl_queue),
		    ixlv_sysctl_qtx_tail_handler, "IU",
		    "Queue Transmit Descriptor Tail");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
		    CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
		    sizeof(struct ixl_queue),
		    ixlv_sysctl_qrx_tail_handler, "IU",
		    "Queue Receive Descriptor Tail");
	}
}
static void
ixlv_init_filters(struct ixlv_sc *sc)
{
	sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INIT(sc->mac_filters);
	sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INIT(sc->vlan_filters);
}
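
/*
** Note: each list head above is allocated with the size of a full
** filter element, which is larger than the SLIST_HEAD it actually
** holds, and the M_NOWAIT results are not checked for NULL; attach
** relies on these small allocations succeeding. ixlv_free_filters()
** below empties both lists but never frees the heads themselves.
*/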
static void
ixlv_free_filters(struct ixlv_sc *sc)
{
	struct ixlv_mac_filter *f;
	struct ixlv_vlan_filter *v;

	while (!SLIST_EMPTY(sc->mac_filters)) {
		f = SLIST_FIRST(sc->mac_filters);
		SLIST_REMOVE_HEAD(sc->mac_filters, next);
		free(f, M_DEVBUF);
	}
	while (!SLIST_EMPTY(sc->vlan_filters)) {
		v = SLIST_FIRST(sc->vlan_filters);
		SLIST_REMOVE_HEAD(sc->vlan_filters, next);
		free(v, M_DEVBUF);
	}
}
/**
 * ixlv_sysctl_qtx_tail_handler
 * Retrieves I40E_QTX_TAIL1 value from hardware
 * for the tx_rings
 **/
static int
ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_queue *que;
	int error;
	u32 val;

	que = ((struct ixl_queue *)oidp->oid_arg1);
	if (!que)
		return (0);
	val = rd32(que->vsi->hw, que->txr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}
/**
 * ixlv_sysctl_qrx_tail_handler
 * Retrieves I40E_QRX_TAIL1 value from hardware
 * for the rx_rings
 **/
static int
ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_queue *que;
	int error;
	u32 val;

	que = ((struct ixl_queue *)oidp->oid_arg1);
	if (!que)
		return (0);
	val = rd32(que->vsi->hw, que->rxr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}