/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#ifndef IXL_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixl.h"
#include "ixlv.h"

#ifdef RSS
#include <net/rss_config.h>
#endif
/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixlv_driver_version[] = "1.2.11-k";
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixlv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
    {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
    {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
    /* required last entry */
    {0, 0, 0, 0, 0}
};
/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixlv_strings[] = {
    "Intel(R) Ethernet Connection XL710 VF Driver"
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixlv_probe(device_t);
static int      ixlv_attach(device_t);
static int      ixlv_detach(device_t);
static int      ixlv_shutdown(device_t);
static void     ixlv_init_locked(struct ixlv_sc *);
static int      ixlv_allocate_pci_resources(struct ixlv_sc *);
static void     ixlv_free_pci_resources(struct ixlv_sc *);
static int      ixlv_assign_msix(struct ixlv_sc *);
static int      ixlv_init_msix(struct ixlv_sc *);
static int      ixlv_init_taskqueue(struct ixlv_sc *);
static int      ixlv_setup_queues(struct ixlv_sc *);
static void     ixlv_config_rss(struct ixlv_sc *);
static void     ixlv_stop(struct ixlv_sc *);
static void     ixlv_add_multi(struct ixl_vsi *);
static void     ixlv_del_multi(struct ixl_vsi *);
static void     ixlv_free_queues(struct ixl_vsi *);
static int      ixlv_setup_interface(device_t, struct ixlv_sc *);

static int      ixlv_media_change(struct ifnet *);
static void     ixlv_media_status(struct ifnet *, struct ifmediareq *);

static void     ixlv_local_timer(void *);

static int      ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
static int      ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void     ixlv_init_filters(struct ixlv_sc *);
static void     ixlv_free_filters(struct ixlv_sc *);

static void     ixlv_msix_que(void *);
static void     ixlv_msix_adminq(void *);
static void     ixlv_do_adminq(void *, int);
static void     ixlv_do_adminq_locked(struct ixlv_sc *sc);
static void     ixlv_handle_que(void *, int);
static int      ixlv_reset(struct ixlv_sc *);
static int      ixlv_reset_complete(struct i40e_hw *);
static void     ixlv_set_queue_rx_itr(struct ixl_queue *);
static void     ixlv_set_queue_tx_itr(struct ixl_queue *);
static void     ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
                    enum i40e_status_code);

static void     ixlv_enable_adminq_irq(struct i40e_hw *);
static void     ixlv_disable_adminq_irq(struct i40e_hw *);
static void     ixlv_enable_queue_irq(struct i40e_hw *, int);
static void     ixlv_disable_queue_irq(struct i40e_hw *, int);

static void     ixlv_setup_vlan_filters(struct ixlv_sc *);
static void     ixlv_register_vlan(void *, struct ifnet *, u16);
static void     ixlv_unregister_vlan(void *, struct ifnet *, u16);

static void     ixlv_init_hw(struct ixlv_sc *);
static int      ixlv_setup_vc(struct ixlv_sc *);
static int      ixlv_vf_config(struct ixlv_sc *);

static void     ixlv_cap_txcsum_tso(struct ixl_vsi *,
                    struct ifnet *, int);

static void     ixlv_add_sysctls(struct ixlv_sc *);
static int      ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int      ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixlv_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, ixlv_probe),
    DEVMETHOD(device_attach, ixlv_attach),
    DEVMETHOD(device_detach, ixlv_detach),
    DEVMETHOD(device_shutdown, ixlv_shutdown),
    {0, 0}
};
static driver_t ixlv_driver = {
    "ixlv", ixlv_methods, sizeof(struct ixlv_sc),
};

devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);

MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);
/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
    "IXLV driver parameters");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixlv_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixlv_ringsz, 0, "Descriptor Ring Size");
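/*
 * Usage sketch (values below are examples, not recommendations): these
 * loader tunables are read once at module load, e.g. in /boot/loader.conf:
 *
 *     hw.ixlv.ringsz="2048"
 *     hw.ixlv.max_queues="4"
 *
 * Because the sysctls are CTLFLAG_RDTUN, "sysctl hw.ixlv.ring_size"
 * reports the active value but cannot change it at runtime.
 */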
/* Set to zero to auto calculate */
int ixlv_max_queues = 0;
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixlv_max_queues, 0, "Number of Queues");

/*
** Number of entries in Tx queue buf_ring.
** Increasing this will reduce the number of
** errors when transmitting fragmented UDP
** packets.
*/
static int ixlv_txbrsz = DEFAULT_TXBRSZ;
TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
    &ixlv_txbrsz, 0, "TX Buf Ring Size");
/*
** Controls for Interrupt Throttling
**   - true/false for dynamic adjustment
**   - default values for static ITR
*/
int ixlv_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixlv_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixlv_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixlv_rx_itr, 0, "RX Interrupt Rate");

int ixlv_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixlv_tx_itr, 0, "TX Interrupt Rate");
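/*
 * Note on the ITR tunables above: the dynamic_*_itr flags select adaptive
 * moderation (see ixlv_set_queue_rx_itr()/ixlv_set_queue_tx_itr() below),
 * while rx_itr/tx_itr give the static rates used when adaptation is off.
 * The IXL_ITR_* names suggest approximate interrupts-per-second ceilings
 * (e.g. IXL_ITR_8K ~ 8000/s); the exact register encoding is
 * hardware-defined.
 */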
/*********************************************************************
 *  Device identification routine
 *
 *  ixlv_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/
static int
ixlv_probe(device_t dev)
{
    ixl_vendor_info_t *ent;

    u16  pci_vendor_id, pci_device_id;
    u16  pci_subvendor_id, pci_subdevice_id;
    char device_name[256];

    INIT_DEBUGOUT("ixlv_probe: begin");

    pci_vendor_id = pci_get_vendor(dev);
    if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
        return (ENXIO);

    pci_device_id = pci_get_device(dev);
    pci_subvendor_id = pci_get_subvendor(dev);
    pci_subdevice_id = pci_get_subdevice(dev);

    ent = ixlv_vendor_info_array;
    while (ent->vendor_id != 0) {
        if ((pci_vendor_id == ent->vendor_id) &&
            (pci_device_id == ent->device_id) &&
            ((pci_subvendor_id == ent->subvendor_id) ||
             (ent->subvendor_id == 0)) &&
            ((pci_subdevice_id == ent->subdevice_id) ||
             (ent->subdevice_id == 0))) {
            snprintf(device_name, sizeof(device_name),
                "%s, Version - %s", ixlv_strings[ent->index],
                ixlv_driver_version);
            device_set_desc_copy(dev, device_name);
            return (BUS_PROBE_DEFAULT);
        }
        ent++;
    }
    return (ENXIO);
}
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
ixlv_attach(device_t dev)
{
    struct ixlv_sc  *sc;
    struct i40e_hw  *hw;
    struct ixl_vsi  *vsi;
    int             error = 0;

    INIT_DBG_DEV(dev, "begin");

    /* Allocate, clear, and link in our primary soft structure */
    sc = device_get_softc(dev);
    sc->dev = sc->osdep.dev = dev;
    hw = &sc->hw;
    vsi = &sc->vsi;
    vsi->dev = dev;

    /* Initialize hw struct */
    ixlv_init_hw(sc);

    /* Allocate filter lists */
    ixlv_init_filters(sc);

    /* Core Lock Init */
    mtx_init(&sc->mtx, device_get_nameunit(dev),
        "IXL SC Lock", MTX_DEF);

    /* Set up the timer callout */
    callout_init_mtx(&sc->timer, &sc->mtx, 0);

    /* Do PCI setup - map BAR0, etc */
    if (ixlv_allocate_pci_resources(sc)) {
        device_printf(dev, "%s: Allocation of PCI resources failed\n",
            __func__);
        error = ENXIO;
        goto err_early;
    }

    INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");

    error = i40e_set_mac_type(hw);
    if (error) {
        device_printf(dev, "%s: set_mac_type failed: %d\n",
            __func__, error);
        goto err_pci_res;
    }

    error = ixlv_reset_complete(hw);
    if (error) {
        device_printf(dev, "%s: Device is still being reset\n",
            __func__);
        goto err_pci_res;
    }

    INIT_DBG_DEV(dev, "VF Device is ready for configuration");

    error = ixlv_setup_vc(sc);
    if (error) {
        device_printf(dev, "%s: Error setting up PF comms, %d\n",
            __func__, error);
        goto err_pci_res;
    }

    INIT_DBG_DEV(dev, "PF API version verified");

    /* Need API version before sending reset message */
    error = ixlv_reset(sc);
    if (error) {
        device_printf(dev, "VF reset failed; reload the driver\n");
        goto err_aq;
    }

    INIT_DBG_DEV(dev, "VF reset complete");

    /* Ask for VF config from PF */
    error = ixlv_vf_config(sc);
    if (error) {
        device_printf(dev, "Error getting configuration from PF: %d\n",
            error);
        goto err_aq;
    }

    INIT_DBG_DEV(dev, "VF config from PF:");
    INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
        sc->vf_res->num_vsis,
        sc->vf_res->num_queue_pairs,
        sc->vf_res->max_vectors,
        sc->vf_res->max_mtu);
    INIT_DBG_DEV(dev, "Offload flags: %#010x",
        sc->vf_res->vf_offload_flags);

    /* got VF config message back from PF, now we can parse it */
    for (int i = 0; i < sc->vf_res->num_vsis; i++) {
        if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
            sc->vsi_res = &sc->vf_res->vsi_res[i];
    }
    if (!sc->vsi_res) {
        device_printf(dev, "%s: no LAN VSI found\n", __func__);
        error = EIO;
        goto err_res_buf;
    }

    INIT_DBG_DEV(dev, "Resource Acquisition complete");

    /* If no mac address was assigned just make a random one */
    if (!ixlv_check_ether_addr(hw->mac.addr)) {
        u8 addr[ETHER_ADDR_LEN];
        arc4rand(&addr, sizeof(addr), 0);
        addr[0] &= 0xFE;    /* clear the multicast bit */
        addr[0] |= 0x02;    /* set the locally-administered bit */
        bcopy(addr, hw->mac.addr, sizeof(addr));
    }

    vsi->id = sc->vsi_res->vsi_id;
    vsi->back = (void *)sc;

    /* This allocates the memory and early settings */
    if (ixlv_setup_queues(sc) != 0) {
        device_printf(dev, "%s: setup queues failed!\n",
            __func__);
        error = EIO;
        goto out;
    }

    /* Setup the stack interface */
    if (ixlv_setup_interface(dev, sc) != 0) {
        device_printf(dev, "%s: setup interface failed!\n",
            __func__);
        error = EIO;
        goto out;
    }

    INIT_DBG_DEV(dev, "Queue memory and interface setup");

    /* Do queue interrupt setup */
    ixlv_assign_msix(sc);

    /* Start AdminQ taskqueue */
    ixlv_init_taskqueue(sc);

    /* Initialize stats */
    bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
    ixlv_add_sysctls(sc);

    /* Register for VLAN events */
    vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
        ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
    vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
        ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

    /* We want AQ enabled early */
    ixlv_enable_adminq_irq(hw);

    /* Set things up to run init */
    sc->init_state = IXLV_INIT_READY;

    ixl_vc_init_mgr(sc, &sc->vc_mgr);

    INIT_DBG_DEV(dev, "end");
    return (error);

out:
    ixlv_free_queues(vsi);
err_res_buf:
    free(sc->vf_res, M_DEVBUF);
err_aq:
    i40e_shutdown_adminq(hw);
err_pci_res:
    ixlv_free_pci_resources(sc);
err_early:
    mtx_destroy(&sc->mtx);
    ixlv_free_filters(sc);
    INIT_DBG_DEV(dev, "end: error %d", error);
    return (error);
}
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
ixlv_detach(device_t dev)
{
    struct ixlv_sc  *sc = device_get_softc(dev);
    struct ixl_vsi  *vsi = &sc->vsi;

    INIT_DBG_DEV(dev, "begin");

    /* Make sure VLANs are not using driver */
    if (vsi->ifp->if_vlantrunk != NULL) {
        if_printf(vsi->ifp, "Vlan in use, detach first\n");
        INIT_DBG_DEV(dev, "end");
        return (EBUSY);
    }

    /* Stop the interface if it is running */
    ether_ifdetach(vsi->ifp);
    if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
        mtx_lock(&sc->mtx);
        ixlv_stop(sc);
        mtx_unlock(&sc->mtx);
    }

    /* Unregister VLAN events */
    if (vsi->vlan_attach != NULL)
        EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
    if (vsi->vlan_detach != NULL)
        EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

    /* Drain the VC mgr callout */
    callout_drain(&sc->vc_mgr.callout);

    i40e_shutdown_adminq(&sc->hw);
    taskqueue_free(sc->tq);
    if_free(vsi->ifp);
    free(sc->vf_res, M_DEVBUF);
    ixlv_free_pci_resources(sc);
    ixlv_free_queues(vsi);
    mtx_destroy(&sc->mtx);
    ixlv_free_filters(sc);

    bus_generic_detach(dev);
    INIT_DBG_DEV(dev, "end");
    return (0);
}
/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixlv_shutdown(device_t dev)
{
    struct ixlv_sc *sc = device_get_softc(dev);

    INIT_DBG_DEV(dev, "begin");

    mtx_lock(&sc->mtx);
    ixlv_stop(sc);
    mtx_unlock(&sc->mtx);

    INIT_DBG_DEV(dev, "end");
    return (0);
}
/*
 * Configure TXCSUM(IPV6) and TSO(4/6)
 *    - the hardware handles these together so we
 *      need to tweak them in tandem
 */
static void
ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
    /* Enable/disable TXCSUM/TSO4 */
    if (!(ifp->if_capenable & IFCAP_TXCSUM)
        && !(ifp->if_capenable & IFCAP_TSO4)) {
        if (mask & IFCAP_TXCSUM) {
            ifp->if_capenable |= IFCAP_TXCSUM;
            /* enable TXCSUM, restore TSO if previously enabled */
            if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
                vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                ifp->if_capenable |= IFCAP_TSO4;
            }
        }
        else if (mask & IFCAP_TSO4) {
            ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
            vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
            if_printf(ifp,
                "TSO4 requires txcsum, enabling both...\n");
        }
    } else if ((ifp->if_capenable & IFCAP_TXCSUM)
        && !(ifp->if_capenable & IFCAP_TSO4)) {
        if (mask & IFCAP_TXCSUM)
            ifp->if_capenable &= ~IFCAP_TXCSUM;
        else if (mask & IFCAP_TSO4)
            ifp->if_capenable |= IFCAP_TSO4;
    } else if ((ifp->if_capenable & IFCAP_TXCSUM)
        && (ifp->if_capenable & IFCAP_TSO4)) {
        if (mask & IFCAP_TXCSUM) {
            vsi->flags |= IXL_FLAGS_KEEP_TSO4;
            ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
            if_printf(ifp,
                "TSO4 requires txcsum, disabling both...\n");
        } else if (mask & IFCAP_TSO4)
            ifp->if_capenable &= ~IFCAP_TSO4;
    }
    /* Enable/disable TXCSUM_IPV6/TSO6 */
    if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
        && !(ifp->if_capenable & IFCAP_TSO6)) {
        if (mask & IFCAP_TXCSUM_IPV6) {
            ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
            if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
                vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                ifp->if_capenable |= IFCAP_TSO6;
            }
        } else if (mask & IFCAP_TSO6) {
            ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
            vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
            if_printf(ifp,
                "TSO6 requires txcsum6, enabling both...\n");
        }
    } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
        && !(ifp->if_capenable & IFCAP_TSO6)) {
        if (mask & IFCAP_TXCSUM_IPV6)
            ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
        else if (mask & IFCAP_TSO6)
            ifp->if_capenable |= IFCAP_TSO6;
    } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
        && (ifp->if_capenable & IFCAP_TSO6)) {
        if (mask & IFCAP_TXCSUM_IPV6) {
            vsi->flags |= IXL_FLAGS_KEEP_TSO6;
            ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
            if_printf(ifp,
                "TSO6 requires txcsum6, disabling both...\n");
        } else if (mask & IFCAP_TSO6)
            ifp->if_capenable &= ~IFCAP_TSO6;
    }
}
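/*
 * Behavior sketch of the coupling above: with both TXCSUM and TSO4 on,
 * "ifconfig ixlvN -txcsum" clears both but records TSO4 in
 * IXL_FLAGS_KEEP_TSO4, so a later "ifconfig ixlvN txcsum" restores TSO4
 * as well. Requesting TSO4 alone while TXCSUM is off enables both, since
 * the hardware cannot do TSO without checksum offload. IPv6 follows the
 * same pattern with TXCSUM_IPV6/TSO6.
 */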
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixlv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
static int
ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
    struct ixl_vsi  *vsi = ifp->if_softc;
    struct ixlv_sc  *sc = vsi->back;
    struct ifreq    *ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
    struct ifaddr   *ifa = (struct ifaddr *)data;
    bool            avoid_reset = FALSE;
#endif
    int             error = 0;

    switch (command) {

    case SIOCSIFADDR:
#ifdef INET
        if (ifa->ifa_addr->sa_family == AF_INET)
            avoid_reset = TRUE;
#endif
#ifdef INET6
        if (ifa->ifa_addr->sa_family == AF_INET6)
            avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
        /*
        ** Calling init results in link renegotiation,
        ** so we avoid doing it when possible.
        */
        if (avoid_reset) {
            ifp->if_flags |= IFF_UP;
            if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                ixlv_init(vsi);
#ifdef INET
            if (!(ifp->if_flags & IFF_NOARP))
                arp_ifinit(ifp, ifa);
#endif
        } else
            error = ether_ioctl(ifp, command, data);
        break;
#endif
    case SIOCSIFMTU:
        IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
        mtx_lock(&sc->mtx);
        if (ifr->ifr_mtu > IXL_MAX_FRAME -
            ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
            error = EINVAL;
            IOCTL_DBG_IF(ifp, "mtu too large");
        } else {
            IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", (u_long)ifp->if_mtu, ifr->ifr_mtu);
            /* Note: ifr_mtu is an int while if_mtu is a u_long */
            ifp->if_mtu = (u_long)ifr->ifr_mtu;
            vsi->max_frame_size =
                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
                + ETHER_VLAN_ENCAP_LEN;
            if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                ixlv_init_locked(sc);
        }
        mtx_unlock(&sc->mtx);
        break;
    case SIOCSIFFLAGS:
        IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
        mtx_lock(&sc->mtx);
        if (ifp->if_flags & IFF_UP) {
            if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                ixlv_init_locked(sc);
        } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
            ixlv_stop(sc);
        sc->if_flags = ifp->if_flags;
        mtx_unlock(&sc->mtx);
        break;
    case SIOCADDMULTI:
        IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            mtx_lock(&sc->mtx);
            ixlv_disable_intr(vsi);
            ixlv_add_multi(vsi);
            ixlv_enable_intr(vsi);
            mtx_unlock(&sc->mtx);
        }
        break;
    case SIOCDELMULTI:
        IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
        if (sc->init_state == IXLV_RUNNING) {
            mtx_lock(&sc->mtx);
            ixlv_disable_intr(vsi);
            ixlv_del_multi(vsi);
            ixlv_enable_intr(vsi);
            mtx_unlock(&sc->mtx);
        }
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
        error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
        break;
    case SIOCSIFCAP:
    {
        int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
        IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");

        ixlv_cap_txcsum_tso(vsi, ifp, mask);

        if (mask & IFCAP_RXCSUM)
            ifp->if_capenable ^= IFCAP_RXCSUM;
        if (mask & IFCAP_RXCSUM_IPV6)
            ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
        if (mask & IFCAP_LRO)
            ifp->if_capenable ^= IFCAP_LRO;
        if (mask & IFCAP_VLAN_HWTAGGING)
            ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
        if (mask & IFCAP_VLAN_HWFILTER)
            ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
        if (mask & IFCAP_VLAN_HWTSO)
            ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            ixlv_init(vsi);
        }
        VLAN_CAPABILITIES(ifp);

        break;
    }

    default:
        IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
        error = ether_ioctl(ifp, command, data);
        break;
    }

    return (error);
}
/*
** To do a reinit on the VF is unfortunately more complicated
** than a physical device, we must have the PF more or less
** completely recreate our memory, so many things that were
** done only once at attach in traditional drivers now must be
** redone at each reinitialization. This function does that
** 'prelude' so we can then call the normal locked init code.
*/
int
ixlv_reinit_locked(struct ixlv_sc *sc)
{
    struct i40e_hw          *hw = &sc->hw;
    struct ixl_vsi          *vsi = &sc->vsi;
    struct ifnet            *ifp = vsi->ifp;
    struct ixlv_mac_filter  *mf, *mf_temp;
    struct ixlv_vlan_filter *vf;
    int                     error = 0;

    INIT_DBG_IF(ifp, "begin");

    if (ifp->if_drv_flags & IFF_DRV_RUNNING)
        ixlv_stop(sc);

    error = ixlv_reset(sc);

    INIT_DBG_IF(ifp, "VF was reset");

    /* set the state in case we went thru RESET */
    sc->init_state = IXLV_RUNNING;

    /*
    ** Resetting the VF drops all filters from hardware;
    ** we need to mark them to be re-added in init.
    */
    SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
        if (mf->flags & IXL_FILTER_DEL) {
            SLIST_REMOVE(sc->mac_filters, mf,
                ixlv_mac_filter, next);
            free(mf, M_DEVBUF);
        } else
            mf->flags |= IXL_FILTER_ADD;
    }
    if (vsi->num_vlans != 0)
        SLIST_FOREACH(vf, sc->vlan_filters, next)
            vf->flags = IXL_FILTER_ADD;
    else { /* clean any stale filters */
        while (!SLIST_EMPTY(sc->vlan_filters)) {
            vf = SLIST_FIRST(sc->vlan_filters);
            SLIST_REMOVE_HEAD(sc->vlan_filters, next);
            free(vf, M_DEVBUF);
        }
    }

    ixlv_enable_adminq_irq(hw);
    ixl_vc_flush(&sc->vc_mgr);

    INIT_DBG_IF(ifp, "end");
    return (error);
}
static void
ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
    enum i40e_status_code code)
{
    struct ixlv_sc *sc;

    sc = arg;

    /*
     * Ignore "Adapter Stopped" message as that happens if an ifconfig down
     * happens while a command is in progress, so we don't print an error
     * in that case.
     */
    if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
        if_printf(sc->vsi.ifp,
            "Error %d waiting for PF to complete operation %d\n",
            code, cmd->request);
    }
}
static void
ixlv_init_locked(struct ixlv_sc *sc)
{
    struct i40e_hw      *hw = &sc->hw;
    struct ixl_vsi      *vsi = &sc->vsi;
    struct ixl_queue    *que = vsi->queues;
    struct ifnet        *ifp = vsi->ifp;
    int                 error = 0;

    INIT_DBG_IF(ifp, "begin");

    IXLV_CORE_LOCK_ASSERT(sc);

    /* Do a reinit first if an init has already been done */
    if ((sc->init_state == IXLV_RUNNING) ||
        (sc->init_state == IXLV_RESET_REQUIRED) ||
        (sc->init_state == IXLV_RESET_PENDING))
        error = ixlv_reinit_locked(sc);
    /* Don't bother with init if we failed reinit */
    if (error)
        goto init_done;

    /* Remove existing MAC filter if new MAC addr is set */
    if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
        error = ixlv_del_mac_filter(sc, hw->mac.addr);
        if (error == 0)
            ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
                IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
                sc);
    }

    /* Check for an LAA mac address... */
    bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);

    ifp->if_hwassist = 0;
    if (ifp->if_capenable & IFCAP_TSO)
        ifp->if_hwassist |= CSUM_TSO;
    if (ifp->if_capenable & IFCAP_TXCSUM)
        ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
    if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
        ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;

    /* Add mac filter for this VF to PF */
    if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
        error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
        if (!error || error == EEXIST)
            ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
                IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
                sc);
    }

    /* Setup vlan's if needed */
    ixlv_setup_vlan_filters(sc);

    /* Prepare the queues for operation */
    for (int i = 0; i < vsi->num_queues; i++, que++) {
        struct rx_ring *rxr = &que->rxr;

        ixl_init_tx_ring(que);

        if (vsi->max_frame_size <= MCLBYTES)
            rxr->mbuf_sz = MCLBYTES;
        else
            rxr->mbuf_sz = MJUMPAGESIZE;
        ixl_init_rx_ring(que);
    }

    /* Configure queues */
    ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
        IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);

    /* Set up RSS */
    ixlv_config_rss(sc);

    /* Map vectors */
    ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
        IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);

    /* Enable queues */
    ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
        IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);

    /* Start the local timer */
    callout_reset(&sc->timer, hz, ixlv_local_timer, sc);

    sc->init_state = IXLV_RUNNING;

init_done:
    INIT_DBG_IF(ifp, "end");
    return;
}
/*
**  Init entry point for the stack
*/
void
ixlv_init(void *arg)
{
    struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
    struct ixlv_sc *sc = vsi->back;
    int retries = 0;

    mtx_lock(&sc->mtx);
    ixlv_init_locked(sc);
    mtx_unlock(&sc->mtx);

    /* Wait for init_locked to finish */
    while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
        && ++retries < IXLV_AQ_MAX_ERR) {
        i40e_msec_delay(25);
    }
    if (retries >= IXLV_AQ_MAX_ERR)
        if_printf(vsi->ifp,
            "Init failed to complete in allotted time!\n");
}
/*
 * ixlv_attach() helper function; gathers information about
 * the (virtual) hardware for use elsewhere in the driver.
 */
static void
ixlv_init_hw(struct ixlv_sc *sc)
{
    struct i40e_hw *hw = &sc->hw;
    device_t dev = sc->dev;

    /* Save off the information about this board */
    hw->vendor_id = pci_get_vendor(dev);
    hw->device_id = pci_get_device(dev);
    hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
    hw->subsystem_vendor_id =
        pci_read_config(dev, PCIR_SUBVEND_0, 2);
    hw->subsystem_device_id =
        pci_read_config(dev, PCIR_SUBDEV_0, 2);

    hw->bus.device = pci_get_slot(dev);
    hw->bus.func = pci_get_function(dev);
}
/*
 * ixlv_attach() helper function; initializes the admin queue
 * and attempts to establish contact with the PF by
 * retrying the initial "API version" message several times
 * or until the PF responds.
 */
static int
ixlv_setup_vc(struct ixlv_sc *sc)
{
    struct i40e_hw *hw = &sc->hw;
    device_t dev = sc->dev;
    int error = 0, ret_error = 0, asq_retries = 0;
    bool send_api_ver_retried = FALSE;

    /* Need to set these AQ parameters before initializing AQ */
    hw->aq.num_arq_entries = IXL_AQ_LEN;
    hw->aq.num_asq_entries = IXL_AQ_LEN;
    hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
    hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

    for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
        /* Initialize admin queue */
        error = i40e_init_adminq(hw);
        if (error) {
            device_printf(dev, "%s: init_adminq failed: %d\n",
                __func__, error);
            ret_error = 1;
            continue;
        }

        INIT_DBG_DEV(dev, "Initialized Admin Queue; starting"
            " send_api_ver attempt %d", i+1);

retry_send:
        /* Send VF's API version */
        error = ixlv_send_api_ver(sc);
        if (error) {
            i40e_shutdown_adminq(hw);
            ret_error = 2;
            device_printf(dev, "%s: unable to send api"
                " version to PF on attempt %d, error %d\n",
                __func__, i+1, error);
        }

        asq_retries = 0;
        while (!i40e_asq_done(hw)) {
            if (++asq_retries > IXLV_AQ_MAX_ERR) {
                i40e_shutdown_adminq(hw);
                DDPRINTF(dev, "Admin Queue timeout "
                    "(waiting for send_api_ver), %d more retries...",
                    IXLV_AQ_MAX_ERR - (i + 1));
                ret_error = 3;
                break;
            }
            i40e_msec_delay(10);
        }
        if (asq_retries > IXLV_AQ_MAX_ERR)
            continue;

        INIT_DBG_DEV(dev, "Sent API version message to PF");

        /* Verify that the VF accepts the PF's API version */
        error = ixlv_verify_api_ver(sc);
        if (error == ETIMEDOUT) {
            if (!send_api_ver_retried) {
                /* Resend message, one more time */
                send_api_ver_retried = TRUE;
                device_printf(dev,
                    "%s: Timeout while verifying API version on first"
                    " try!\n", __func__);
                goto retry_send;
            } else {
                device_printf(dev,
                    "%s: Timeout while verifying API version on second"
                    " try!\n", __func__);
                ret_error = 4;
                break;
            }
        }
        if (error) {
            device_printf(dev,
                "%s: Unable to verify API version,"
                " error %d\n", __func__, error);
            ret_error = 5;
        }
        break;
    }

    if (ret_error >= 4)
        i40e_shutdown_adminq(hw);
    return (ret_error);
}
/*
 * ixlv_attach() helper function; asks the PF for this VF's
 * configuration, and saves the information if it receives it.
 */
static int
ixlv_vf_config(struct ixlv_sc *sc)
{
    struct i40e_hw *hw = &sc->hw;
    device_t dev = sc->dev;
    int bufsz, error = 0, ret_error = 0;
    int asq_retries, retried = 0;

retry_config:
    error = ixlv_send_vf_config_msg(sc);
    if (error) {
        device_printf(dev,
            "%s: Unable to send VF config request, attempt %d,"
            " error %d\n", __func__, retried + 1, error);
        ret_error = 2;
    }

    asq_retries = 0;
    while (!i40e_asq_done(hw)) {
        if (++asq_retries > IXLV_AQ_MAX_ERR) {
            device_printf(dev, "%s: Admin Queue timeout "
                "(waiting for send_vf_config_msg), attempt %d\n",
                __func__, retried + 1);
            ret_error = 3;
            goto fail;
        }
        i40e_msec_delay(10);
    }

    INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
        retried + 1);

    if (!sc->vf_res) {
        bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
            (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
        sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
        if (!sc->vf_res) {
            device_printf(dev,
                "%s: Unable to allocate memory for VF configuration"
                " message from PF on attempt %d\n", __func__, retried + 1);
            ret_error = 9;
            goto fail;
        }
    }

    /* Check for VF config response */
    error = ixlv_get_vf_config(sc);
    if (error == ETIMEDOUT) {
        /* The 1st time we timeout, send the configuration message again */
        if (!retried) {
            retried++;
            goto retry_config;
        }
        device_printf(dev,
            "%s: ixlv_get_vf_config() timed out waiting for a response\n",
            __func__);
    }
    if (error) {
        device_printf(dev,
            "%s: Unable to get VF configuration from PF after %d tries!\n",
            __func__, retried + 1);
        ret_error = 7;
    }
    goto done;

fail:
    free(sc->vf_res, M_DEVBUF);
    sc->vf_res = NULL;

done:
    return (ret_error);
}
/*
 * Allocate MSI/X vectors, setup the AQ vector early
 */
static int
ixlv_init_msix(struct ixlv_sc *sc)
{
    device_t dev = sc->dev;
    int rid, want, vectors, queues, available;

    rid = PCIR_BAR(IXL_BAR);
    sc->msix_mem = bus_alloc_resource_any(dev,
        SYS_RES_MEMORY, &rid, RF_ACTIVE);
    if (!sc->msix_mem) {
        /* May not be enabled */
        device_printf(sc->dev,
            "Unable to map MSIX table\n");
        goto fail;
    }

    available = pci_msix_count(dev);
    if (available == 0) { /* system has msix disabled */
        bus_release_resource(dev, SYS_RES_MEMORY,
            rid, sc->msix_mem);
        sc->msix_mem = NULL;
        goto fail;
    }

    /* Figure out a reasonable auto config value */
    queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;

    /* Override with hardcoded value if sane */
    if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues))
        queues = ixlv_max_queues;

#ifdef RSS
    /* If we're doing RSS, clamp at the number of RSS buckets */
    if (queues > rss_getnumbuckets())
        queues = rss_getnumbuckets();
#endif

    /* Enforce the VF max value */
    if (queues > IXLV_MAX_QUEUES)
        queues = IXLV_MAX_QUEUES;

    /*
    ** Want one vector (RX/TX pair) per queue
    ** plus an additional for the admin queue.
    */
    want = queues + 1;
    if (want <= available)  /* Have enough */
        vectors = want;
    else {
        device_printf(sc->dev,
            "MSIX Configuration Problem, "
            "%d vectors available but %d wanted!\n",
            available, want);
        return (0);
    }

#ifdef RSS
    /*
     * If we're doing RSS, the number of queues needs to
     * match the number of RSS buckets that are configured.
     *
     * + If there's more queues than RSS buckets, we'll end
     *   up with queues that get no traffic.
     *
     * + If there's more RSS buckets than queues, we'll end
     *   up having multiple RSS buckets map to the same queue,
     *   so there'll be some contention.
     */
    if (queues != rss_getnumbuckets()) {
        device_printf(dev,
            "%s: queues (%d) != RSS buckets (%d)"
            "; performance will be impacted.\n",
            __func__, queues, rss_getnumbuckets());
    }
#endif

    if (pci_alloc_msix(dev, &vectors) == 0) {
        device_printf(sc->dev,
            "Using MSIX interrupts with %d vectors\n", vectors);
        sc->msix = vectors;
        sc->vsi.num_queues = queues;
    }

    /*
    ** Explicitly set the guest PCI BUSMASTER capability
    ** and we must rewrite the ENABLE in the MSIX control
    ** register again at this point to cause the host to
    ** successfully initialize us.
    */
    {
        u16 pci_cmd_word;
        int msix_ctrl;

        pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
        pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
        pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
        pci_find_cap(dev, PCIY_MSIX, &rid);
        rid += PCIR_MSIX_CTRL;
        msix_ctrl = pci_read_config(dev, rid, 2);
        msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
        pci_write_config(dev, rid, msix_ctrl, 2);
    }

    /* Next we need to setup the vector for the Admin Queue */
    rid = 1;    /* zero vector + 1 */
    sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
        &rid, RF_SHAREABLE | RF_ACTIVE);
    if (sc->res == NULL) {
        device_printf(dev, "Unable to allocate"
            " bus resource: AQ interrupt\n");
        goto fail;
    }
    if (bus_setup_intr(dev, sc->res,
        INTR_TYPE_NET | INTR_MPSAFE, NULL,
        ixlv_msix_adminq, sc, &sc->tag)) {
        sc->res = NULL;
        device_printf(dev, "Failed to register AQ handler");
        goto fail;
    }
    bus_describe_intr(dev, sc->res, sc->tag, "adminq");

    return (vectors);

fail:
    /* The VF driver MUST use MSIX */
    return (0);
}
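/*
 * Worked example of the vector budgeting above (hypothetical numbers):
 * with mp_ncpus = 8 and pci_msix_count() = 5, queues = min(8, 5 - 1) = 4
 * and want = queues + 1 = 5: one vector per RX/TX queue pair plus one for
 * the admin queue. hw.ixlv.max_queues, the RSS bucket count, and
 * IXLV_MAX_QUEUES can only lower the queue count from that starting point.
 */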
static int
ixlv_allocate_pci_resources(struct ixlv_sc *sc)
{
    int rid;
    device_t dev = sc->dev;

    /* Map BAR0 */
    rid = PCIR_BAR(0);
    sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &rid, RF_ACTIVE);

    if (!(sc->pci_mem)) {
        device_printf(dev, "Unable to allocate bus resource: memory\n");
        return (ENXIO);
    }

    sc->osdep.mem_bus_space_tag =
        rman_get_bustag(sc->pci_mem);
    sc->osdep.mem_bus_space_handle =
        rman_get_bushandle(sc->pci_mem);
    sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
    sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
    sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;

    sc->hw.back = &sc->osdep;

    /* Disable adminq interrupts */
    ixlv_disable_adminq_irq(&sc->hw);

    /*
    ** Now setup MSI/X, it will return
    ** us the number of supported vectors
    */
    sc->msix = ixlv_init_msix(sc);

    /* We fail without MSIX support */
    if (sc->msix == 0)
        return (ENXIO);

    return (0);
}
static void
ixlv_free_pci_resources(struct ixlv_sc *sc)
{
    struct ixl_vsi      *vsi = &sc->vsi;
    struct ixl_queue    *que = vsi->queues;
    device_t            dev = sc->dev;

    /* We may get here before stations are setup */
    if (que == NULL)
        goto early;

    /*
    ** Release all msix queue resources:
    */
    for (int i = 0; i < vsi->num_queues; i++, que++) {
        int rid = que->msix + 1;
        if (que->tag != NULL) {
            bus_teardown_intr(dev, que->res, que->tag);
            que->tag = NULL;
        }
        if (que->res != NULL)
            bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
    }

early:
    /* Clean the AdminQ interrupt */
    if (sc->tag != NULL) {
        bus_teardown_intr(dev, sc->res, sc->tag);
        sc->tag = NULL;
    }
    if (sc->res != NULL)
        bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);

    pci_release_msi(dev);

    if (sc->msix_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            PCIR_BAR(IXL_BAR), sc->msix_mem);

    if (sc->pci_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            PCIR_BAR(0), sc->pci_mem);

    return;
}
/*
 * Create taskqueue and tasklet for Admin Queue interrupts.
 */
static int
ixlv_init_taskqueue(struct ixlv_sc *sc)
{
    int error = 0;

    TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);

    sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
        taskqueue_thread_enqueue, &sc->tq);
    taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
        device_get_nameunit(sc->dev));

    return (error);
}
/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers for the VSI queues
 *
 **********************************************************************/
static int
ixlv_assign_msix(struct ixlv_sc *sc)
{
    device_t            dev = sc->dev;
    struct ixl_vsi      *vsi = &sc->vsi;
    struct ixl_queue    *que = vsi->queues;
    struct tx_ring      *txr;
    int                 error, rid, vector = 1;
#ifdef RSS
    cpuset_t            cpu_mask;
#endif

    for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
        int cpu_id = i;
        rid = vector + 1;
        txr = &que->txr;
        que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (que->res == NULL) {
            device_printf(dev, "Unable to allocate"
                " bus resource: que interrupt [%d]\n", vector);
            return (ENXIO);
        }
        /* Set the handler function */
        error = bus_setup_intr(dev, que->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixlv_msix_que, que, &que->tag);
        if (error) {
            que->res = NULL;
            device_printf(dev, "Failed to register que handler");
            return (error);
        }
        bus_describe_intr(dev, que->res, que->tag, "que %d", i);
        /* Bind the vector to a CPU */
#ifdef RSS
        cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
        bus_bind_intr(dev, que->res, cpu_id);
        que->msix = vector;
        vsi->que_mask |= (u64)(1 << que->msix);
        TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
        TASK_INIT(&que->task, 0, ixlv_handle_que, que);
        que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
            taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
        CPU_SETOF(cpu_id, &cpu_mask);
        taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
            &cpu_mask, "%s (bucket %d)",
            device_get_nameunit(dev), cpu_id);
#else
        taskqueue_start_threads(&que->tq, 1, PI_NET,
            "%s que", device_get_nameunit(dev));
#endif
    }

    return (0);
}
/*
** Requests a VF reset from the PF.
**
** Requires the VF's Admin Queue to be initialized.
*/
static int
ixlv_reset(struct ixlv_sc *sc)
{
    struct i40e_hw  *hw = &sc->hw;
    device_t        dev = sc->dev;
    int             error = 0;

    /* Ask the PF to reset us if we are initiating */
    if (sc->init_state != IXLV_RESET_PENDING)
        ixlv_request_reset(sc);

    i40e_msec_delay(100);
    error = ixlv_reset_complete(hw);
    if (error) {
        device_printf(dev, "%s: VF reset failed\n",
            __func__);
        return (error);
    }

    error = i40e_shutdown_adminq(hw);
    if (error) {
        device_printf(dev, "%s: shutdown_adminq failed: %d\n",
            __func__, error);
        return (error);
    }

    error = i40e_init_adminq(hw);
    if (error) {
        device_printf(dev, "%s: init_adminq failed: %d\n",
            __func__, error);
        return (error);
    }

    return (error);
}
static int
ixlv_reset_complete(struct i40e_hw *hw)
{
    u32 reg;

    for (int i = 0; i < 100; i++) {
        reg = rd32(hw, I40E_VFGEN_RSTAT) &
            I40E_VFGEN_RSTAT_VFR_STATE_MASK;

        if ((reg == I40E_VFR_VFACTIVE) ||
            (reg == I40E_VFR_COMPLETED))
            return (0);
        i40e_msec_delay(100);
    }

    return (EBUSY);
}
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static int
ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
{
    struct ifnet        *ifp;
    struct ixl_vsi      *vsi = &sc->vsi;
    struct ixl_queue    *que = vsi->queues;

    INIT_DBG_DEV(dev, "begin");

    ifp = vsi->ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL) {
        device_printf(dev, "%s: could not allocate ifnet"
            " structure!\n", __func__);
        return (-1);
    }

    if_initname(ifp, device_get_name(dev), device_get_unit(dev));

    ifp->if_mtu = ETHERMTU;
    ifp->if_baudrate = 4000000000;  // ??
    ifp->if_init = ixlv_init;
    ifp->if_softc = vsi;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = ixlv_ioctl;

#if __FreeBSD_version >= 1100000
    if_setgetcounterfn(ifp, ixl_get_counter);
#endif

    ifp->if_transmit = ixl_mq_start;

    ifp->if_qflush = ixl_qflush;
    ifp->if_snd.ifq_maxlen = que->num_desc - 2;

    ether_ifattach(ifp, sc->hw.mac.addr);

    vsi->max_frame_size =
        ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
        + ETHER_VLAN_ENCAP_LEN;

    /*
     * Tell the upper layer(s) we support long frames.
     */
    ifp->if_hdrlen = sizeof(struct ether_vlan_header);

    ifp->if_capabilities |= IFCAP_HWCSUM;
    ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
    ifp->if_capabilities |= IFCAP_TSO;
    ifp->if_capabilities |= IFCAP_JUMBO_MTU;

    ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
                         |  IFCAP_VLAN_HWTSO
                         |  IFCAP_VLAN_MTU
                         |  IFCAP_VLAN_HWCSUM
                         |  IFCAP_LRO;
    ifp->if_capenable = ifp->if_capabilities;

    /*
    ** Don't turn this on by default, if vlans are
    ** created on another pseudo device (eg. lagg)
    ** then vlan events are not passed thru, breaking
    ** operation, but with HW FILTER off it works. If
    ** using vlans directly on the ixl driver you can
    ** enable this and get full hardware tag filtering.
    */
    ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
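    /*
     * Example: because HWFILTER is advertised in if_capabilities but not
     * enabled in if_capenable, an admin running VLANs directly on this
     * interface (see the caveat above about pseudo devices such as lagg)
     * can turn it on explicitly:
     *
     *     ifconfig ixlv0 vlanhwfilter
     */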
    /*
     * Specify the media types supported by this adapter and register
     * callbacks to update media and link information
     */
    ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
        ixlv_media_status);

    // JFV Add media types later?

    ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

    INIT_DBG_DEV(dev, "end");
    return (0);
}
/*
** Allocate and setup the interface queues
*/
static int
ixlv_setup_queues(struct ixlv_sc *sc)
{
    device_t            dev = sc->dev;
    struct ixl_vsi      *vsi;
    struct ixl_queue    *que;
    struct tx_ring      *txr;
    struct rx_ring      *rxr;
    int                 rsize, tsize;
    int                 error = I40E_SUCCESS;

    vsi = &sc->vsi;
    vsi->back = (void *)sc;
    vsi->hw = &sc->hw;
    vsi->num_vlans = 0;

    /* Get memory for the station queues */
    if (!(vsi->queues =
        (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
        vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate queue memory\n");
        error = ENOMEM;
        goto early;
    }

    for (int i = 0; i < vsi->num_queues; i++) {
        que = &vsi->queues[i];
        que->num_desc = ixlv_ringsz;
        que->me = i;
        que->vsi = vsi;
        /* mark the queue as active */
        vsi->active_queues |= (u64)1 << que->me;

        txr = &que->txr;
        txr->que = que;
        txr->tail = I40E_QTX_TAIL1(que->me);
        /* Initialize the TX lock */
        snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
            device_get_nameunit(dev), que->me);
        mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
        /*
        ** Create the TX descriptor ring, the extra int is
        ** added as the location for HEAD WB.
        */
        tsize = roundup2((que->num_desc *
            sizeof(struct i40e_tx_desc)) +
            sizeof(u32), DBA_ALIGN);
        if (i40e_allocate_dma_mem(&sc->hw,
            &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
            device_printf(dev,
                "Unable to allocate TX Descriptor memory\n");
            error = ENOMEM;
            goto fail;
        }
        txr->base = (struct i40e_tx_desc *)txr->dma.va;
        bzero((void *)txr->base, tsize);
        /* Now allocate transmit soft structs for the ring */
        if (ixl_allocate_tx_data(que)) {
            device_printf(dev,
                "Critical Failure setting up TX structures\n");
            error = ENOMEM;
            goto fail;
        }
        /* Allocate a buf ring */
        txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
            M_WAITOK, &txr->mtx);
        if (txr->br == NULL) {
            device_printf(dev,
                "Critical Failure setting up TX buf ring\n");
            error = ENOMEM;
            goto fail;
        }

        /*
         * Next the RX queues...
         */
        rsize = roundup2(que->num_desc *
            sizeof(union i40e_rx_desc), DBA_ALIGN);
        rxr = &que->rxr;
        rxr->que = que;
        rxr->tail = I40E_QRX_TAIL1(que->me);

        /* Initialize the RX side lock */
        snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
            device_get_nameunit(dev), que->me);
        mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);

        if (i40e_allocate_dma_mem(&sc->hw,
            &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
            device_printf(dev,
                "Unable to allocate RX Descriptor memory\n");
            error = ENOMEM;
            goto fail;
        }
        rxr->base = (union i40e_rx_desc *)rxr->dma.va;
        bzero((void *)rxr->base, rsize);

        /* Allocate receive soft structs for the ring */
        if (ixl_allocate_rx_data(que)) {
            device_printf(dev,
                "Critical Failure setting up receive structs\n");
            error = ENOMEM;
            goto fail;
        }
    }

    return (0);

fail:
    for (int i = 0; i < vsi->num_queues; i++) {
        que = &vsi->queues[i];
        rxr = &que->rxr;
        txr = &que->txr;
        if (rxr->base)
            i40e_free_dma_mem(&sc->hw, &rxr->dma);
        if (txr->base)
            i40e_free_dma_mem(&sc->hw, &txr->dma);
    }
    free(vsi->queues, M_DEVBUF);

early:
    return (error);
}
/*
** This routine is run via a vlan config EVENT,
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA, init will
** repopulate the real table.
*/
static void
ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
    struct ixl_vsi          *vsi = arg;
    struct ixlv_sc          *sc = vsi->back;
    struct ixlv_vlan_filter *v;

    if (ifp->if_softc != arg)   /* Not our event */
        return;

    if ((vtag == 0) || (vtag > 4095))   /* Invalid */
        return;

    /* Sanity check - make sure it doesn't already exist */
    SLIST_FOREACH(v, sc->vlan_filters, next) {
        if (v->vlan == vtag)
            return;
    }

    mtx_lock(&sc->mtx);
    ++vsi->num_vlans;
    v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
    SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
    v->vlan = vtag;
    v->flags = IXL_FILTER_ADD;
    ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
        IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
    mtx_unlock(&sc->mtx);
    return;
}
/*
** This routine is run via a vlan
** unconfig EVENT, remove our entry
** in the soft vfta.
*/
static void
ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
    struct ixl_vsi          *vsi = arg;
    struct ixlv_sc          *sc = vsi->back;
    struct ixlv_vlan_filter *v;
    int                     i = 0;

    if (ifp->if_softc != arg)
        return;

    if ((vtag == 0) || (vtag > 4095))   /* Invalid */
        return;

    mtx_lock(&sc->mtx);
    SLIST_FOREACH(v, sc->vlan_filters, next) {
        if (v->vlan == vtag) {
            v->flags = IXL_FILTER_DEL;
            ++i;
            --vsi->num_vlans;
        }
    }
    if (i)
        ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
            IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
    mtx_unlock(&sc->mtx);
    return;
}
/*
** Get a new filter and add it to the mac filter list.
*/
static struct ixlv_mac_filter *
ixlv_get_mac_filter(struct ixlv_sc *sc)
{
    struct ixlv_mac_filter *f;

    f = malloc(sizeof(struct ixlv_mac_filter),
        M_DEVBUF, M_NOWAIT | M_ZERO);
    if (f)
        SLIST_INSERT_HEAD(sc->mac_filters, f, next);

    return (f);
}
/*
** Find the filter with matching MAC address
*/
static struct ixlv_mac_filter *
ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
{
    struct ixlv_mac_filter *f;
    bool match = FALSE;

    SLIST_FOREACH(f, sc->mac_filters, next) {
        if (cmp_etheraddr(f->macaddr, macaddr)) {
            match = TRUE;
            break;
        }
    }

    if (match == FALSE)
        f = NULL;
    return (f);
}
/*
** Admin Queue interrupt handler
*/
static void
ixlv_msix_adminq(void *arg)
{
    struct ixlv_sc  *sc = arg;
    struct i40e_hw  *hw = &sc->hw;
    u32             reg, mask;

    reg = rd32(hw, I40E_VFINT_ICR01);
    mask = rd32(hw, I40E_VFINT_ICR0_ENA1);

    reg = rd32(hw, I40E_VFINT_DYN_CTL01);
    reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
    wr32(hw, I40E_VFINT_DYN_CTL01, reg);

    /* schedule task */
    taskqueue_enqueue(sc->tq, &sc->aq_irq);
    return;
}
void
ixlv_enable_intr(struct ixl_vsi *vsi)
{
    struct i40e_hw      *hw = vsi->hw;
    struct ixl_queue    *que = vsi->queues;

    ixlv_enable_adminq_irq(hw);
    for (int i = 0; i < vsi->num_queues; i++, que++)
        ixlv_enable_queue_irq(hw, que->me);
}

void
ixlv_disable_intr(struct ixl_vsi *vsi)
{
    struct i40e_hw      *hw = vsi->hw;
    struct ixl_queue    *que = vsi->queues;

    ixlv_disable_adminq_irq(hw);
    for (int i = 0; i < vsi->num_queues; i++, que++)
        ixlv_disable_queue_irq(hw, que->me);
}
static void
ixlv_disable_adminq_irq(struct i40e_hw *hw)
{
    wr32(hw, I40E_VFINT_DYN_CTL01, 0);
    wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
    /* flush */
    rd32(hw, I40E_VFGEN_RSTAT);
    return;
}

static void
ixlv_enable_adminq_irq(struct i40e_hw *hw)
{
    wr32(hw, I40E_VFINT_DYN_CTL01,
        I40E_VFINT_DYN_CTL01_INTENA_MASK |
        I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
    wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
    /* flush */
    rd32(hw, I40E_VFGEN_RSTAT);
    return;
}

static void
ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
{
    u32 reg;

    reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
        I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
    wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
}

static void
ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
{
    wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
    rd32(hw, I40E_VFGEN_RSTAT);
    return;
}
/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
static void
ixlv_set_queue_rx_itr(struct ixl_queue *que)
{
    struct ixl_vsi  *vsi = que->vsi;
    struct i40e_hw  *hw = vsi->hw;
    struct rx_ring  *rxr = &que->rxr;
    u16             rx_itr;
    u16             rx_latency = 0;
    int             rx_bytes;

    /* Idle, do nothing */
    if (rxr->bytes == 0)
        return;

    if (ixlv_dynamic_rx_itr) {
        rx_bytes = rxr->bytes/rxr->itr;
        rx_itr = rxr->itr;

        /* Adjust latency range */
        switch (rxr->latency) {
        case IXL_LOW_LATENCY:
            if (rx_bytes > 10) {
                rx_latency = IXL_AVE_LATENCY;
                rx_itr = IXL_ITR_20K;
            }
            break;
        case IXL_AVE_LATENCY:
            if (rx_bytes > 20) {
                rx_latency = IXL_BULK_LATENCY;
                rx_itr = IXL_ITR_8K;
            } else if (rx_bytes <= 10) {
                rx_latency = IXL_LOW_LATENCY;
                rx_itr = IXL_ITR_100K;
            }
            break;
        case IXL_BULK_LATENCY:
            if (rx_bytes <= 20) {
                rx_latency = IXL_AVE_LATENCY;
                rx_itr = IXL_ITR_20K;
            }
            break;
        }

        rxr->latency = rx_latency;

        if (rx_itr != rxr->itr) {
            /* do an exponential smoothing */
            rx_itr = (10 * rx_itr * rxr->itr) /
                ((9 * rx_itr) + rxr->itr);
            rxr->itr = rx_itr & IXL_MAX_ITR;
            wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
                que->me), rxr->itr);
        }
    } else { /* We may have toggled to non-dynamic */
        if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
            vsi->rx_itr_setting = ixlv_rx_itr;
        /* Update the hardware if needed */
        if (rxr->itr != vsi->rx_itr_setting) {
            rxr->itr = vsi->rx_itr_setting;
            wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
                que->me), rxr->itr);
        }
    }
    rxr->bytes = 0;
    rxr->packets = 0;
    return;
}
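/*
 * Worked example of the smoothing above (hypothetical values): when the
 * target rate changes, the new ITR is not applied directly but blended
 * with the old one as new = (10 * target * old) / (9 * target + old).
 * If old already equals the target this yields the target unchanged;
 * otherwise the value approaches the target over several update periods
 * instead of jumping, which damps oscillation between latency ranges.
 */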
/*
** Provide an update to the queue TX
** interrupt moderation value.
*/
static void
ixlv_set_queue_tx_itr(struct ixl_queue *que)
{
    struct ixl_vsi  *vsi = que->vsi;
    struct i40e_hw  *hw = vsi->hw;
    struct tx_ring  *txr = &que->txr;
    u16             tx_itr;
    u16             tx_latency = 0;
    int             tx_bytes;

    /* Idle, do nothing */
    if (txr->bytes == 0)
        return;

    if (ixlv_dynamic_tx_itr) {
        tx_bytes = txr->bytes/txr->itr;
        tx_itr = txr->itr;

        switch (txr->latency) {
        case IXL_LOW_LATENCY:
            if (tx_bytes > 10) {
                tx_latency = IXL_AVE_LATENCY;
                tx_itr = IXL_ITR_20K;
            }
            break;
        case IXL_AVE_LATENCY:
            if (tx_bytes > 20) {
                tx_latency = IXL_BULK_LATENCY;
                tx_itr = IXL_ITR_8K;
            } else if (tx_bytes <= 10) {
                tx_latency = IXL_LOW_LATENCY;
                tx_itr = IXL_ITR_100K;
            }
            break;
        case IXL_BULK_LATENCY:
            if (tx_bytes <= 20) {
                tx_latency = IXL_AVE_LATENCY;
                tx_itr = IXL_ITR_20K;
            }
            break;
        }

        txr->latency = tx_latency;

        if (tx_itr != txr->itr) {
            /* do an exponential smoothing */
            tx_itr = (10 * tx_itr * txr->itr) /
                ((9 * tx_itr) + txr->itr);
            txr->itr = tx_itr & IXL_MAX_ITR;
            wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
                que->me), txr->itr);
        }

    } else { /* We may have toggled to non-dynamic */
        if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
            vsi->tx_itr_setting = ixlv_tx_itr;
        /* Update the hardware if needed */
        if (txr->itr != vsi->tx_itr_setting) {
            txr->itr = vsi->tx_itr_setting;
            wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
                que->me), txr->itr);
        }
    }
    txr->bytes = 0;
    txr->packets = 0;
    return;
}
/*
** MSIX Interrupt Handlers and Tasklets
*/
static void
ixlv_handle_que(void *context, int pending)
{
    struct ixl_queue *que = context;
    struct ixl_vsi  *vsi = que->vsi;
    struct i40e_hw  *hw = vsi->hw;
    struct tx_ring  *txr = &que->txr;
    struct ifnet    *ifp = vsi->ifp;
    bool            more;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
        more = ixl_rxeof(que, IXL_RX_LIMIT);
        mtx_lock(&txr->mtx);
        ixl_txeof(que);
        if (!drbr_empty(ifp, txr->br))
            ixl_mq_start_locked(ifp, txr);
        mtx_unlock(&txr->mtx);
        if (more) {
            taskqueue_enqueue(que->tq, &que->task);
            return;
        }
    }

    /* Re-enable this interrupt */
    ixlv_enable_queue_irq(hw, que->me);
    return;
}
/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixlv_msix_que(void *arg)
{
    struct ixl_queue *que = arg;
    struct ixl_vsi  *vsi = que->vsi;
    struct i40e_hw  *hw = vsi->hw;
    struct tx_ring  *txr = &que->txr;
    bool            more_tx, more_rx;

    /* Spurious interrupts are ignored */
    if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
        return;

    ++que->irqs;

    more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

    mtx_lock(&txr->mtx);
    more_tx = ixl_txeof(que);
    /*
    ** Make certain that if the stack
    ** has anything queued the task gets
    ** scheduled to handle it.
    */
    if (!drbr_empty(vsi->ifp, txr->br))
        more_tx = 1;
    mtx_unlock(&txr->mtx);

    ixlv_set_queue_rx_itr(que);
    ixlv_set_queue_tx_itr(que);

    if (more_tx || more_rx)
        taskqueue_enqueue(que->tq, &que->task);
    else
        ixlv_enable_queue_irq(hw, que->me);

    return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
    struct ixl_vsi  *vsi = ifp->if_softc;
    struct ixlv_sc  *sc = vsi->back;

    INIT_DBG_IF(ifp, "begin");

    mtx_lock(&sc->mtx);

    ixlv_update_link_status(sc);

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!sc->link_up) {
        mtx_unlock(&sc->mtx);
        INIT_DBG_IF(ifp, "end: link not up");
        return;
    }

    ifmr->ifm_status |= IFM_ACTIVE;
    /* Hardware is always full-duplex */
    ifmr->ifm_active |= IFM_FDX;
    mtx_unlock(&sc->mtx);
    INIT_DBG_IF(ifp, "end");
    return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixlv_media_change(struct ifnet * ifp)
{
    struct ixl_vsi *vsi = ifp->if_softc;
    struct ifmedia *ifm = &vsi->media;

    INIT_DBG_IF(ifp, "begin");

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);

    INIT_DBG_IF(ifp, "end");
    return (0);
}
/*********************************************************************
 *  Multicast Initialization
 *
 *  This routine is called by init to reset a fresh state.
 *
 **********************************************************************/
static void
ixlv_init_multi(struct ixl_vsi *vsi)
{
    struct ixlv_mac_filter *f;
    struct ixlv_sc  *sc = vsi->back;
    int             mcnt = 0;

    IOCTL_DBG_IF(vsi->ifp, "begin");

    /* First clear any multicast filters */
    SLIST_FOREACH(f, sc->mac_filters, next) {
        if ((f->flags & IXL_FILTER_USED)
            && (f->flags & IXL_FILTER_MC)) {
            f->flags |= IXL_FILTER_DEL;
            mcnt++;
        }
    }
    if (mcnt > 0)
        ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
            IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
            sc);

    IOCTL_DBG_IF(vsi->ifp, "end");
}
static void
ixlv_add_multi(struct ixl_vsi *vsi)
{
    struct ifmultiaddr  *ifma;
    struct ifnet        *ifp = vsi->ifp;
    struct ixlv_sc      *sc = vsi->back;
    int                 mcnt = 0;

    IOCTL_DBG_IF(ifp, "begin");

    if_maddr_rlock(ifp);
    /*
    ** Get a count, to decide if we
    ** simply use multicast promiscuous.
    */
    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;
        mcnt++;
    }
    if_maddr_runlock(ifp);

    // TODO: Remove -- cannot set promiscuous mode in a VF
    if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
        /* delete all multicast filters */
        ixlv_init_multi(vsi);
        sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
        ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
            IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
            sc);
        IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
        return;
    }

    mcnt = 0;
    if_maddr_rlock(ifp);
    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;
        if (!ixlv_add_mac_filter(sc,
            (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
            IXL_FILTER_MC))
            mcnt++;
    }
    if_maddr_runlock(ifp);
    /*
    ** Notify AQ task that sw filters need to be
    ** added to hw list
    */
    if (mcnt > 0)
        ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
            IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
            sc);

    IOCTL_DBG_IF(ifp, "end");
}
static void
ixlv_del_multi(struct ixl_vsi *vsi)
{
    struct ixlv_mac_filter *f;
    struct ifmultiaddr  *ifma;
    struct ifnet        *ifp = vsi->ifp;
    struct ixlv_sc      *sc = vsi->back;
    int                 mcnt = 0;
    bool                match = FALSE;

    IOCTL_DBG_IF(ifp, "begin");

    /* Search for removed multicast addresses */
    if_maddr_rlock(ifp);
    SLIST_FOREACH(f, sc->mac_filters, next) {
        if ((f->flags & IXL_FILTER_USED)
            && (f->flags & IXL_FILTER_MC)) {
            /* check if mac address in filter is in sc's list */
            match = FALSE;
            TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                    continue;
                u8 *mc_addr =
                    (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
                if (cmp_etheraddr(f->macaddr, mc_addr)) {
                    match = TRUE;
                    break;
                }
            }
            /* if this filter is not in the sc's list, remove it */
            if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
                f->flags |= IXL_FILTER_DEL;
                mcnt++;
                IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
                    MAC_FORMAT_ARGS(f->macaddr));
            }
            else if (match == FALSE)
                IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
                    MAC_FORMAT_ARGS(f->macaddr));
        }
    }
    if_maddr_runlock(ifp);

    if (mcnt > 0)
        ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
            IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
            sc);

    IOCTL_DBG_IF(ifp, "end");
}
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/
2349 ixlv_local_timer(void *arg)
2351 struct ixlv_sc *sc = arg;
2352 struct i40e_hw *hw = &sc->hw;
2353 struct ixl_vsi *vsi = &sc->vsi;
2354 struct ixl_queue *que = vsi->queues;
2355 device_t dev = sc->dev;
2359 IXLV_CORE_LOCK_ASSERT(sc);
2361 /* If Reset is in progress just bail */
2362 if (sc->init_state == IXLV_RESET_PENDING)
2365 /* Check for when PF triggers a VF reset */
2366 val = rd32(hw, I40E_VFGEN_RSTAT) &
2367 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2369 if (val != I40E_VFR_VFACTIVE
2370 && val != I40E_VFR_COMPLETED) {
2371 DDPRINTF(dev, "reset in progress! (%d)", val);
2375 ixlv_request_stats(sc);
2377 /* clean and process any events */
2378 taskqueue_enqueue(sc->tq, &sc->aq_irq);
2381 ** Check status on the queues for a hang
2383 mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
2384 I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK);
2386 for (int i = 0; i < vsi->num_queues; i++,que++) {
2387 /* Any queues with outstanding work get a sw irq */
2389 wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
2391 ** Each time txeof runs without cleaning, but there
2392 ** are uncleaned descriptors it increments busy. If
2393 ** we get to 5 we declare it hung.
2395 if (que->busy == IXL_QUEUE_HUNG) {
2397 /* Mark the queue as inactive */
2398 vsi->active_queues &= ~((u64)1 << que->me);
2401 /* Check if we've come back from hung */
2402 if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
2403 vsi->active_queues |= ((u64)1 << que->me);
2405 if (que->busy >= IXL_MAX_TX_BUSY) {
2406 device_printf(dev,"Warning queue %d "
2407 "appears to be hung!\n", i);
2408 que->busy = IXL_QUEUE_HUNG;
2412 /* Only reset when all queues show hung */
2413 if (hung == vsi->num_queues)
2415 callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
2419 device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
2420 sc->init_state = IXLV_RESET_REQUIRED;
2421 ixlv_init_locked(sc);
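/*
 * Illustrative sketch (not compiled into the driver): the hang heuristic
 * above.  A queue's busy count rises each tick that descriptors are
 * outstanding with no cleaning progress; at the threshold the queue is
 * latched to a sentinel "hung" value, and only when every queue is latched
 * does the driver reinitialize.  Constants and names here are hypothetical
 * stand-ins for IXL_MAX_TX_BUSY / IXL_QUEUE_HUNG.
 */
#if 0
#define DEMO_MAX_BUSY	10		/* no-progress ticks tolerated */
#define DEMO_HUNG	0x80000000U	/* latched sentinel */

/* Returns nonzero when every queue is latched hung (time to reset). */
static int
demo_watchdog(unsigned int busy[], int nqueues)
{
	int hung = 0;

	for (int i = 0; i < nqueues; i++) {
		if (busy[i] == DEMO_HUNG) {	/* already latched */
			hung++;
			continue;
		}
		if (busy[i] >= DEMO_MAX_BUSY)	/* latch it */
			busy[i] = DEMO_HUNG;
	}
	return (hung == nqueues);
}
#endif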
/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
void
ixlv_update_link_status(struct ixlv_sc *sc)
{
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ifnet		*ifp = vsi->ifp;

	if (sc->link_up) {
		if (vsi->link_active == FALSE) {
			if (bootverbose)
				if_printf(ifp, "Link is Up, %d Gbps\n",
				    (sc->link_speed == I40E_LINK_SPEED_40GB) ? 40 : 10);
			vsi->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			if (bootverbose)
				if_printf(ifp, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			vsi->link_active = FALSE;
		}
	}

	return;
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter: it flushes any
 *  pending virtchnl commands, disables the queues, and briefly drains
 *  the admin queue while the stop takes effect.
 *
 **********************************************************************/
static void
ixlv_stop(struct ixlv_sc *sc)
{
	struct ifnet *ifp;
	int start;

	ifp = sc->vsi.ifp;
	INIT_DBG_IF(ifp, "begin");

	IXLV_CORE_LOCK_ASSERT(sc);

	ixl_vc_flush(&sc->vc_mgr);
	ixlv_disable_queues(sc);

	start = ticks;
	while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
	    ((ticks - start) < hz/10))
		ixlv_do_adminq_locked(sc);

	/* Stop the local timer */
	callout_stop(&sc->timer);

	INIT_DBG_IF(ifp, "end");
}
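/*
 * Illustrative sketch (not compiled into the driver): the bounded wait
 * above.  "ticks" advances hz times per second, so the loop drains events
 * for at most hz/10 ticks (about 100 ms) while waiting for the
 * queue-disable confirmation to clear IFF_DRV_RUNNING, then stops the
 * timer regardless.  Helper names here are hypothetical.
 */
#if 0
	int start = ticks;

	while (!stop_confirmed() && (ticks - start) < hz / 10)
		drain_events_once();	/* e.g. one admin-queue pass */
#endif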
/*********************************************************************
 *
 *  Free all of the VSI's queue structures.
 *
 **********************************************************************/
static void
ixlv_free_queues(struct ixl_vsi *vsi)
{
	struct ixlv_sc		*sc = (struct ixlv_sc *)vsi->back;
	struct ixl_queue	*que = vsi->queues;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		struct rx_ring *rxr = &que->rxr;

		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
			continue;
		IXL_TX_LOCK(txr);
		ixl_free_que_tx(que);
		if (txr->base)
			i40e_free_dma_mem(&sc->hw, &txr->dma);
		IXL_TX_UNLOCK(txr);
		IXL_TX_LOCK_DESTROY(txr);

		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
			continue;
		IXL_RX_LOCK(rxr);
		ixl_free_que_rx(que);
		if (rxr->base)
			i40e_free_dma_mem(&sc->hw, &rxr->dma);
		IXL_RX_UNLOCK(rxr);
		IXL_RX_LOCK_DESTROY(rxr);
	}
	free(vsi->queues, M_DEVBUF);
}
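/*
 * Illustrative sketch (not compiled into the driver): the
 * mtx_initialized() guards above keep teardown safe after a partially
 * failed attach -- a ring is released only if setup got far enough to
 * initialize its lock.  Names here are hypothetical.
 */
#if 0
static void
demo_free_rings(struct demo_ring *rings, int n)
{
	for (int i = 0; i < n; i++) {
		if (!rings[i].lock_initialized)	/* setup never got here */
			continue;
		demo_release_ring(&rings[i]);	/* hypothetical teardown */
	}
}
#endif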
/*
** ixlv_config_rss - setup RSS
**
** RSS keys and table are cleared on VF reset.
*/
static void
ixlv_config_rss(struct ixlv_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	struct ixl_vsi	*vsi = &sc->vsi;
	u32		lut = 0;
	u64		set_hena = 0, hena;
	int		i, j, que_id;
#ifdef RSS
	u32		rss_hash_config;
	u32		rss_seed[IXL_KEYSZ];
#else
	u32		rss_seed[IXL_KEYSZ] = {0x41b01687,
			    0x183cfd8c, 0xce880440, 0x580cbc3c,
			    0x35897377, 0x328b25e1, 0x4fa98922,
			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
#endif

	/* Don't set up RSS if using a single queue */
	if (vsi->num_queues == 1) {
		wr32(hw, I40E_VFQF_HENA(0), 0);
		wr32(hw, I40E_VFQF_HENA(1), 0);
		ixl_flush(hw);
		return;
	}

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_seed);
#endif
	/* Fill out hash function seed */
	for (i = 0; i < IXL_KEYSZ; i++)
		wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);

	/* Enable PCTYPES for RSS: */
#ifdef RSS
	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	set_hena =
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
	    ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
	    ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
	    ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
#endif
	hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
	    ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
	hena |= set_hena;
	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));

	// TODO: Fix -- only 3,7,11,15 are filled out, instead of all 16 registers
	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
		if (j == vsi->num_queues)
			j = 0;
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues.)
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % vsi->num_queues;
#else
		que_id = j;
#endif
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (que_id & 0xF);
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3) {
			wr32(hw, I40E_VFQF_HLUT(i), lut);
			DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
		}
	}
	ixl_flush(hw);
}
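/*
 * Illustrative sketch (not compiled into the driver): the HLUT packing
 * above.  Each 32-bit HLUT register holds four one-byte LUT entries; the
 * loop shifts each new entry into the low byte and flushes on every fourth
 * iteration, so with 4 queues the first value written (at i = 3) is
 * 0x00010203.  Note the TODO above: the register index should arguably be
 * i / 4, so that writes land in consecutive registers rather than only in
 * HLUT(3), HLUT(7), ...  All names here are hypothetical.
 */
#if 0
/* Pack 16 one-byte LUT entries, round-robin over nqueues, into 4 regs. */
static void
demo_fill_hlut(int nqueues, unsigned int regs[4])
{
	unsigned int lut = 0;

	for (int i = 0, j = 0; i < 16; i++, j++) {
		if (j == nqueues)
			j = 0;
		lut = (lut << 8) | (j & 0xF);	/* slide entry into low byte */
		if ((i & 3) == 3)
			regs[i >> 2] = lut;	/* every 4th entry: flush */
	}
}
#endif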
/*
** This routine refreshes vlan filters; called by init,
** it scans the filter table and then updates the AQ.
*/
static void
ixlv_setup_vlan_filters(struct ixlv_sc *sc)
{
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixlv_vlan_filter	*f;
	int			cnt = 0;

	if (vsi->num_vlans == 0)
		return;
	/*
	** Scan the filter table for vlan entries,
	** and if found call for the AQ update.
	*/
	SLIST_FOREACH(f, sc->vlan_filters, next)
		if (f->flags & IXL_FILTER_ADD)
			cnt++;
	if (cnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
		    IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
}
/*
** This routine adds new MAC filters to the sc's list;
** these are later added in hardware by sending a virtual
** channel message.
*/
static int
ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
{
	struct ixlv_mac_filter	*f;

	/* Does one already exist? */
	f = ixlv_find_mac_filter(sc, macaddr);
	if (f != NULL) {
		IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
		    MAC_FORMAT_ARGS(macaddr));
		return (EEXIST);
	}

	/* If not, get a new empty filter */
	f = ixlv_get_mac_filter(sc);
	if (f == NULL) {
		if_printf(sc->vsi.ifp, "%s: no filters available!!\n",
		    __func__);
		return (ENOMEM);
	}

	IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
	    MAC_FORMAT_ARGS(macaddr));

	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
	f->flags |= flags;
	return (0);
}
/*
** Marks a MAC filter for deletion.
*/
static int
ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
{
	struct ixlv_mac_filter	*f;

	f = ixlv_find_mac_filter(sc, macaddr);
	if (f == NULL)
		return (ENOENT);

	f->flags |= IXL_FILTER_DEL;
	return (0);
}
/*
** Tasklet handler for MSIX Adminq interrupts
**  - done outside interrupt context since it might sleep
*/
static void
ixlv_do_adminq(void *context, int pending)
{
	struct ixlv_sc	*sc = context;

	mtx_lock(&sc->mtx);
	ixlv_do_adminq_locked(sc);
	mtx_unlock(&sc->mtx);
	return;
}
static void
ixlv_do_adminq_locked(struct ixlv_sc *sc)
{
	struct i40e_hw			*hw = &sc->hw;
	struct i40e_arq_event_info	event;
	struct i40e_virtchnl_msg	*v_msg;
	device_t			dev = sc->dev;
	u16				result = 0;
	u32				reg, oldreg;
	i40e_status			ret;

	IXLV_CORE_LOCK_ASSERT(sc);

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = sc->aq_buffer;
	v_msg = (struct i40e_virtchnl_msg *)&event.desc;

	do {
		ret = i40e_clean_arq_element(hw, &event, &result);
		if (ret)
			break;
		ixlv_vc_completion(sc, v_msg->v_opcode,
		    v_msg->v_retval, event.msg_buf, event.msg_len);
		if (result != 0)
			bzero(event.msg_buf, IXL_AQ_BUF_SZ);
	} while (result);

	/* check for Admin queue errors */
	oldreg = reg = rd32(hw, hw->aq.arq.len);
	if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
		device_printf(dev, "ARQ VF Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
	}
	if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
		device_printf(dev, "ARQ Overflow Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
	}
	if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
		device_printf(dev, "ARQ Critical Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.arq.len, reg);

	oldreg = reg = rd32(hw, hw->aq.asq.len);
	if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
		device_printf(dev, "ASQ VF Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
	}
	if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
		device_printf(dev, "ASQ Overflow Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
	}
	if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
		device_printf(dev, "ASQ Critical Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.asq.len, reg);

	ixlv_enable_adminq_irq(hw);
}
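/*
 * Illustrative sketch (not compiled into the driver): the error-register
 * scrub above.  Each sticky flag is logged and cleared in a local copy,
 * and the register is rewritten only when something changed, so the common
 * error-free path costs one read and no write.  Names here are
 * hypothetical.
 */
#if 0
struct demo_errbit {
	unsigned int	mask;
	const char	*msg;
};

static unsigned int
demo_scrub(unsigned int reg, const struct demo_errbit *tab, int n,
    void (*log)(const char *))
{
	for (int i = 0; i < n; i++)
		if (reg & tab[i].mask) {
			log(tab[i].msg);
			reg &= ~tab[i].mask;
		}
	return (reg);	/* caller: if (reg != oldreg) write it back */
}
#endif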
static void
ixlv_add_sysctls(struct ixlv_sc *sc)
{
	device_t dev = sc->dev;
	struct ixl_vsi *vsi = &sc->vsi;
	struct i40e_eth_stats *es = &vsi->eth_stats;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);

	struct sysctl_oid *vsi_node, *queue_node;
	struct sysctl_oid_list *vsi_list, *queue_list;

#define QUEUE_NAME_LEN 32
	char queue_namebuf[QUEUE_NAME_LEN];

	struct ixl_queue *queues = vsi->queues;
	struct tx_ring *txr;
	struct rx_ring *rxr;

	/* Driver statistics sysctls */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &sc->watchdog_events,
	    "Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
	    CTLFLAG_RD, &sc->admin_irq,
	    "Admin Queue IRQ Handled");

	/* VSI statistics sysctls */
	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
	    CTLFLAG_RD, NULL, "VSI-specific statistics");
	vsi_list = SYSCTL_CHILDREN(vsi_node);

	struct ixl_sysctl_info ctls[] =
	{
		{&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
		{&es->rx_unicast, "ucast_pkts_rcvd",
		    "Unicast Packets Received"},
		{&es->rx_multicast, "mcast_pkts_rcvd",
		    "Multicast Packets Received"},
		{&es->rx_broadcast, "bcast_pkts_rcvd",
		    "Broadcast Packets Received"},
		{&es->rx_discards, "rx_discards", "Discarded RX packets"},
		{&es->rx_unknown_protocol, "rx_unknown_proto",
		    "RX unknown protocol packets"},
		{&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
		{&es->tx_unicast, "ucast_pkts_txd",
		    "Unicast Packets Transmitted"},
		{&es->tx_multicast, "mcast_pkts_txd",
		    "Multicast Packets Transmitted"},
		{&es->tx_broadcast, "bcast_pkts_txd",
		    "Broadcast Packets Transmitted"},
		{&es->tx_errors, "tx_errors", "TX packet errors"},
		/* end */
		{0, 0, 0}
	};
	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != NULL)
	{
		SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name,
		    CTLFLAG_RD, entry->stat,
		    entry->description);
		entry++;
	}

	/* Queue statistics sysctls */
	for (int q = 0; q < vsi->num_queues; q++) {
		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO,
		    queue_namebuf, CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		txr = &(queues[q].txr);
		rxr = &(queues[q].rxr);

		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
		    CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
		    "m_defrag() failed");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
		    CTLFLAG_RD, &(queues[q].dropped_pkts),
		    "Driver dropped packets");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
		    CTLFLAG_RD, &(queues[q].irqs),
		    "irqs on this queue");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
		    CTLFLAG_RD, &(queues[q].tso),
		    "TSO");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
		    CTLFLAG_RD, &(queues[q].tx_dma_setup),
		    "Driver tx dma failure in xmit");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
		    CTLFLAG_RD, &(txr->no_desc),
		    "Queue No Descriptor Available");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
		    CTLFLAG_RD, &(txr->total_packets),
		    "Queue Packets Transmitted");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
		    CTLFLAG_RD, &(txr->tx_bytes),
		    "Queue Bytes Transmitted");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
		    CTLFLAG_RD, &(rxr->rx_packets),
		    "Queue Packets Received");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
		    CTLFLAG_RD, &(rxr->rx_bytes),
		    "Queue Bytes Received");

		/* Examine queue state */
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head",
		    CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
		    sizeof(struct ixl_queue),
		    ixlv_sysctl_qtx_tail_handler, "IU",
		    "Queue Transmit Descriptor Tail");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_head",
		    CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
		    sizeof(struct ixl_queue),
		    ixlv_sysctl_qrx_tail_handler, "IU",
		    "Queue Receive Descriptor Tail");
	}
}
static void
ixlv_init_filters(struct ixlv_sc *sc)
{
	sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INIT(sc->mac_filters);
	sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INIT(sc->vlan_filters);
	return;
}
static void
ixlv_free_filters(struct ixlv_sc *sc)
{
	struct ixlv_mac_filter	*f;
	struct ixlv_vlan_filter	*v;

	while (!SLIST_EMPTY(sc->mac_filters)) {
		f = SLIST_FIRST(sc->mac_filters);
		SLIST_REMOVE_HEAD(sc->mac_filters, next);
		free(f, M_DEVBUF);
	}
	while (!SLIST_EMPTY(sc->vlan_filters)) {
		v = SLIST_FIRST(sc->vlan_filters);
		SLIST_REMOVE_HEAD(sc->vlan_filters, next);
		free(v, M_DEVBUF);
	}
	return;
}
/**
 * ixlv_sysctl_qtx_tail_handler
 * Retrieves I40E_QTX_TAIL1 value from hardware
 * for a sysctl.
 */
static int
ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_queue *que;
	int error;
	u32 val;

	que = ((struct ixl_queue *)oidp->oid_arg1);
	if (!que)
		return (0);

	val = rd32(que->vsi->hw, que->txr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}

/**
 * ixlv_sysctl_qrx_tail_handler
 * Retrieves I40E_QRX_TAIL1 value from hardware
 * for a sysctl.
 */
static int
ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_queue *que;
	int error;
	u32 val;

	que = ((struct ixl_queue *)oidp->oid_arg1);
	if (!que)
		return (0);

	val = rd32(que->vsi->hw, que->rxr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}