1 /******************************************************************************
3 Copyright (c) 2013-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifndef IXL_STANDALONE_BUILD
37 #include "opt_inet6.h"
45 #include <net/rss_config.h>
48 /*********************************************************************
50 *********************************************************************/
51 char ixlv_driver_version[] = "1.2.6";
53 /*********************************************************************
56 * Used by probe to select devices to load on
57 * Last field stores an index into ixlv_strings
58 * Last entry must be all 0s
60 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61 *********************************************************************/
63 static ixl_vendor_info_t ixlv_vendor_info_array[] =
65 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
66 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
67 /* required last entry */
71 /*********************************************************************
72 * Table of branding strings
73 *********************************************************************/
75 static char *ixlv_strings[] = {
76 "Intel(R) Ethernet Connection XL710 VF Driver"
80 /*********************************************************************
82 *********************************************************************/
83 static int ixlv_probe(device_t);
84 static int ixlv_attach(device_t);
85 static int ixlv_detach(device_t);
86 static int ixlv_shutdown(device_t);
87 static void ixlv_init_locked(struct ixlv_sc *);
88 static int ixlv_allocate_pci_resources(struct ixlv_sc *);
89 static void ixlv_free_pci_resources(struct ixlv_sc *);
90 static int ixlv_assign_msix(struct ixlv_sc *);
91 static int ixlv_init_msix(struct ixlv_sc *);
92 static int ixlv_init_taskqueue(struct ixlv_sc *);
93 static int ixlv_setup_queues(struct ixlv_sc *);
94 static void ixlv_config_rss(struct ixlv_sc *);
95 static void ixlv_stop(struct ixlv_sc *);
96 static void ixlv_add_multi(struct ixl_vsi *);
97 static void ixlv_del_multi(struct ixl_vsi *);
98 static void ixlv_free_queues(struct ixl_vsi *);
99 static int ixlv_setup_interface(device_t, struct ixlv_sc *);
101 static int ixlv_media_change(struct ifnet *);
102 static void ixlv_media_status(struct ifnet *, struct ifmediareq *);
104 static void ixlv_local_timer(void *);
106 static int ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
107 static int ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
108 static void ixlv_init_filters(struct ixlv_sc *);
109 static void ixlv_free_filters(struct ixlv_sc *);
111 static void ixlv_msix_que(void *);
112 static void ixlv_msix_adminq(void *);
113 static void ixlv_do_adminq(void *, int);
114 static void ixlv_do_adminq_locked(struct ixlv_sc *sc);
115 static void ixlv_handle_que(void *, int);
116 static int ixlv_reset(struct ixlv_sc *);
117 static int ixlv_reset_complete(struct i40e_hw *);
118 static void ixlv_set_queue_rx_itr(struct ixl_queue *);
119 static void ixlv_set_queue_tx_itr(struct ixl_queue *);
120 static void ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
121 enum i40e_status_code);
123 static void ixlv_enable_adminq_irq(struct i40e_hw *);
124 static void ixlv_disable_adminq_irq(struct i40e_hw *);
125 static void ixlv_enable_queue_irq(struct i40e_hw *, int);
126 static void ixlv_disable_queue_irq(struct i40e_hw *, int);
128 static void ixlv_setup_vlan_filters(struct ixlv_sc *);
129 static void ixlv_register_vlan(void *, struct ifnet *, u16);
130 static void ixlv_unregister_vlan(void *, struct ifnet *, u16);
132 static void ixlv_init_hw(struct ixlv_sc *);
133 static int ixlv_setup_vc(struct ixlv_sc *);
134 static int ixlv_vf_config(struct ixlv_sc *);
136 static void ixlv_cap_txcsum_tso(struct ixl_vsi *,
137 struct ifnet *, int);
139 static void ixlv_add_sysctls(struct ixlv_sc *);
140 static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
141 static int ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
143 /*********************************************************************
144 * FreeBSD Device Interface Entry Points
145 *********************************************************************/
147 static device_method_t ixlv_methods[] = {
148 /* Device interface */
149 DEVMETHOD(device_probe, ixlv_probe),
150 DEVMETHOD(device_attach, ixlv_attach),
151 DEVMETHOD(device_detach, ixlv_detach),
152 DEVMETHOD(device_shutdown, ixlv_shutdown),
156 static driver_t ixlv_driver = {
157 "ixlv", ixlv_methods, sizeof(struct ixlv_sc),
160 devclass_t ixlv_devclass;
161 DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);
163 MODULE_DEPEND(ixlv, pci, 1, 1, 1);
164 MODULE_DEPEND(ixlv, ether, 1, 1, 1);
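/*
** Usage note (illustrative; module name assumed, not taken from this file):
** this driver is normally built as a kernel module, typically if_ixlv.ko,
** so it would be loaded with "kldload if_ixlv" or from loader.conf with
** if_ixlv_load="YES". The exact module name depends on the build.
*/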
167 ** TUNEABLE PARAMETERS:
170 static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
171 "IXLV driver parameters");
174 ** Number of descriptors per ring:
175 ** - TX and RX are the same size
177 static int ixlv_ringsz = DEFAULT_RING;
178 TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
179 SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
180 &ixlv_ringsz, 0, "Descriptor Ring Size");
182 /* Set to zero to auto calculate */
183 int ixlv_max_queues = 0;
184 TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
185 SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
186 &ixlv_max_queues, 0, "Number of Queues");
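/*
** These are loader tunables (CTLFLAG_RDTUN), read from the kernel
** environment when the module loads; for example, in /boot/loader.conf
** one might set (illustrative values only, valid ranges depend on
** DEFAULT_RING and the hardware limits):
**
**	hw.ixlv.ringsz="2048"
**	hw.ixlv.max_queues="4"
*/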
189 ** Number of entries in Tx queue buf_ring.
190 ** Increasing this will reduce the number of
191 ** errors when transmitting fragmented UDP datagrams.
194 static int ixlv_txbrsz = DEFAULT_TXBRSZ;
195 TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
196 SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
197 &ixlv_txbrsz, 0, "TX Buf Ring Size");
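/*
** Example of why the buf_ring size matters (illustrative numbers): a 64KB
** UDP datagram sent over a 1500-byte MTU fragments into roughly 45 IP
** fragments, each queued on this buf_ring as a separate packet, so a small
** ring can overflow (and drop) during such bursts.
*/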
200 ** Controls for Interrupt Throttling
201 ** - true/false for dynamic adjustment
202 ** - default values for static ITR
204 int ixlv_dynamic_rx_itr = 0;
205 TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
206 SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
207 &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
209 int ixlv_dynamic_tx_itr = 0;
210 TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
211 SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
212 &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
214 int ixlv_rx_itr = IXL_ITR_8K;
215 TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
216 SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
217 &ixlv_rx_itr, 0, "RX Interrupt Rate");
219 int ixlv_tx_itr = IXL_ITR_4K;
220 TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
221 SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
222 &ixlv_tx_itr, 0, "TX Interrupt Rate");
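/*
** Note: the ITR values above are register values, not interrupts per
** second. Assuming the usual 2 usec ITR granularity on this hardware
** family, IXL_ITR_8K corresponds to roughly a 125 usec minimum interrupt
** interval (about 8000 interrupts/sec per queue) and IXL_ITR_4K to
** roughly 250 usec.
*/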
225 /*********************************************************************
226 * Device identification routine
228 * ixlv_probe determines if the driver should be loaded on
229 * the hardware based on PCI vendor/device id of the device.
231 * return BUS_PROBE_DEFAULT on success, positive on failure
232 *********************************************************************/
235 ixlv_probe(device_t dev)
237 ixl_vendor_info_t *ent;
239 u16 pci_vendor_id, pci_device_id;
240 u16 pci_subvendor_id, pci_subdevice_id;
241 char device_name[256];
243 INIT_DEBUGOUT("ixlv_probe: begin");
245 pci_vendor_id = pci_get_vendor(dev);
246 if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
249 pci_device_id = pci_get_device(dev);
250 pci_subvendor_id = pci_get_subvendor(dev);
251 pci_subdevice_id = pci_get_subdevice(dev);
253 ent = ixlv_vendor_info_array;
254 while (ent->vendor_id != 0) {
255 if ((pci_vendor_id == ent->vendor_id) &&
256 (pci_device_id == ent->device_id) &&
258 ((pci_subvendor_id == ent->subvendor_id) ||
259 (ent->subvendor_id == 0)) &&
261 ((pci_subdevice_id == ent->subdevice_id) ||
262 (ent->subdevice_id == 0))) {
263 sprintf(device_name, "%s, Version - %s",
264 ixlv_strings[ent->index],
265 ixlv_driver_version);
266 device_set_desc_copy(dev, device_name);
267 return (BUS_PROBE_DEFAULT);
274 /*********************************************************************
275 * Device initialization routine
277 * The attach entry point is called when the driver is being loaded.
278 * This routine identifies the type of hardware, allocates all resources
279 * and initializes the hardware.
281 * return 0 on success, positive on failure
282 *********************************************************************/
285 ixlv_attach(device_t dev)
292 INIT_DBG_DEV(dev, "begin");
294 /* Allocate, clear, and link in our primary soft structure */
295 sc = device_get_softc(dev);
296 sc->dev = sc->osdep.dev = dev;
301 /* Initialize hw struct */
304 /* Allocate filter lists */
305 ixlv_init_filters(sc);
308 mtx_init(&sc->mtx, device_get_nameunit(dev),
309 "IXL SC Lock", MTX_DEF);
311 /* Set up the timer callout */
312 callout_init_mtx(&sc->timer, &sc->mtx, 0);
314 /* Do PCI setup - map BAR0, etc */
315 if (ixlv_allocate_pci_resources(sc)) {
316 device_printf(dev, "%s: Allocation of PCI resources failed\n",
322 INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");
324 error = i40e_set_mac_type(hw);
326 device_printf(dev, "%s: set_mac_type failed: %d\n",
331 error = ixlv_reset_complete(hw);
333 device_printf(dev, "%s: Device is still being reset\n",
338 INIT_DBG_DEV(dev, "VF Device is ready for configuration");
340 error = ixlv_setup_vc(sc);
342 device_printf(dev, "%s: Error setting up PF comms, %d\n",
347 INIT_DBG_DEV(dev, "PF API version verified");
349 /* TODO: Figure out why MDD events occur when this reset is removed. */
350 /* Need API version before sending reset message */
351 error = ixlv_reset(sc);
353 device_printf(dev, "VF reset failed; reload the driver\n");
357 INIT_DBG_DEV(dev, "VF reset complete");
359 /* Ask for VF config from PF */
360 error = ixlv_vf_config(sc);
362 device_printf(dev, "Error getting configuration from PF: %d\n",
367 INIT_DBG_DEV(dev, "VF config from PF:");
368 INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
369 sc->vf_res->num_vsis,
370 sc->vf_res->num_queue_pairs,
371 sc->vf_res->max_vectors,
372 sc->vf_res->max_mtu);
373 INIT_DBG_DEV(dev, "Offload flags: %#010x",
374 sc->vf_res->vf_offload_flags);
376 // TODO: Move this into ixlv_vf_config?
377 /* got VF config message back from PF, now we can parse it */
378 for (int i = 0; i < sc->vf_res->num_vsis; i++) {
379 if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
380 sc->vsi_res = &sc->vf_res->vsi_res[i];
383 device_printf(dev, "%s: no LAN VSI found\n", __func__);
388 INIT_DBG_DEV(dev, "Resource Acquisition complete");
390 /* If no mac address was assigned just make a random one */
391 if (!ixlv_check_ether_addr(hw->mac.addr)) {
392 u8 addr[ETHER_ADDR_LEN];
393 arc4rand(&addr, sizeof(addr), 0);
396 bcopy(addr, hw->mac.addr, sizeof(addr));
399 vsi->id = sc->vsi_res->vsi_id;
400 vsi->back = (void *)sc;
403 /* This allocates the memory and early settings */
404 if (ixlv_setup_queues(sc) != 0) {
405 device_printf(dev, "%s: setup queues failed!\n",
411 /* Setup the stack interface */
412 if (ixlv_setup_interface(dev, sc) != 0) {
413 device_printf(dev, "%s: setup interface failed!\n",
419 INIT_DBG_DEV(dev, "Queue memory and interface setup");
421 /* Do queue interrupt setup */
422 ixlv_assign_msix(sc);
424 /* Start AdminQ taskqueue */
425 ixlv_init_taskqueue(sc);
427 /* Initialize stats */
428 bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
429 ixlv_add_sysctls(sc);
431 /* Register for VLAN events */
432 vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
433 ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
434 vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
435 ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
437 /* We want AQ enabled early */
438 ixlv_enable_adminq_irq(hw);
440 /* Set things up to run init */
441 sc->init_state = IXLV_INIT_READY;
443 ixl_vc_init_mgr(sc, &sc->vc_mgr);
445 INIT_DBG_DEV(dev, "end");
449 ixlv_free_queues(vsi);
451 free(sc->vf_res, M_DEVBUF);
453 i40e_shutdown_adminq(hw);
455 ixlv_free_pci_resources(sc);
457 mtx_destroy(&sc->mtx);
458 ixlv_free_filters(sc);
459 INIT_DBG_DEV(dev, "end: error %d", error);
463 /*********************************************************************
464 * Device removal routine
466 * The detach entry point is called when the driver is being removed.
467 * This routine stops the adapter and deallocates all the resources
468 * that were allocated for driver operation.
470 * return 0 on success, positive on failure
471 *********************************************************************/
474 ixlv_detach(device_t dev)
476 struct ixlv_sc *sc = device_get_softc(dev);
477 struct ixl_vsi *vsi = &sc->vsi;
479 INIT_DBG_DEV(dev, "begin");
481 /* Make sure VLANS are not using driver */
482 if (vsi->ifp->if_vlantrunk != NULL) {
483 if_printf(vsi->ifp, "Vlan in use, detach first\n");
484 INIT_DBG_DEV(dev, "end");
489 ether_ifdetach(vsi->ifp);
490 if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
493 mtx_unlock(&sc->mtx);
496 /* Unregister VLAN events */
497 if (vsi->vlan_attach != NULL)
498 EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
499 if (vsi->vlan_detach != NULL)
500 EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
503 callout_drain(&sc->vc_mgr.callout);
505 i40e_shutdown_adminq(&sc->hw);
506 taskqueue_free(sc->tq);
508 free(sc->vf_res, M_DEVBUF);
509 ixlv_free_pci_resources(sc);
510 ixlv_free_queues(vsi);
511 mtx_destroy(&sc->mtx);
512 ixlv_free_filters(sc);
514 bus_generic_detach(dev);
515 INIT_DBG_DEV(dev, "end");
519 /*********************************************************************
521 * Shutdown entry point
523 **********************************************************************/
526 ixlv_shutdown(device_t dev)
528 struct ixlv_sc *sc = device_get_softc(dev);
530 INIT_DBG_DEV(dev, "begin");
534 mtx_unlock(&sc->mtx);
536 INIT_DBG_DEV(dev, "end");
541 * Configure TXCSUM(IPV6) and TSO(4/6)
542 * - the hardware handles these together, so the capability flags must be kept in sync
546 ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
548 /* Enable/disable TXCSUM/TSO4 */
549 if (!(ifp->if_capenable & IFCAP_TXCSUM)
550 && !(ifp->if_capenable & IFCAP_TSO4)) {
551 if (mask & IFCAP_TXCSUM) {
552 ifp->if_capenable |= IFCAP_TXCSUM;
553 /* enable TXCSUM, restore TSO if previously enabled */
554 if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
555 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
556 ifp->if_capenable |= IFCAP_TSO4;
559 else if (mask & IFCAP_TSO4) {
560 ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
561 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
563 "TSO4 requires txcsum, enabling both...\n");
565 } else if((ifp->if_capenable & IFCAP_TXCSUM)
566 && !(ifp->if_capenable & IFCAP_TSO4)) {
567 if (mask & IFCAP_TXCSUM)
568 ifp->if_capenable &= ~IFCAP_TXCSUM;
569 else if (mask & IFCAP_TSO4)
570 ifp->if_capenable |= IFCAP_TSO4;
571 } else if((ifp->if_capenable & IFCAP_TXCSUM)
572 && (ifp->if_capenable & IFCAP_TSO4)) {
573 if (mask & IFCAP_TXCSUM) {
574 vsi->flags |= IXL_FLAGS_KEEP_TSO4;
575 ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
577 "TSO4 requires txcsum, disabling both...\n");
578 } else if (mask & IFCAP_TSO4)
579 ifp->if_capenable &= ~IFCAP_TSO4;
582 /* Enable/disable TXCSUM_IPV6/TSO6 */
583 if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
584 && !(ifp->if_capenable & IFCAP_TSO6)) {
585 if (mask & IFCAP_TXCSUM_IPV6) {
586 ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
587 if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
588 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
589 ifp->if_capenable |= IFCAP_TSO6;
591 } else if (mask & IFCAP_TSO6) {
592 ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
593 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
595 "TSO6 requires txcsum6, enabling both...\n");
597 } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
598 && !(ifp->if_capenable & IFCAP_TSO6)) {
599 if (mask & IFCAP_TXCSUM_IPV6)
600 ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
601 else if (mask & IFCAP_TSO6)
602 ifp->if_capenable |= IFCAP_TSO6;
603 } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
604 && (ifp->if_capenable & IFCAP_TSO6)) {
605 if (mask & IFCAP_TXCSUM_IPV6) {
606 vsi->flags |= IXL_FLAGS_KEEP_TSO6;
607 ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
609 "TSO6 requires txcsum6, disabling both...\n");
610 } else if (mask & IFCAP_TSO6)
611 ifp->if_capenable &= ~IFCAP_TSO6;
615 /*********************************************************************
618 * ixlv_ioctl is called when the user wants to configure the interface.
621 * return 0 on success, positive on failure
622 **********************************************************************/
625 ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
627 struct ixl_vsi *vsi = ifp->if_softc;
628 struct ixlv_sc *sc = vsi->back;
629 struct ifreq *ifr = (struct ifreq *)data;
630 #if defined(INET) || defined(INET6)
631 struct ifaddr *ifa = (struct ifaddr *)data;
632 bool avoid_reset = FALSE;
641 if (ifa->ifa_addr->sa_family == AF_INET)
645 if (ifa->ifa_addr->sa_family == AF_INET6)
648 #if defined(INET) || defined(INET6)
650 ** Calling init results in link renegotiation,
651 ** so we avoid doing it when possible.
654 ifp->if_flags |= IFF_UP;
655 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
658 if (!(ifp->if_flags & IFF_NOARP))
659 arp_ifinit(ifp, ifa);
662 error = ether_ioctl(ifp, command, data);
666 IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
668 if (ifr->ifr_mtu > IXL_MAX_FRAME -
669 ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
671 IOCTL_DBG_IF(ifp, "mtu too large");
673 IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", ifp->if_mtu, ifr->ifr_mtu);
674 // ERJ: Interestingly enough, these types don't match
675 ifp->if_mtu = (u_long)ifr->ifr_mtu;
676 vsi->max_frame_size =
677 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
678 + ETHER_VLAN_ENCAP_LEN;
679 ixlv_init_locked(sc);
681 mtx_unlock(&sc->mtx);
684 IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
686 if (ifp->if_flags & IFF_UP) {
687 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
688 ixlv_init_locked(sc);
690 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
692 sc->if_flags = ifp->if_flags;
693 mtx_unlock(&sc->mtx);
696 IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
697 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
699 ixlv_disable_intr(vsi);
701 ixlv_enable_intr(vsi);
702 mtx_unlock(&sc->mtx);
706 IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
707 if (sc->init_state == IXLV_RUNNING) {
709 ixlv_disable_intr(vsi);
711 ixlv_enable_intr(vsi);
712 mtx_unlock(&sc->mtx);
717 IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
718 error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
722 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
723 IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");
725 ixlv_cap_txcsum_tso(vsi, ifp, mask);
727 if (mask & IFCAP_RXCSUM)
728 ifp->if_capenable ^= IFCAP_RXCSUM;
729 if (mask & IFCAP_RXCSUM_IPV6)
730 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
731 if (mask & IFCAP_LRO)
732 ifp->if_capenable ^= IFCAP_LRO;
733 if (mask & IFCAP_VLAN_HWTAGGING)
734 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
735 if (mask & IFCAP_VLAN_HWFILTER)
736 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
737 if (mask & IFCAP_VLAN_HWTSO)
738 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
739 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
742 VLAN_CAPABILITIES(ifp);
748 IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
749 error = ether_ioctl(ifp, command, data);
757 ** Doing a reinit on the VF is unfortunately more complicated
758 ** than on a physical device: we must have the PF more or less
759 ** completely recreate our memory, so many things that were
760 ** done only once at attach in traditional drivers must now be
761 ** redone at each reinitialization. This function does that
762 ** 'prelude' so we can then call the normal locked init code.
765 ixlv_reinit_locked(struct ixlv_sc *sc)
767 struct i40e_hw *hw = &sc->hw;
768 struct ixl_vsi *vsi = &sc->vsi;
769 struct ifnet *ifp = vsi->ifp;
770 struct ixlv_mac_filter *mf, *mf_temp;
771 struct ixlv_vlan_filter *vf;
774 INIT_DBG_IF(ifp, "begin");
776 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
779 error = ixlv_reset(sc);
781 INIT_DBG_IF(ifp, "VF was reset");
783 /* set the state in case we went thru RESET */
784 sc->init_state = IXLV_RUNNING;
787 ** Resetting the VF drops all filters from hardware;
788 ** we need to mark them to be re-added in init.
790 SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
791 if (mf->flags & IXL_FILTER_DEL) {
792 SLIST_REMOVE(sc->mac_filters, mf,
793 ixlv_mac_filter, next);
796 mf->flags |= IXL_FILTER_ADD;
798 if (vsi->num_vlans != 0)
799 SLIST_FOREACH(vf, sc->vlan_filters, next)
800 vf->flags = IXL_FILTER_ADD;
801 else { /* clean any stale filters */
802 while (!SLIST_EMPTY(sc->vlan_filters)) {
803 vf = SLIST_FIRST(sc->vlan_filters);
804 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
809 ixlv_enable_adminq_irq(hw);
810 ixl_vc_flush(&sc->vc_mgr);
812 INIT_DBG_IF(ifp, "end");
817 ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
818 enum i40e_status_code code)
825 * Ignore the "Adapter Stopped" error, as it happens if an ifconfig down
826 * occurs while a command is in progress; we don't print an error in that case
829 if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
830 if_printf(sc->vsi.ifp,
831 "Error %d waiting for PF to complete operation %d\n",
837 ixlv_init_locked(struct ixlv_sc *sc)
839 struct i40e_hw *hw = &sc->hw;
840 struct ixl_vsi *vsi = &sc->vsi;
841 struct ixl_queue *que = vsi->queues;
842 struct ifnet *ifp = vsi->ifp;
845 INIT_DBG_IF(ifp, "begin");
847 IXLV_CORE_LOCK_ASSERT(sc);
849 /* Do a reinit first if an init has already been done */
850 if ((sc->init_state == IXLV_RUNNING) ||
851 (sc->init_state == IXLV_RESET_REQUIRED) ||
852 (sc->init_state == IXLV_RESET_PENDING))
853 error = ixlv_reinit_locked(sc);
854 /* Don't bother with init if we failed reinit */
858 /* Remove existing MAC filter if new MAC addr is set */
859 if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
860 error = ixlv_del_mac_filter(sc, hw->mac.addr);
862 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
863 IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
867 /* Check for an LAA mac address... */
868 bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);
870 ifp->if_hwassist = 0;
871 if (ifp->if_capenable & IFCAP_TSO)
872 ifp->if_hwassist |= CSUM_TSO;
873 if (ifp->if_capenable & IFCAP_TXCSUM)
874 ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
875 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
876 ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;
878 /* Add mac filter for this VF to PF */
879 if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
880 error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
881 if (!error || error == EEXIST)
882 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
883 IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
887 /* Set up vlans if needed */
888 ixlv_setup_vlan_filters(sc);
890 /* Prepare the queues for operation */
891 for (int i = 0; i < vsi->num_queues; i++, que++) {
892 struct rx_ring *rxr = &que->rxr;
894 ixl_init_tx_ring(que);
896 if (vsi->max_frame_size <= MCLBYTES)
897 rxr->mbuf_sz = MCLBYTES;
899 rxr->mbuf_sz = MJUMPAGESIZE;
900 ixl_init_rx_ring(que);
903 /* Configure queues */
904 ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
905 IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);
911 ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
912 IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);
915 ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
916 IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);
918 /* Start the local timer */
919 callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
921 sc->init_state = IXLV_RUNNING;
924 INIT_DBG_IF(ifp, "end");
929 ** Init entry point for the stack
934 struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
935 struct ixlv_sc *sc = vsi->back;
939 ixlv_init_locked(sc);
940 mtx_unlock(&sc->mtx);
942 /* Wait for init_locked to finish */
943 while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
944 && ++retries < 100) {
947 if (retries >= IXLV_AQ_MAX_ERR)
949 "Init failed to complete in alloted time!\n");
953 * ixlv_attach() helper function; gathers information about
954 * the (virtual) hardware for use elsewhere in the driver.
957 ixlv_init_hw(struct ixlv_sc *sc)
959 struct i40e_hw *hw = &sc->hw;
960 device_t dev = sc->dev;
962 /* Save off the information about this board */
963 hw->vendor_id = pci_get_vendor(dev);
964 hw->device_id = pci_get_device(dev);
965 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
966 hw->subsystem_vendor_id =
967 pci_read_config(dev, PCIR_SUBVEND_0, 2);
968 hw->subsystem_device_id =
969 pci_read_config(dev, PCIR_SUBDEV_0, 2);
971 hw->bus.device = pci_get_slot(dev);
972 hw->bus.func = pci_get_function(dev);
976 * ixlv_attach() helper function; initializes the admin queue
977 * and attempts to establish contact with the PF by
978 * retrying the initial "API version" message several times
979 * or until the PF responds.
982 ixlv_setup_vc(struct ixlv_sc *sc)
984 struct i40e_hw *hw = &sc->hw;
985 device_t dev = sc->dev;
986 int error = 0, ret_error = 0, asq_retries = 0;
987 bool send_api_ver_retried = 0;
989 /* Need to set these AQ parameters before initializing the AQ */
990 hw->aq.num_arq_entries = IXL_AQ_LEN;
991 hw->aq.num_asq_entries = IXL_AQ_LEN;
992 hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
993 hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
995 for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
996 /* Initialize admin queue */
997 error = i40e_init_adminq(hw);
999 device_printf(dev, "%s: init_adminq failed: %d\n",
1005 INIT_DBG_DEV(dev, "Initialized Admin Queue, attempt %d", i+1);
1008 /* Send VF's API version */
1009 error = ixlv_send_api_ver(sc);
1011 i40e_shutdown_adminq(hw);
1013 device_printf(dev, "%s: unable to send api"
1014 " version to PF on attempt %d, error %d\n",
1015 __func__, i+1, error);
1019 while (!i40e_asq_done(hw)) {
1020 if (++asq_retries > IXLV_AQ_MAX_ERR) {
1021 i40e_shutdown_adminq(hw);
1022 DDPRINTF(dev, "Admin Queue timeout "
1023 "(waiting for send_api_ver), %d more retries...",
1024 IXLV_AQ_MAX_ERR - (i + 1));
1028 i40e_msec_delay(10);
1030 if (asq_retries > IXLV_AQ_MAX_ERR)
1033 INIT_DBG_DEV(dev, "Sent API version message to PF");
1035 /* Verify that the VF accepts the PF's API version */
1036 error = ixlv_verify_api_ver(sc);
1037 if (error == ETIMEDOUT) {
1038 if (!send_api_ver_retried) {
1039 /* Resend message, one more time */
1040 send_api_ver_retried++;
1042 "%s: Timeout while verifying API version on first"
1043 " try!\n", __func__);
1047 "%s: Timeout while verifying API version on second"
1048 " try!\n", __func__);
1055 "%s: Unable to verify API version,"
1056 " error %d\n", __func__, error);
1063 i40e_shutdown_adminq(hw);
1068 * ixlv_attach() helper function; asks the PF for this VF's
1069 * configuration, and saves the information if it receives it.
1072 ixlv_vf_config(struct ixlv_sc *sc)
1074 struct i40e_hw *hw = &sc->hw;
1075 device_t dev = sc->dev;
1076 int bufsz, error = 0, ret_error = 0;
1077 int asq_retries, retried = 0;
1080 error = ixlv_send_vf_config_msg(sc);
1083 "%s: Unable to send VF config request, attempt %d,"
1084 " error %d\n", __func__, retried + 1, error);
1089 while (!i40e_asq_done(hw)) {
1090 if (++asq_retries > IXLV_AQ_MAX_ERR) {
1091 device_printf(dev, "%s: Admin Queue timeout "
1092 "(waiting for send_vf_config_msg), attempt %d\n",
1093 __func__, retried + 1);
1097 i40e_msec_delay(10);
1100 INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
1104 bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
1105 (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
1106 sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
1109 "%s: Unable to allocate memory for VF configuration"
1110 " message from PF on attempt %d\n", __func__, retried + 1);
1116 /* Check for VF config response */
1117 error = ixlv_get_vf_config(sc);
1118 if (error == ETIMEDOUT) {
1119 /* The 1st time we timeout, send the configuration message again */
1127 "%s: Unable to get VF configuration from PF after %d tries!\n",
1128 __func__, retried + 1);
1134 free(sc->vf_res, M_DEVBUF);
1140 * Allocate MSI/X vectors, setup the AQ vector early
1143 ixlv_init_msix(struct ixlv_sc *sc)
1145 device_t dev = sc->dev;
1146 int rid, want, vectors, queues, available;
1148 rid = PCIR_BAR(IXL_BAR);
1149 sc->msix_mem = bus_alloc_resource_any(dev,
1150 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1151 if (!sc->msix_mem) {
1152 /* May not be enabled */
1153 device_printf(sc->dev,
1154 "Unable to map MSIX table \n");
1158 available = pci_msix_count(dev);
1159 if (available == 0) { /* system has msix disabled */
1160 bus_release_resource(dev, SYS_RES_MEMORY,
1162 sc->msix_mem = NULL;
1166 /* Figure out a reasonable auto config value */
1167 queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
1169 /* Override with hardcoded value if sane */
1170 if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues))
1171 queues = ixlv_max_queues;
1173 /* If we're doing RSS, clamp at the number of RSS buckets */
1174 if (queues > rss_getnumbuckets())
1175 queues = rss_getnumbuckets();
1177 /* Enforce the VF max value */
1178 if (queues > IXLV_MAX_QUEUES)
1179 queues = IXLV_MAX_QUEUES;
1182 ** Want one vector (RX/TX pair) per queue
1183 ** plus an additional for the admin queue.
1186 if (want <= available) /* Have enough */
1189 device_printf(sc->dev,
1190 "MSIX Configuration Problem, "
1191 "%d vectors available but %d wanted!\n",
1198 * If we're doing RSS, the number of queues needs to
1199 * match the number of RSS buckets that are configured.
1201 * + If there's more queues than RSS buckets, we'll end
1202 * up with queues that get no traffic.
1204 * + If there's more RSS buckets than queues, we'll end
1205 * up having multiple RSS buckets map to the same queue,
1206 * so there'll be some contention.
1208 if (queues != rss_getnumbuckets()) {
1210 "%s: queues (%d) != RSS buckets (%d)"
1211 "; performance will be impacted.\n",
1212 __func__, queues, rss_getnumbuckets());
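/*
** Illustrative example of the mismatch above: with 8 RSS buckets
** (e.g. net.inet.rss.bits=3) but only 4 VF queues, buckets 4-7 end up
** sharing queues with buckets 0-3 (contention); with 2 buckets and 4
** queues, two queues would never receive RSS-steered traffic.
*/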
1216 if (pci_alloc_msix(dev, &vectors) == 0) {
1217 device_printf(sc->dev,
1218 "Using MSIX interrupts with %d vectors\n", vectors);
1220 sc->vsi.num_queues = queues;
1224 ** Explicitly set the guest PCI BUSMASTER capability
1225 ** and rewrite the ENABLE bit in the MSIX control
1226 ** register again at this point to cause the host to
1227 ** successfully initialize us.
1232 pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1233 pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
1234 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1235 pci_find_cap(dev, PCIY_MSIX, &rid);
1236 rid += PCIR_MSIX_CTRL;
1237 msix_ctrl = pci_read_config(dev, rid, 2);
1238 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1239 pci_write_config(dev, rid, msix_ctrl, 2);
1242 /* Next we need to setup the vector for the Admin Queue */
1243 rid = 1; // zero vector + 1
1244 sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1245 &rid, RF_SHAREABLE | RF_ACTIVE);
1246 if (sc->res == NULL) {
1247 device_printf(dev,"Unable to allocate"
1248 " bus resource: AQ interrupt \n");
1251 if (bus_setup_intr(dev, sc->res,
1252 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1253 ixlv_msix_adminq, sc, &sc->tag)) {
1255 device_printf(dev, "Failed to register AQ handler");
1258 bus_describe_intr(dev, sc->res, sc->tag, "adminq");
1263 /* The VF driver MUST use MSIX */
1268 ixlv_allocate_pci_resources(struct ixlv_sc *sc)
1271 device_t dev = sc->dev;
1274 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1277 if (!(sc->pci_mem)) {
1278 device_printf(dev,"Unable to allocate bus resource: memory\n");
1282 sc->osdep.mem_bus_space_tag =
1283 rman_get_bustag(sc->pci_mem);
1284 sc->osdep.mem_bus_space_handle =
1285 rman_get_bushandle(sc->pci_mem);
1286 sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
1287 sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
1288 sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
1290 sc->hw.back = &sc->osdep;
1292 /* Disable adminq interrupts */
1293 ixlv_disable_adminq_irq(&sc->hw);
1296 ** Now set up MSI/X; it will return
1297 ** the number of supported vectors
1299 sc->msix = ixlv_init_msix(sc);
1301 /* We fail without MSIX support */
1309 ixlv_free_pci_resources(struct ixlv_sc *sc)
1311 struct ixl_vsi *vsi = &sc->vsi;
1312 struct ixl_queue *que = vsi->queues;
1313 device_t dev = sc->dev;
1315 /* We may get here before stations are setup */
1320 ** Release all msix queue resources:
1322 for (int i = 0; i < vsi->num_queues; i++, que++) {
1323 int rid = que->msix + 1;
1324 if (que->tag != NULL) {
1325 bus_teardown_intr(dev, que->res, que->tag);
1328 if (que->res != NULL)
1329 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1333 /* Clean the AdminQ interrupt */
1334 if (sc->tag != NULL) {
1335 bus_teardown_intr(dev, sc->res, sc->tag);
1338 if (sc->res != NULL)
1339 bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
1341 pci_release_msi(dev);
1343 if (sc->msix_mem != NULL)
1344 bus_release_resource(dev, SYS_RES_MEMORY,
1345 PCIR_BAR(IXL_BAR), sc->msix_mem);
1347 if (sc->pci_mem != NULL)
1348 bus_release_resource(dev, SYS_RES_MEMORY,
1349 PCIR_BAR(0), sc->pci_mem);
1355 * Create taskqueue and tasklet for Admin Queue interrupts.
1358 ixlv_init_taskqueue(struct ixlv_sc *sc)
1362 TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);
1364 sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1365 taskqueue_thread_enqueue, &sc->tq);
1366 taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
1367 device_get_nameunit(sc->dev));
1372 /*********************************************************************
1374 * Setup MSIX Interrupt resources and handlers for the VSI queues
1376 **********************************************************************/
1378 ixlv_assign_msix(struct ixlv_sc *sc)
1380 device_t dev = sc->dev;
1381 struct ixl_vsi *vsi = &sc->vsi;
1382 struct ixl_queue *que = vsi->queues;
1383 struct tx_ring *txr;
1384 int error, rid, vector = 1;
1389 for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1393 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1394 RF_SHAREABLE | RF_ACTIVE);
1395 if (que->res == NULL) {
1396 device_printf(dev,"Unable to allocate"
1397 " bus resource: que interrupt [%d]\n", vector);
1400 /* Set the handler function */
1401 error = bus_setup_intr(dev, que->res,
1402 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1403 ixlv_msix_que, que, &que->tag);
1406 device_printf(dev, "Failed to register que handler");
1409 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1410 /* Bind the vector to a CPU */
1412 cpu_id = rss_getcpu(i % rss_getnumbuckets());
1414 bus_bind_intr(dev, que->res, cpu_id);
1416 vsi->que_mask |= (u64)(1 << que->msix);
1417 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1418 TASK_INIT(&que->task, 0, ixlv_handle_que, que);
1419 que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
1420 taskqueue_thread_enqueue, &que->tq);
1422 CPU_SETOF(cpu_id, &cpu_mask);
1423 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
1424 &cpu_mask, "%s (bucket %d)",
1425 device_get_nameunit(dev), cpu_id);
1427 taskqueue_start_threads(&que->tq, 1, PI_NET,
1428 "%s que", device_get_nameunit(dev));
1437 ** Requests a VF reset from the PF.
1439 ** Requires the VF's Admin Queue to be initialized.
1442 ixlv_reset(struct ixlv_sc *sc)
1444 struct i40e_hw *hw = &sc->hw;
1445 device_t dev = sc->dev;
1448 /* Ask the PF to reset us if we are initiating */
1449 if (sc->init_state != IXLV_RESET_PENDING)
1450 ixlv_request_reset(sc);
1452 i40e_msec_delay(100);
1453 error = ixlv_reset_complete(hw);
1455 device_printf(dev, "%s: VF reset failed\n",
1460 error = i40e_shutdown_adminq(hw);
1462 device_printf(dev, "%s: shutdown_adminq failed: %d\n",
1467 error = i40e_init_adminq(hw);
1469 device_printf(dev, "%s: init_adminq failed: %d\n",
1478 ixlv_reset_complete(struct i40e_hw *hw)
1482 for (int i = 0; i < 100; i++) {
1483 reg = rd32(hw, I40E_VFGEN_RSTAT) &
1484 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1486 if ((reg == I40E_VFR_VFACTIVE) ||
1487 (reg == I40E_VFR_COMPLETED))
1489 i40e_msec_delay(100);
1496 /*********************************************************************
1498 * Setup networking device structure and register an interface.
1500 **********************************************************************/
1502 ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
1505 struct ixl_vsi *vsi = &sc->vsi;
1506 struct ixl_queue *que = vsi->queues;
1508 INIT_DBG_DEV(dev, "begin");
1510 ifp = vsi->ifp = if_alloc(IFT_ETHER);
1512 device_printf(dev, "%s: could not allocate ifnet"
1513 " structure!\n", __func__);
1517 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1519 ifp->if_mtu = ETHERMTU;
1520 ifp->if_baudrate = 4000000000; // ??
1521 ifp->if_init = ixlv_init;
1522 ifp->if_softc = vsi;
1523 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1524 ifp->if_ioctl = ixlv_ioctl;
1526 #if __FreeBSD_version >= 1100000
1527 if_setgetcounterfn(ifp, ixl_get_counter);
1530 ifp->if_transmit = ixl_mq_start;
1532 ifp->if_qflush = ixl_qflush;
1533 ifp->if_snd.ifq_maxlen = que->num_desc - 2;
1535 ether_ifattach(ifp, sc->hw.mac.addr);
1537 vsi->max_frame_size =
1538 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1539 + ETHER_VLAN_ENCAP_LEN;
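/*
** For example, with the default ETHERMTU (1500) this works out to
** 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes.
*/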
1542 * Tell the upper layer(s) we support long frames.
1544 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1546 ifp->if_capabilities |= IFCAP_HWCSUM;
1547 ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1548 ifp->if_capabilities |= IFCAP_TSO;
1549 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1551 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1556 ifp->if_capenable = ifp->if_capabilities;
1559 ** Don't turn this on by default: if vlans are
1560 ** created on another pseudo device (e.g. lagg),
1561 ** then vlan events are not passed through, breaking
1562 ** operation, but with HW FILTER off it works. If
1563 ** using vlans directly on the ixlv driver you can
1564 ** enable this and get full hardware tag filtering.
1566 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
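/*
** For instance (illustrative), a user running vlans directly on the VF
** could enable the filter at runtime with "ifconfig ixlv0 vlanhwfilter"
** and disable it again with "ifconfig ixlv0 -vlanhwfilter".
*/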
1569 * Specify the media types supported by this adapter and register
1570 * callbacks to update media and link information
1572 ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
1575 // JFV Add media types later?
1577 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1578 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1580 INIT_DBG_DEV(dev, "end");
1585 ** Allocate and setup the interface queues
1588 ixlv_setup_queues(struct ixlv_sc *sc)
1590 device_t dev = sc->dev;
1591 struct ixl_vsi *vsi;
1592 struct ixl_queue *que;
1593 struct tx_ring *txr;
1594 struct rx_ring *rxr;
1596 int error = I40E_SUCCESS;
1599 vsi->back = (void *)sc;
1603 /* Get memory for the station queues */
1605 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
1606 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1607 device_printf(dev, "Unable to allocate queue memory\n");
1612 for (int i = 0; i < vsi->num_queues; i++) {
1613 que = &vsi->queues[i];
1614 que->num_desc = ixlv_ringsz;
1617 /* mark the queue as active */
1618 vsi->active_queues |= (u64)1 << que->me;
1622 txr->tail = I40E_QTX_TAIL1(que->me);
1623 /* Initialize the TX lock */
1624 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
1625 device_get_nameunit(dev), que->me);
1626 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
1628 ** Create the TX descriptor ring; the extra u32 is
1629 ** added as the location for the HEAD writeback.
1631 tsize = roundup2((que->num_desc *
1632 sizeof(struct i40e_tx_desc)) +
1633 sizeof(u32), DBA_ALIGN);
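/*
** Rough size example (assuming DEFAULT_RING is 1024 and 16-byte TX
** descriptors): 1024 * 16 + 4 = 16388 bytes, which roundup2() then pads
** up to the next DBA_ALIGN boundary.
*/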
1634 if (i40e_allocate_dma_mem(&sc->hw,
1635 &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
1637 "Unable to allocate TX Descriptor memory\n");
1641 txr->base = (struct i40e_tx_desc *)txr->dma.va;
1642 bzero((void *)txr->base, tsize);
1643 /* Now allocate transmit soft structs for the ring */
1644 if (ixl_allocate_tx_data(que)) {
1646 "Critical Failure setting up TX structures\n");
1650 /* Allocate a buf ring */
1651 txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
1652 M_WAITOK, &txr->mtx);
1653 if (txr->br == NULL) {
1655 "Critical Failure setting up TX buf ring\n");
1661 * Next the RX queues...
1663 rsize = roundup2(que->num_desc *
1664 sizeof(union i40e_rx_desc), DBA_ALIGN);
1667 rxr->tail = I40E_QRX_TAIL1(que->me);
1669 /* Initialize the RX side lock */
1670 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
1671 device_get_nameunit(dev), que->me);
1672 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
1674 if (i40e_allocate_dma_mem(&sc->hw,
1675 &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
1677 "Unable to allocate RX Descriptor memory\n");
1681 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
1682 bzero((void *)rxr->base, rsize);
1684 /* Allocate receive soft structs for the ring */
1685 if (ixl_allocate_rx_data(que)) {
1687 "Critical Failure setting up receive structs\n");
1696 for (int i = 0; i < vsi->num_queues; i++) {
1697 que = &vsi->queues[i];
1701 i40e_free_dma_mem(&sc->hw, &rxr->dma);
1703 i40e_free_dma_mem(&sc->hw, &txr->dma);
1705 free(vsi->queues, M_DEVBUF);
1712 ** This routine is run via a vlan config EVENT;
1713 ** it enables us to use the HW Filter table since
1714 ** we can get the vlan id. This just creates the
1715 ** entry in the soft version of the VFTA; init will
1716 ** repopulate the real table.
1719 ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1721 struct ixl_vsi *vsi = arg;
1722 struct ixlv_sc *sc = vsi->back;
1723 struct ixlv_vlan_filter *v;
1726 if (ifp->if_softc != arg) /* Not our event */
1729 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1732 /* Sanity check - make sure it doesn't already exist */
1733 SLIST_FOREACH(v, sc->vlan_filters, next) {
1734 if (v->vlan == vtag)
1740 v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
1741 SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
1743 v->flags = IXL_FILTER_ADD;
1744 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
1745 IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
1746 mtx_unlock(&sc->mtx);
1751 ** This routine is run via a vlan
1752 ** unconfig EVENT; it removes our entry
1753 ** from the soft vfta.
1756 ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1758 struct ixl_vsi *vsi = arg;
1759 struct ixlv_sc *sc = vsi->back;
1760 struct ixlv_vlan_filter *v;
1763 if (ifp->if_softc != arg)
1766 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1770 SLIST_FOREACH(v, sc->vlan_filters, next) {
1771 if (v->vlan == vtag) {
1772 v->flags = IXL_FILTER_DEL;
1778 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
1779 IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
1780 mtx_unlock(&sc->mtx);
1785 ** Get a new filter and add it to the mac filter list.
1787 static struct ixlv_mac_filter *
1788 ixlv_get_mac_filter(struct ixlv_sc *sc)
1790 struct ixlv_mac_filter *f;
1792 f = malloc(sizeof(struct ixlv_mac_filter),
1793 M_DEVBUF, M_NOWAIT | M_ZERO);
1795 SLIST_INSERT_HEAD(sc->mac_filters, f, next);
1801 ** Find the filter with matching MAC address
1803 static struct ixlv_mac_filter *
1804 ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
1806 struct ixlv_mac_filter *f;
1809 SLIST_FOREACH(f, sc->mac_filters, next) {
1810 if (cmp_etheraddr(f->macaddr, macaddr)) {
1822 ** Admin Queue interrupt handler
1825 ixlv_msix_adminq(void *arg)
1827 struct ixlv_sc *sc = arg;
1828 struct i40e_hw *hw = &sc->hw;
1831 reg = rd32(hw, I40E_VFINT_ICR01);
1832 mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
1834 reg = rd32(hw, I40E_VFINT_DYN_CTL01);
1835 reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
1836 wr32(hw, I40E_VFINT_DYN_CTL01, reg);
1839 taskqueue_enqueue(sc->tq, &sc->aq_irq);
1844 ixlv_enable_intr(struct ixl_vsi *vsi)
1846 struct i40e_hw *hw = vsi->hw;
1847 struct ixl_queue *que = vsi->queues;
1849 ixlv_enable_adminq_irq(hw);
1850 for (int i = 0; i < vsi->num_queues; i++, que++)
1851 ixlv_enable_queue_irq(hw, que->me);
1855 ixlv_disable_intr(struct ixl_vsi *vsi)
1857 struct i40e_hw *hw = vsi->hw;
1858 struct ixl_queue *que = vsi->queues;
1860 ixlv_disable_adminq_irq(hw);
1861 for (int i = 0; i < vsi->num_queues; i++, que++)
1862 ixlv_disable_queue_irq(hw, que->me);
1867 ixlv_disable_adminq_irq(struct i40e_hw *hw)
1869 wr32(hw, I40E_VFINT_DYN_CTL01, 0);
1870 wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
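/*
** The read below appears to serve only to flush the preceding register
** writes over the bus; the returned value is discarded.
*/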
1872 rd32(hw, I40E_VFGEN_RSTAT);
1877 ixlv_enable_adminq_irq(struct i40e_hw *hw)
1879 wr32(hw, I40E_VFINT_DYN_CTL01,
1880 I40E_VFINT_DYN_CTL01_INTENA_MASK |
1881 I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1882 wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
1884 rd32(hw, I40E_VFGEN_RSTAT);
1889 ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
1893 reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1894 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
1895 wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
1899 ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
1901 wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
1902 rd32(hw, I40E_VFGEN_RSTAT);
1908 ** Provide an update to the queue RX
1909 ** interrupt moderation value.
1912 ixlv_set_queue_rx_itr(struct ixl_queue *que)
1914 struct ixl_vsi *vsi = que->vsi;
1915 struct i40e_hw *hw = vsi->hw;
1916 struct rx_ring *rxr = &que->rxr;
1922 /* Idle, do nothing */
1923 if (rxr->bytes == 0)
1926 if (ixlv_dynamic_rx_itr) {
1927 rx_bytes = rxr->bytes/rxr->itr;
1930 /* Adjust latency range */
1931 switch (rxr->latency) {
1932 case IXL_LOW_LATENCY:
1933 if (rx_bytes > 10) {
1934 rx_latency = IXL_AVE_LATENCY;
1935 rx_itr = IXL_ITR_20K;
1938 case IXL_AVE_LATENCY:
1939 if (rx_bytes > 20) {
1940 rx_latency = IXL_BULK_LATENCY;
1941 rx_itr = IXL_ITR_8K;
1942 } else if (rx_bytes <= 10) {
1943 rx_latency = IXL_LOW_LATENCY;
1944 rx_itr = IXL_ITR_100K;
1947 case IXL_BULK_LATENCY:
1948 if (rx_bytes <= 20) {
1949 rx_latency = IXL_AVE_LATENCY;
1950 rx_itr = IXL_ITR_20K;
1955 rxr->latency = rx_latency;
1957 if (rx_itr != rxr->itr) {
1958 /* do an exponential smoothing */
1959 rx_itr = (10 * rx_itr * rxr->itr) /
1960 ((9 * rx_itr) + rxr->itr);
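/*
** Worked example of the smoothing above (illustrative register values):
** with an old itr of 62 and a new target of 8, the result is
** (10 * 8 * 62) / ((9 * 8) + 62) = 4960 / 134 ~= 37, i.e. the programmed
** value moves only part of the way toward the target on each adjustment.
*/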
1961 rxr->itr = rx_itr & IXL_MAX_ITR;
1962 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1963 que->me), rxr->itr);
1965 } else { /* We may have toggled to non-dynamic */
1966 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
1967 vsi->rx_itr_setting = ixlv_rx_itr;
1968 /* Update the hardware if needed */
1969 if (rxr->itr != vsi->rx_itr_setting) {
1970 rxr->itr = vsi->rx_itr_setting;
1971 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1972 que->me), rxr->itr);
1982 ** Provide an update to the queue TX
1983 ** interrupt moderation value.
1986 ixlv_set_queue_tx_itr(struct ixl_queue *que)
1988 struct ixl_vsi *vsi = que->vsi;
1989 struct i40e_hw *hw = vsi->hw;
1990 struct tx_ring *txr = &que->txr;
1996 /* Idle, do nothing */
1997 if (txr->bytes == 0)
2000 if (ixlv_dynamic_tx_itr) {
2001 tx_bytes = txr->bytes/txr->itr;
2004 switch (txr->latency) {
2005 case IXL_LOW_LATENCY:
2006 if (tx_bytes > 10) {
2007 tx_latency = IXL_AVE_LATENCY;
2008 tx_itr = IXL_ITR_20K;
2011 case IXL_AVE_LATENCY:
2012 if (tx_bytes > 20) {
2013 tx_latency = IXL_BULK_LATENCY;
2014 tx_itr = IXL_ITR_8K;
2015 } else if (tx_bytes <= 10) {
2016 tx_latency = IXL_LOW_LATENCY;
2017 tx_itr = IXL_ITR_100K;
2020 case IXL_BULK_LATENCY:
2021 if (tx_bytes <= 20) {
2022 tx_latency = IXL_AVE_LATENCY;
2023 tx_itr = IXL_ITR_20K;
2028 txr->latency = tx_latency;
2030 if (tx_itr != txr->itr) {
2031 /* do an exponential smoothing */
2032 tx_itr = (10 * tx_itr * txr->itr) /
2033 ((9 * tx_itr) + txr->itr);
2034 txr->itr = tx_itr & IXL_MAX_ITR;
2035 wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2036 que->me), txr->itr);
2039 } else { /* We may have toggled to non-dynamic */
2040 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2041 vsi->tx_itr_setting = ixlv_tx_itr;
2042 /* Update the hardware if needed */
2043 if (txr->itr != vsi->tx_itr_setting) {
2044 txr->itr = vsi->tx_itr_setting;
2045 wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2046 que->me), txr->itr);
2057 ** MSIX Interrupt Handlers and Tasklets
2061 ixlv_handle_que(void *context, int pending)
2063 struct ixl_queue *que = context;
2064 struct ixl_vsi *vsi = que->vsi;
2065 struct i40e_hw *hw = vsi->hw;
2066 struct tx_ring *txr = &que->txr;
2067 struct ifnet *ifp = vsi->ifp;
2070 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2071 more = ixl_rxeof(que, IXL_RX_LIMIT);
2072 mtx_lock(&txr->mtx);
2074 if (!drbr_empty(ifp, txr->br))
2075 ixl_mq_start_locked(ifp, txr);
2076 mtx_unlock(&txr->mtx);
2078 taskqueue_enqueue(que->tq, &que->task);
2083 /* Reenable this interrupt - hmmm */
2084 ixlv_enable_queue_irq(hw, que->me);
2089 /*********************************************************************
2091 * MSIX Queue Interrupt Service routine
2093 **********************************************************************/
2095 ixlv_msix_que(void *arg)
2097 struct ixl_queue *que = arg;
2098 struct ixl_vsi *vsi = que->vsi;
2099 struct i40e_hw *hw = vsi->hw;
2100 struct tx_ring *txr = &que->txr;
2101 bool more_tx, more_rx;
2103 /* Spurious interrupts are ignored */
2104 if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
2109 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
2111 mtx_lock(&txr->mtx);
2112 more_tx = ixl_txeof(que);
2114 ** Make certain that if the stack
2115 ** has anything queued the task gets
2116 ** scheduled to handle it.
2118 if (!drbr_empty(vsi->ifp, txr->br))
2120 mtx_unlock(&txr->mtx);
2122 ixlv_set_queue_rx_itr(que);
2123 ixlv_set_queue_tx_itr(que);
2125 if (more_tx || more_rx)
2126 taskqueue_enqueue(que->tq, &que->task);
2128 ixlv_enable_queue_irq(hw, que->me);
2134 /*********************************************************************
2136 * Media Ioctl callback
2138 * This routine is called whenever the user queries the status of
2139 * the interface using ifconfig.
2141 **********************************************************************/
2143 ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
2145 struct ixl_vsi *vsi = ifp->if_softc;
2146 struct ixlv_sc *sc = vsi->back;
2148 INIT_DBG_IF(ifp, "begin");
2152 ixlv_update_link_status(sc);
2154 ifmr->ifm_status = IFM_AVALID;
2155 ifmr->ifm_active = IFM_ETHER;
2158 mtx_unlock(&sc->mtx);
2159 INIT_DBG_IF(ifp, "end: link not up");
2163 ifmr->ifm_status |= IFM_ACTIVE;
2164 /* Hardware is always full-duplex */
2165 ifmr->ifm_active |= IFM_FDX;
2166 mtx_unlock(&sc->mtx);
2167 INIT_DBG_IF(ifp, "end");
2171 /*********************************************************************
2173 * Media Ioctl callback
2175 * This routine is called when the user changes speed/duplex using
2176 * media/mediaopt options with ifconfig.
2178 **********************************************************************/
2180 ixlv_media_change(struct ifnet * ifp)
2182 struct ixl_vsi *vsi = ifp->if_softc;
2183 struct ifmedia *ifm = &vsi->media;
2185 INIT_DBG_IF(ifp, "begin");
2187 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2190 INIT_DBG_IF(ifp, "end");
2195 /*********************************************************************
2196 * Multicast Initialization
2198 * This routine is called by init to reset to a fresh state.
2200 **********************************************************************/
2203 ixlv_init_multi(struct ixl_vsi *vsi)
2205 struct ixlv_mac_filter *f;
2206 struct ixlv_sc *sc = vsi->back;
2209 IOCTL_DBG_IF(vsi->ifp, "begin");
2211 /* First clear any multicast filters */
2212 SLIST_FOREACH(f, sc->mac_filters, next) {
2213 if ((f->flags & IXL_FILTER_USED)
2214 && (f->flags & IXL_FILTER_MC)) {
2215 f->flags |= IXL_FILTER_DEL;
2220 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2221 IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2224 IOCTL_DBG_IF(vsi->ifp, "end");
2228 ixlv_add_multi(struct ixl_vsi *vsi)
2230 struct ifmultiaddr *ifma;
2231 struct ifnet *ifp = vsi->ifp;
2232 struct ixlv_sc *sc = vsi->back;
2235 IOCTL_DBG_IF(ifp, "begin");
2237 if_maddr_rlock(ifp);
2239 ** Get a count, to decide if we
2240 ** should simply use multicast promiscuous mode.
2242 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2243 if (ifma->ifma_addr->sa_family != AF_LINK)
2247 if_maddr_runlock(ifp);
2249 // TODO: Remove -- cannot set promiscuous mode in a VF
2250 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
2251 /* delete all multicast filters */
2252 ixlv_init_multi(vsi);
2253 sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
2254 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2255 IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
2257 IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
2262 if_maddr_rlock(ifp);
2263 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2264 if (ifma->ifma_addr->sa_family != AF_LINK)
2266 if (!ixlv_add_mac_filter(sc,
2267 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2271 if_maddr_runlock(ifp);
2273 ** Notify the AQ task that sw filters need to be added to the hw list.
2277 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2278 IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
2281 IOCTL_DBG_IF(ifp, "end");
2285 ixlv_del_multi(struct ixl_vsi *vsi)
2287 struct ixlv_mac_filter *f;
2288 struct ifmultiaddr *ifma;
2289 struct ifnet *ifp = vsi->ifp;
2290 struct ixlv_sc *sc = vsi->back;
2294 IOCTL_DBG_IF(ifp, "begin");
2296 /* Search for removed multicast addresses */
2297 if_maddr_rlock(ifp);
2298 SLIST_FOREACH(f, sc->mac_filters, next) {
2299 if ((f->flags & IXL_FILTER_USED)
2300 && (f->flags & IXL_FILTER_MC)) {
2301 /* check if mac address in filter is in sc's list */
2303 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2304 if (ifma->ifma_addr->sa_family != AF_LINK)
2307 (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2308 if (cmp_etheraddr(f->macaddr, mc_addr)) {
2313 /* if this filter is not in the sc's list, remove it */
2314 if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
2315 f->flags |= IXL_FILTER_DEL;
2317 IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
2318 MAC_FORMAT_ARGS(f->macaddr));
2320 else if (match == FALSE)
2321 IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
2322 MAC_FORMAT_ARGS(f->macaddr));
2325 if_maddr_runlock(ifp);
2328 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2329 IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2332 IOCTL_DBG_IF(ifp, "end");
2335 /*********************************************************************
2338 * This routine checks for link status, updates statistics,
2339 * and runs the watchdog check.
2341 **********************************************************************/
2344 ixlv_local_timer(void *arg)
2346 struct ixlv_sc *sc = arg;
2347 struct i40e_hw *hw = &sc->hw;
2348 struct ixl_vsi *vsi = &sc->vsi;
2349 struct ixl_queue *que = vsi->queues;
2350 device_t dev = sc->dev;
2354 IXLV_CORE_LOCK_ASSERT(sc);
2356 /* If Reset is in progress just bail */
2357 if (sc->init_state == IXLV_RESET_PENDING)
2360 /* Check for when PF triggers a VF reset */
2361 val = rd32(hw, I40E_VFGEN_RSTAT) &
2362 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2364 if (val != I40E_VFR_VFACTIVE
2365 && val != I40E_VFR_COMPLETED) {
2366 DDPRINTF(dev, "reset in progress! (%d)", val);
2370 ixlv_request_stats(sc);
2372 /* clean and process any events */
2373 taskqueue_enqueue(sc->tq, &sc->aq_irq);
2376 ** Check status on the queues for a hang
2378 mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
2379 I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK);
2381 for (int i = 0; i < vsi->num_queues; i++,que++) {
2382 /* Any queues with outstanding work get a sw irq */
2384 wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
2386 ** Each time txeof runs without cleaning, while there
2387 ** are uncleaned descriptors, it increments busy. If
2388 ** that count reaches the limit we declare the queue hung.
2390 if (que->busy == IXL_QUEUE_HUNG) {
2392 /* Mark the queue as inactive */
2393 vsi->active_queues &= ~((u64)1 << que->me);
2396 /* Check if we've come back from hung */
2397 if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
2398 vsi->active_queues |= ((u64)1 << que->me);
2400 if (que->busy >= IXL_MAX_TX_BUSY) {
2401 device_printf(dev,"Warning queue %d "
2402 "appears to be hung!\n", i);
2403 que->busy = IXL_QUEUE_HUNG;
2407 /* Only reset when all queues show hung */
2408 if (hung == vsi->num_queues)
2410 callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
2414 device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
2415 sc->init_state = IXLV_RESET_REQUIRED;
2416 ixlv_init_locked(sc);
/*
** Note: this routine updates the OS on the link state;
**	the real check of the hardware only happens with
**	a link interrupt.
*/
void
ixlv_update_link_status(struct ixlv_sc *sc)
{
	struct ixl_vsi	*vsi = &sc->vsi;
	struct ifnet	*ifp = vsi->ifp;

	if (sc->link_up) {
		if (vsi->link_active == FALSE) {
			if_printf(ifp, "Link is Up, %d Gbps\n",
			    (sc->link_speed == I40E_LINK_SPEED_40GB) ? 40 : 10);
			vsi->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			if_printf(ifp, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			vsi->link_active = FALSE;
		}
	}
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/
static void
ixlv_stop(struct ixlv_sc *sc)
{
	struct ifnet	*ifp;
	int		start;

	ifp = sc->vsi.ifp;
	INIT_DBG_IF(ifp, "begin");

	IXLV_CORE_LOCK_ASSERT(sc);

	ixl_vc_flush(&sc->vc_mgr);
	ixlv_disable_queues(sc);

	start = ticks;
	while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
	    ((ticks - start) < hz/10))
		ixlv_do_adminq_locked(sc);

	/* Stop the local timer */
	callout_stop(&sc->timer);

	INIT_DBG_IF(ifp, "end");
}
/*********************************************************************
 *
 *  Free all station queue structs.
 *
 **********************************************************************/
static void
ixlv_free_queues(struct ixl_vsi *vsi)
{
	struct ixlv_sc		*sc = (struct ixlv_sc *)vsi->back;
	struct ixl_queue	*que = vsi->queues;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		struct rx_ring *rxr = &que->rxr;

		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
			continue;
		IXL_TX_LOCK(txr);
		ixl_free_que_tx(que);
		if (txr->base)
			i40e_free_dma_mem(&sc->hw, &txr->dma);
		IXL_TX_UNLOCK(txr);
		IXL_TX_LOCK_DESTROY(txr);

		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
			continue;
		IXL_RX_LOCK(rxr);
		ixl_free_que_rx(que);
		if (rxr->base)
			i40e_free_dma_mem(&sc->hw, &rxr->dma);
		IXL_RX_UNLOCK(rxr);
		IXL_RX_LOCK_DESTROY(rxr);
	}
	free(vsi->queues, M_DEVBUF);
}
/*
** ixlv_config_rss - setup RSS
**
** RSS keys and table are cleared on VF reset.
*/
static void
ixlv_config_rss(struct ixlv_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	struct ixl_vsi	*vsi = &sc->vsi;
	u32		lut = 0;
	u64		set_hena = 0, hena;
	int		i, j, que_id;
#ifdef RSS
	u32		rss_hash_config;
	u32		rss_seed[IXL_KEYSZ];
#else
	u32		rss_seed[IXL_KEYSZ] = {0x41b01687,
			    0x183cfd8c, 0xce880440, 0x580cbc3c,
			    0x35897377, 0x328b25e1, 0x4fa98922,
			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
#endif
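	/*
	** Note: when the kernel is built without "options RSS" the
	** driver falls back to the fixed seed above; it is a
	** hard-coded key rather than one derived from the host.
	*/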
	/* Don't set up RSS if using a single queue */
	if (vsi->num_queues == 1) {
		wr32(hw, I40E_VFQF_HENA(0), 0);
		wr32(hw, I40E_VFQF_HENA(1), 0);
		return;
	}

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *)&rss_seed);
#endif
	/* Fill out hash function seed */
	for (i = 0; i < IXL_KEYSZ; i++)
		wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);

	/* Enable PCTYPES for RSS: */
#ifdef RSS
	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	set_hena =
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
	    ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
	    ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
	    ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
#endif
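	/*
	** HENA is a 64-bit enable mask, one bit per hardware packet
	** classifier type (PCTYPE), split across two 32-bit registers.
	** Merge the requested hash types into whatever is already
	** enabled rather than overwriting it.
	*/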
	hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
	    ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
	hena |= set_hena;
	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
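	/*
	** The HLUT registers pack four 8-bit LUT entries per 32-bit
	** register: the loop below shifts each queue id into a sliding
	** window and flushes it on every fourth entry.  For example,
	** with four queues in the non-RSS case the first value written
	** is 0x00010203 (entry 0 in the high byte, entry 3 in the low
	** byte).
	*/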
	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
		if (j == vsi->num_queues)
			j = 0;
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues.)
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % vsi->num_queues;
#else
		que_id = j;
#endif
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (que_id & 0xF);
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3) {
			wr32(hw, I40E_VFQF_HLUT(i), lut);
			DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
		}
	}
}
/*
** This routine refreshes vlan filters; called by init,
** it scans the filter table and then updates the AQ.
*/
static void
ixlv_setup_vlan_filters(struct ixlv_sc *sc)
{
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixlv_vlan_filter	*f;
	int			cnt = 0;

	if (vsi->num_vlans == 0)
		return;
	/*
	** Scan the filter table for vlan entries,
	** and if found call for the AQ update.
	*/
	SLIST_FOREACH(f, sc->vlan_filters, next)
		if (f->flags & IXL_FILTER_ADD)
			cnt++;
	if (cnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
		    IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
}
/*
** This routine adds new MAC filters to the sc's list;
** these are later added in hardware by sending a virtual channel message.
*/
static int
ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
{
	struct ixlv_mac_filter	*f;

	/* Does one already exist? */
	f = ixlv_find_mac_filter(sc, macaddr);
	if (f != NULL) {
		IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
		    MAC_FORMAT_ARGS(macaddr));
		return (EEXIST);
	}
	/* If not, get a new empty filter */
	f = ixlv_get_mac_filter(sc);
	if (f == NULL) {
		if_printf(sc->vsi.ifp, "%s: no filters available!!\n",
		    __func__);
		return (ENOMEM);
	}
	IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
	    MAC_FORMAT_ARGS(macaddr));
	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
	f->flags |= flags;
	return (0);
}
/*
** Marks a MAC filter for deletion.
*/
static int
ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
{
	struct ixlv_mac_filter	*f;

	f = ixlv_find_mac_filter(sc, macaddr);
	if (f == NULL)
		return (ENOENT);
	f->flags |= IXL_FILTER_DEL;
	return (0);
}
/*
** Tasklet handler for MSIX Adminq interrupts
**  - done outside interrupt context since it might sleep
*/
static void
ixlv_do_adminq(void *context, int pending)
{
	struct ixlv_sc	*sc = context;

	mtx_lock(&sc->mtx);
	ixlv_do_adminq_locked(sc);
	mtx_unlock(&sc->mtx);
}
static void
ixlv_do_adminq_locked(struct ixlv_sc *sc)
{
	struct i40e_hw			*hw = &sc->hw;
	struct i40e_arq_event_info	event;
	struct i40e_virtchnl_msg	*v_msg;
	device_t			dev = sc->dev;
	u16				result = 0;
	u32				reg, oldreg;
	i40e_status			ret;

	IXLV_CORE_LOCK_ASSERT(sc);

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = sc->aq_buffer;
	v_msg = (struct i40e_virtchnl_msg *)&event.desc;

	do {
		ret = i40e_clean_arq_element(hw, &event, &result);
		if (ret)
			break;
		ixlv_vc_completion(sc, v_msg->v_opcode,
		    v_msg->v_retval, event.msg_buf, event.msg_len);
		if (result != 0)
			bzero(event.msg_buf, IXL_AQ_BUF_SZ);
	} while (result);
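	/*
	** The firmware latches VF, overflow, and critical error status
	** in the ARQ/ASQ length registers; report anything set and
	** write the register back with those bits cleared so the next
	** pass starts clean.
	*/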
	/* check for Admin queue errors */
	oldreg = reg = rd32(hw, hw->aq.arq.len);
	if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
		device_printf(dev, "ARQ VF Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
	}
	if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
		device_printf(dev, "ARQ Overflow Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
	}
	if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
		device_printf(dev, "ARQ Critical Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.arq.len, reg);

	oldreg = reg = rd32(hw, hw->aq.asq.len);
	if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
		device_printf(dev, "ASQ VF Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
	}
	if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
		device_printf(dev, "ASQ Overflow Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
	}
	if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
		device_printf(dev, "ASQ Critical Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.asq.len, reg);

	ixlv_enable_adminq_irq(hw);
}
static void
ixlv_add_sysctls(struct ixlv_sc *sc)
{
	device_t dev = sc->dev;
	struct ixl_vsi *vsi = &sc->vsi;
	struct i40e_eth_stats *es = &vsi->eth_stats;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);

	struct sysctl_oid *vsi_node, *queue_node;
	struct sysctl_oid_list *vsi_list, *queue_list;

#define QUEUE_NAME_LEN 32
	char queue_namebuf[QUEUE_NAME_LEN];

	struct ixl_queue *queues = vsi->queues;
	struct tx_ring *txr;
	struct rx_ring *rxr;

	/* Driver statistics sysctls */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &sc->watchdog_events,
	    "Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
	    CTLFLAG_RD, &sc->admin_irq,
	    "Admin Queue IRQ Handled");

	/* VSI statistics sysctls */
	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
	    CTLFLAG_RD, NULL, "VSI-specific statistics");
	vsi_list = SYSCTL_CHILDREN(vsi_node);

	struct ixl_sysctl_info ctls[] =
	{
		{&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
		{&es->rx_unicast, "ucast_pkts_rcvd",
			"Unicast Packets Received"},
		{&es->rx_multicast, "mcast_pkts_rcvd",
			"Multicast Packets Received"},
		{&es->rx_broadcast, "bcast_pkts_rcvd",
			"Broadcast Packets Received"},
		{&es->rx_discards, "rx_discards", "Discarded RX packets"},
		{&es->rx_unknown_protocol, "rx_unknown_proto",
			"RX unknown protocol packets"},
		{&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
		{&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
		{&es->tx_multicast, "mcast_pkts_txd",
			"Multicast Packets Transmitted"},
		{&es->tx_broadcast, "bcast_pkts_txd",
			"Broadcast Packets Transmitted"},
		{&es->tx_errors, "tx_errors", "TX packet errors"},
		/* end */
		{0, 0, 0}
	};
	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name,
		    CTLFLAG_RD, entry->stat,
		    entry->description);
		entry++;
	}

	/* Queue statistics sysctls */
	for (int q = 0; q < vsi->num_queues; q++) {
		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		txr = &(queues[q].txr);
		rxr = &(queues[q].rxr);

		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
		    CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
		    "m_defrag() failed");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
		    CTLFLAG_RD, &(queues[q].dropped_pkts),
		    "Driver dropped packets");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
		    CTLFLAG_RD, &(queues[q].irqs),
		    "irqs on this queue");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
		    CTLFLAG_RD, &(queues[q].tso),
		    "TSO");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
		    CTLFLAG_RD, &(queues[q].tx_dma_setup),
		    "Driver tx dma failure in xmit");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
		    CTLFLAG_RD, &(txr->no_desc),
		    "Queue No Descriptor Available");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
		    CTLFLAG_RD, &(txr->total_packets),
		    "Queue Packets Transmitted");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
		    CTLFLAG_RD, &(txr->tx_bytes),
		    "Queue Bytes Transmitted");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
		    CTLFLAG_RD, &(rxr->rx_packets),
		    "Queue Packets Received");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
		    CTLFLAG_RD, &(rxr->rx_bytes),
		    "Queue Bytes Received");

		/* Examine queue state */
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head",
		    CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
		    sizeof(struct ixl_queue),
		    ixlv_sysctl_qtx_tail_handler, "IU",
		    "Queue Transmit Descriptor Tail");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_head",
		    CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
		    sizeof(struct ixl_queue),
		    ixlv_sysctl_qrx_tail_handler, "IU",
		    "Queue Receive Descriptor Tail");
	}
}
static void
ixlv_init_filters(struct ixlv_sc *sc)
{
	sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INIT(sc->mac_filters);
	sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INIT(sc->vlan_filters);
}
static void
ixlv_free_filters(struct ixlv_sc *sc)
{
	struct ixlv_mac_filter	*f;
	struct ixlv_vlan_filter	*v;

	while (!SLIST_EMPTY(sc->mac_filters)) {
		f = SLIST_FIRST(sc->mac_filters);
		SLIST_REMOVE_HEAD(sc->mac_filters, next);
		free(f, M_DEVBUF);
	}
	while (!SLIST_EMPTY(sc->vlan_filters)) {
		v = SLIST_FIRST(sc->vlan_filters);
		SLIST_REMOVE_HEAD(sc->vlan_filters, next);
		free(v, M_DEVBUF);
	}
}
/**
 * ixlv_sysctl_qtx_tail_handler
 * Retrieves I40E_QTX_TAIL1 value from hardware
 */
static int
ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_queue *que;
	int error;
	u32 val;

	que = ((struct ixl_queue *)oidp->oid_arg1);
	if (!que)
		return (0);
	val = rd32(que->vsi->hw, que->txr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}
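/*
** Both tail handlers are registered read-only (CTLFLAG_RD), so a read
** simply returns the current hardware tail register for the queue;
** sysctl_handle_int() copies the value out, and any write attempt is
** rejected by the sysctl layer.
*/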
/**
 * ixlv_sysctl_qrx_tail_handler
 * Retrieves I40E_QRX_TAIL1 value from hardware
 */
static int
ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_queue *que;
	int error;
	u32 val;

	que = ((struct ixl_queue *)oidp->oid_arg1);
	if (!que)
		return (0);
	val = rd32(que->vsi->hw, que->rxr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}