/******************************************************************************

  Copyright (c) 2013-2014, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#include "ixl.h"
#include "ixlv.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixlv_driver_version[] = "1.2.0";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixlv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
        /* required last entry */
        {0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixlv_strings[] = {
        "Intel(R) Ethernet Connection XL710 VF Driver"
};


/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixlv_probe(device_t);
static int      ixlv_attach(device_t);
static int      ixlv_detach(device_t);
static int      ixlv_shutdown(device_t);
static void     ixlv_init_locked(struct ixlv_sc *);
static int      ixlv_allocate_pci_resources(struct ixlv_sc *);
static void     ixlv_free_pci_resources(struct ixlv_sc *);
static int      ixlv_assign_msix(struct ixlv_sc *);
static int      ixlv_init_msix(struct ixlv_sc *);
static int      ixlv_init_taskqueue(struct ixlv_sc *);
static int      ixlv_setup_queues(struct ixlv_sc *);
static void     ixlv_config_rss(struct ixlv_sc *);
static void     ixlv_stop(struct ixlv_sc *);
static void     ixlv_add_multi(struct ixl_vsi *);
static void     ixlv_del_multi(struct ixl_vsi *);
static void     ixlv_free_queues(struct ixl_vsi *);
static int      ixlv_setup_interface(device_t, struct ixlv_sc *);

static int      ixlv_media_change(struct ifnet *);
static void     ixlv_media_status(struct ifnet *, struct ifmediareq *);

static void     ixlv_local_timer(void *);

static int      ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
static int      ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void     ixlv_init_filters(struct ixlv_sc *);
static void     ixlv_free_filters(struct ixlv_sc *);

static void     ixlv_msix_que(void *);
static void     ixlv_msix_adminq(void *);
static void     ixlv_do_adminq(void *, int);
static void     ixlv_do_adminq_locked(struct ixlv_sc *sc);
static void     ixlv_handle_que(void *, int);
static int      ixlv_reset(struct ixlv_sc *);
static int      ixlv_reset_complete(struct i40e_hw *);
static void     ixlv_set_queue_rx_itr(struct ixl_queue *);
static void     ixlv_set_queue_tx_itr(struct ixl_queue *);
static void     ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
                    enum i40e_status_code);

static void     ixlv_enable_adminq_irq(struct i40e_hw *);
static void     ixlv_disable_adminq_irq(struct i40e_hw *);
static void     ixlv_enable_queue_irq(struct i40e_hw *, int);
static void     ixlv_disable_queue_irq(struct i40e_hw *, int);

static void     ixlv_setup_vlan_filters(struct ixlv_sc *);
static void     ixlv_register_vlan(void *, struct ifnet *, u16);
static void     ixlv_unregister_vlan(void *, struct ifnet *, u16);

static void     ixlv_init_hw(struct ixlv_sc *);
static int      ixlv_setup_vc(struct ixlv_sc *);
static int      ixlv_vf_config(struct ixlv_sc *);

static void     ixlv_cap_txcsum_tso(struct ixl_vsi *,
                    struct ifnet *, int);

static void     ixlv_add_sysctls(struct ixlv_sc *);
static int      ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int      ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixlv_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, ixlv_probe),
        DEVMETHOD(device_attach, ixlv_attach),
        DEVMETHOD(device_detach, ixlv_detach),
        DEVMETHOD(device_shutdown, ixlv_shutdown),
        {0, 0}
};

static driver_t ixlv_driver = {
        "ixlv", ixlv_methods, sizeof(struct ixlv_sc),
};

devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);

MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);
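
/*
 * DRIVER_MODULE() above registers this driver with the PCI bus.
 * Assuming the usual if_ixlv.ko module name, the driver can be loaded
 * at boot by adding if_ixlv_load="YES" to /boot/loader.conf.
 */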

/*
** TUNABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
                   "IXLV driver parameters");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixlv_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixlv_ringsz, 0, "Descriptor Ring Size");
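
/*
 * All hw.ixlv.* knobs below are CTLFLAG_RDTUN: read-only once the
 * driver is loaded, so they are set as loader tunables, e.g. in
 * /boot/loader.conf (the value shown is only an example):
 *
 *   hw.ixlv.ringsz="2048"
 */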

/* Set to zero to auto-calculate */
int ixlv_max_queues = 0;
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixlv_max_queues, 0, "Number of Queues");

/*
** Number of entries in Tx queue buf_ring.
** Increasing this will reduce the number of
** errors when transmitting fragmented UDP
** packets.
*/
static int ixlv_txbrsz = DEFAULT_TXBRSZ;
TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
    &ixlv_txbrsz, 0, "TX Buf Ring Size");

/*
** Controls for Interrupt Throttling
**      - true/false for dynamic adjustment
**      - default values for static ITR
*/
int ixlv_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixlv_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixlv_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixlv_rx_itr, 0, "RX Interrupt Rate");

int ixlv_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixlv_tx_itr, 0, "TX Interrupt Rate");
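
/*
 * Note: IXL_ITR_8K and IXL_ITR_4K above are register values in the
 * hardware's 2-usec ITR granularity, corresponding to roughly 8000
 * and 4000 interrupts per second per queue, respectively.
 */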


/*********************************************************************
 *  Device identification routine
 *
 *  ixlv_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixlv_probe(device_t dev)
{
        ixl_vendor_info_t *ent;

        u16     pci_vendor_id, pci_device_id;
        u16     pci_subvendor_id, pci_subdevice_id;
        char    device_name[256];

        INIT_DEBUGOUT("ixlv_probe: begin");

        pci_vendor_id = pci_get_vendor(dev);
        if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
                return (ENXIO);

        pci_device_id = pci_get_device(dev);
        pci_subvendor_id = pci_get_subvendor(dev);
        pci_subdevice_id = pci_get_subdevice(dev);

        ent = ixlv_vendor_info_array;
        while (ent->vendor_id != 0) {
                if ((pci_vendor_id == ent->vendor_id) &&
                    (pci_device_id == ent->device_id) &&

                    ((pci_subvendor_id == ent->subvendor_id) ||
                     (ent->subvendor_id == 0)) &&

                    ((pci_subdevice_id == ent->subdevice_id) ||
                     (ent->subdevice_id == 0))) {
                        sprintf(device_name, "%s, Version - %s",
                                ixlv_strings[ent->index],
                                ixlv_driver_version);
                        device_set_desc_copy(dev, device_name);
                        return (BUS_PROBE_DEFAULT);
                }
                ent++;
        }
        return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_attach(device_t dev)
{
        struct ixlv_sc  *sc;
        struct i40e_hw  *hw;
        struct ixl_vsi  *vsi;
        int             error = 0;

        INIT_DBG_DEV(dev, "begin");

        /* Allocate, clear, and link in our primary soft structure */
        sc = device_get_softc(dev);
        sc->dev = sc->osdep.dev = dev;
        hw = &sc->hw;
        vsi = &sc->vsi;
        vsi->dev = dev;

        /* Initialize hw struct */
        ixlv_init_hw(sc);

        /* Allocate filter lists */
        ixlv_init_filters(sc);

        /* Core Lock Init */
        mtx_init(&sc->mtx, device_get_nameunit(dev),
            "IXL SC Lock", MTX_DEF);

        /* Set up the timer callout */
        callout_init_mtx(&sc->timer, &sc->mtx, 0);

        /* Do PCI setup - map BAR0, etc */
        if (ixlv_allocate_pci_resources(sc)) {
                device_printf(dev, "%s: Allocation of PCI resources failed\n",
                    __func__);
                error = ENXIO;
                goto err_early;
        }

        INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");

        error = i40e_set_mac_type(hw);
        if (error) {
                device_printf(dev, "%s: set_mac_type failed: %d\n",
                    __func__, error);
                goto err_pci_res;
        }

        error = ixlv_reset_complete(hw);
        if (error) {
                device_printf(dev, "%s: Device is still being reset\n",
                    __func__);
                goto err_pci_res;
        }

        INIT_DBG_DEV(dev, "VF Device is ready for configuration");

        error = ixlv_setup_vc(sc);
        if (error) {
                device_printf(dev, "%s: Error setting up PF comms, %d\n",
                    __func__, error);
                goto err_pci_res;
        }

        INIT_DBG_DEV(dev, "PF API version verified");

        /* TODO: Figure out why MDD events occur when this reset is removed. */
        /* Need API version before sending reset message */
        error = ixlv_reset(sc);
        if (error) {
                device_printf(dev, "VF reset failed; reload the driver\n");
                goto err_aq;
        }

        INIT_DBG_DEV(dev, "VF reset complete");

        /* Ask for VF config from PF */
        error = ixlv_vf_config(sc);
        if (error) {
                device_printf(dev, "Error getting configuration from PF: %d\n",
                    error);
                goto err_aq;
        }

        INIT_DBG_DEV(dev, "VF config from PF:");
        INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
            sc->vf_res->num_vsis,
            sc->vf_res->num_queue_pairs,
            sc->vf_res->max_vectors,
            sc->vf_res->max_mtu);
        INIT_DBG_DEV(dev, "Offload flags: %#010x",
            sc->vf_res->vf_offload_flags);

        // TODO: Move this into ixlv_vf_config?
        /* got VF config message back from PF, now we can parse it */
        for (int i = 0; i < sc->vf_res->num_vsis; i++) {
                if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
                        sc->vsi_res = &sc->vf_res->vsi_res[i];
        }
        if (!sc->vsi_res) {
                device_printf(dev, "%s: no LAN VSI found\n", __func__);
                error = EIO;
                goto err_res_buf;
        }

        INIT_DBG_DEV(dev, "Resource Acquisition complete");

        /* If no mac address was assigned just make a random one */
        if (!ixlv_check_ether_addr(hw->mac.addr)) {
                u8 addr[ETHER_ADDR_LEN];
                arc4rand(&addr, sizeof(addr), 0);
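                /*
                 * Clear the group (multicast) bit and set the
                 * locally-administered bit in the first octet, the
                 * standard IEEE 802 convention for software-generated
                 * MAC addresses.
                 */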
                addr[0] &= 0xFE;
                addr[0] |= 0x02;
                bcopy(addr, hw->mac.addr, sizeof(addr));
        }

        vsi->id = sc->vsi_res->vsi_id;
        vsi->back = (void *)sc;
        vsi->link_up = TRUE;

        /* This allocates the memory and early settings */
        if (ixlv_setup_queues(sc) != 0) {
                device_printf(dev, "%s: setup queues failed!\n",
                    __func__);
                error = EIO;
                goto out;
        }

        /* Setup the stack interface */
        if (ixlv_setup_interface(dev, sc) != 0) {
                device_printf(dev, "%s: setup interface failed!\n",
                    __func__);
                error = EIO;
                goto out;
        }

        INIT_DBG_DEV(dev, "Queue memory and interface setup");

        /* Do queue interrupt setup */
        ixlv_assign_msix(sc);

        /* Start AdminQ taskqueue */
        ixlv_init_taskqueue(sc);

        /* Initialize stats */
        bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
        ixlv_add_sysctls(sc);

        /* Register for VLAN events */
        vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
            ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
        vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
            ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

        /* We want AQ enabled early */
        ixlv_enable_adminq_irq(hw);

        /* Set things up to run init */
        sc->init_state = IXLV_INIT_READY;

        ixl_vc_init_mgr(sc, &sc->vc_mgr);

        INIT_DBG_DEV(dev, "end");
        return (error);

out:
        ixlv_free_queues(vsi);
err_res_buf:
        free(sc->vf_res, M_DEVBUF);
err_aq:
        i40e_shutdown_adminq(hw);
err_pci_res:
        ixlv_free_pci_resources(sc);
err_early:
        mtx_destroy(&sc->mtx);
        ixlv_free_filters(sc);
        INIT_DBG_DEV(dev, "end: error %d", error);
        return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_detach(device_t dev)
{
        struct ixlv_sc  *sc = device_get_softc(dev);
        struct ixl_vsi  *vsi = &sc->vsi;

        INIT_DBG_DEV(dev, "begin");

        /* Make sure VLANs are not using the driver */
        if (vsi->ifp->if_vlantrunk != NULL) {
                device_printf(dev, "Vlan in use, detach first\n");
                INIT_DBG_DEV(dev, "end");
                return (EBUSY);
        }

        /* Stop driver */
        ether_ifdetach(vsi->ifp);
        if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
                mtx_lock(&sc->mtx);
                ixlv_stop(sc);
                mtx_unlock(&sc->mtx);
        }

        /* Unregister VLAN events */
        if (vsi->vlan_attach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
        if (vsi->vlan_detach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

        /* Drain VC mgr */
        callout_drain(&sc->vc_mgr.callout);

        i40e_shutdown_adminq(&sc->hw);
        taskqueue_free(sc->tq);
        if_free(vsi->ifp);
        free(sc->vf_res, M_DEVBUF);
        ixlv_free_pci_resources(sc);
        ixlv_free_queues(vsi);
        mtx_destroy(&sc->mtx);
        ixlv_free_filters(sc);

        bus_generic_detach(dev);
        INIT_DBG_DEV(dev, "end");
        return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixlv_shutdown(device_t dev)
{
        struct ixlv_sc  *sc = device_get_softc(dev);

        INIT_DBG_DEV(dev, "begin");

        mtx_lock(&sc->mtx);
        ixlv_stop(sc);
        mtx_unlock(&sc->mtx);

        INIT_DBG_DEV(dev, "end");
        return (0);
}

/*
 * Configure TXCSUM(IPV6) and TSO(4/6)
 *      - the hardware handles these together so we
 *        need to tweak them in tandem
 */
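/*
 * For example, `ifconfig ixlv0 -txcsum` while TSO4 is enabled clears
 * both TXCSUM and TSO4 (remembering TSO4 via IXL_FLAGS_KEEP_TSO4), so
 * a later `ifconfig ixlv0 txcsum` re-enables TSO4 as well.
 */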
static void
ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
        /* Enable/disable TXCSUM/TSO4 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable |= IFCAP_TXCSUM;
                        /* enable TXCSUM, restore TSO if previously enabled */
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                                ifp->if_capenable |= IFCAP_TSO4;
                        }
                } else if (mask & IFCAP_TSO4) {
                        ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                        if_printf(ifp,
                            "TSO4 requires txcsum, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM)
                        ifp->if_capenable &= ~IFCAP_TXCSUM;
                else if (mask & IFCAP_TSO4)
                        ifp->if_capenable |= IFCAP_TSO4;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && (ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO4;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
                        if_printf(ifp,
                            "TSO4 requires txcsum, disabling both...\n");
                } else if (mask & IFCAP_TSO4)
                        ifp->if_capenable &= ~IFCAP_TSO4;
        }

        /* Enable/disable TXCSUM_IPV6/TSO6 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                                ifp->if_capenable |= IFCAP_TSO6;
                        }
                } else if (mask & IFCAP_TSO6) {
                        ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                        if_printf(ifp,
                            "TSO6 requires txcsum6, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6)
                        ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
                else if (mask & IFCAP_TSO6)
                        ifp->if_capenable |= IFCAP_TSO6;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && (ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO6;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        if_printf(ifp,
                            "TSO6 requires txcsum6, disabling both...\n");
                } else if (mask & IFCAP_TSO6)
                        ifp->if_capenable &= ~IFCAP_TSO6;
        }
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixlv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        struct ixl_vsi          *vsi = ifp->if_softc;
        struct ixlv_sc          *sc = vsi->back;
        struct ifreq            *ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
        struct ifaddr           *ifa = (struct ifaddr *)data;
        bool                    avoid_reset = FALSE;
#endif
        int                     error = 0;

        switch (command) {

        case SIOCSIFADDR:
#ifdef INET
                if (ifa->ifa_addr->sa_family == AF_INET)
                        avoid_reset = TRUE;
#endif
#ifdef INET6
                if (ifa->ifa_addr->sa_family == AF_INET6)
                        avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
                /*
                ** Calling init results in link renegotiation,
                ** so we avoid doing it when possible.
                */
                if (avoid_reset) {
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                                ixlv_init(vsi);
#ifdef INET
                        if (!(ifp->if_flags & IFF_NOARP))
                                arp_ifinit(ifp, ifa);
#endif
                } else
                        error = ether_ioctl(ifp, command, data);
                break;
#endif
        case SIOCSIFMTU:
                IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
                mtx_lock(&sc->mtx);
                if (ifr->ifr_mtu > IXL_MAX_FRAME -
                    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
                        error = EINVAL;
                        IOCTL_DBG_IF(ifp, "mtu too large");
                } else {
                        IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", ifp->if_mtu, ifr->ifr_mtu);
                        // ERJ: Interestingly enough, these types don't match
                        ifp->if_mtu = (u_long)ifr->ifr_mtu;
                        vsi->max_frame_size =
                            ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
                            + ETHER_VLAN_ENCAP_LEN;
                        ixlv_init_locked(sc);
                }
                mtx_unlock(&sc->mtx);
                break;
        case SIOCSIFFLAGS:
                IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
                mtx_lock(&sc->mtx);
                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                                ixlv_init_locked(sc);
                } else
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                ixlv_stop(sc);
                sc->if_flags = ifp->if_flags;
                mtx_unlock(&sc->mtx);
                break;
        case SIOCADDMULTI:
                IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        mtx_lock(&sc->mtx);
                        ixlv_disable_intr(vsi);
                        ixlv_add_multi(vsi);
                        ixlv_enable_intr(vsi);
                        mtx_unlock(&sc->mtx);
                }
                break;
        case SIOCDELMULTI:
                IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
                if (sc->init_state == IXLV_RUNNING) {
                        mtx_lock(&sc->mtx);
                        ixlv_disable_intr(vsi);
                        ixlv_del_multi(vsi);
                        ixlv_enable_intr(vsi);
                        mtx_unlock(&sc->mtx);
                }
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
                error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
                break;
        case SIOCSIFCAP:
        {
                int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");

                ixlv_cap_txcsum_tso(vsi, ifp, mask);

                if (mask & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                if (mask & IFCAP_RXCSUM_IPV6)
                        ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
                if (mask & IFCAP_LRO)
                        ifp->if_capenable ^= IFCAP_LRO;
                if (mask & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                if (mask & IFCAP_VLAN_HWFILTER)
                        ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        ixlv_init(vsi);
                }
                VLAN_CAPABILITIES(ifp);

                break;
        }

        default:
                IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}

/*
** Reinitializing the VF is unfortunately more complicated than for a
** physical device: the PF must more or less completely recreate our
** memory, so many things that were done only once at attach in
** traditional drivers must be redone at each reinitialization. This
** function does that 'prelude' so we can then call the normal locked
** init code.
*/
int
ixlv_reinit_locked(struct ixlv_sc *sc)
{
        struct i40e_hw          *hw = &sc->hw;
        struct ixl_vsi          *vsi = &sc->vsi;
        struct ifnet            *ifp = vsi->ifp;
        struct ixlv_mac_filter  *mf, *mf_temp;
        struct ixlv_vlan_filter *vf;
        int                     error = 0;

        INIT_DBG_IF(ifp, "begin");

        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                ixlv_stop(sc);

        error = ixlv_reset(sc);

        INIT_DBG_IF(ifp, "VF was reset");

        /* set the state in case we went thru RESET */
        sc->init_state = IXLV_RUNNING;

        /*
        ** Resetting the VF drops all filters from hardware;
        ** we need to mark them to be re-added in init.
        */
        SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
                if (mf->flags & IXL_FILTER_DEL) {
                        SLIST_REMOVE(sc->mac_filters, mf,
                            ixlv_mac_filter, next);
                        free(mf, M_DEVBUF);
                } else
                        mf->flags |= IXL_FILTER_ADD;
        }
        if (vsi->num_vlans != 0)
                SLIST_FOREACH(vf, sc->vlan_filters, next)
                        vf->flags = IXL_FILTER_ADD;
        else { /* clean any stale filters */
                while (!SLIST_EMPTY(sc->vlan_filters)) {
                        vf = SLIST_FIRST(sc->vlan_filters);
                        SLIST_REMOVE_HEAD(sc->vlan_filters, next);
                        free(vf, M_DEVBUF);
                }
        }

        ixlv_enable_adminq_irq(hw);
        ixl_vc_flush(&sc->vc_mgr);

        INIT_DBG_IF(ifp, "end");
        return (error);
}

static void
ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
        enum i40e_status_code code)
{
        struct ixlv_sc *sc;

        sc = arg;

        /*
         * Ignore "Adapter Stopped" messages, which occur when an
         * ifconfig down happens while a command is in progress;
         * we don't want to print an error in that case.
         */
        if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
                if_printf(sc->vsi.ifp,
                    "Error %d waiting for PF to complete operation %d\n",
                    code, cmd->request);
        }
}

static void
ixlv_init_locked(struct ixlv_sc *sc)
{
        struct i40e_hw          *hw = &sc->hw;
        struct ixl_vsi          *vsi = &sc->vsi;
        struct ixl_queue        *que = vsi->queues;
        struct ifnet            *ifp = vsi->ifp;
        int                      error = 0;

        INIT_DBG_IF(ifp, "begin");

        IXLV_CORE_LOCK_ASSERT(sc);

        /* Do a reinit first if an init has already been done */
        if ((sc->init_state == IXLV_RUNNING) ||
            (sc->init_state == IXLV_RESET_REQUIRED) ||
            (sc->init_state == IXLV_RESET_PENDING))
                error = ixlv_reinit_locked(sc);
        /* Don't bother with init if we failed reinit */
        if (error)
                goto init_done;

        /* Remove existing MAC filter if new MAC addr is set */
        if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
                error = ixlv_del_mac_filter(sc, hw->mac.addr);
                if (error == 0)
                        ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
                            IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
                            sc);
        }

        /* Check for an LAA mac address... */
        bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);

        ifp->if_hwassist = 0;
        if (ifp->if_capenable & IFCAP_TSO)
                ifp->if_hwassist |= CSUM_TSO;
        if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
        if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
                ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;

        /* Add mac filter for this VF to PF */
        if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
                error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
                if (!error || error == EEXIST)
                        ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
                            IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
                            sc);
        }

        /* Setup vlan's if needed */
        ixlv_setup_vlan_filters(sc);

        /* Prepare the queues for operation */
        for (int i = 0; i < vsi->num_queues; i++, que++) {
                struct  rx_ring *rxr = &que->rxr;

                ixl_init_tx_ring(que);

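                /* Use 2K clusters for standard frames, page-size for jumbo */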
                if (vsi->max_frame_size <= 2048)
                        rxr->mbuf_sz = MCLBYTES;
                else
                        rxr->mbuf_sz = MJUMPAGESIZE;
                ixl_init_rx_ring(que);
        }

        /* Configure queues */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
            IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);

        /* Set up RSS */
        ixlv_config_rss(sc);

        /* Map vectors */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
            IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);

        /* Enable queues */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
            IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);

        /* Start the local timer */
        callout_reset(&sc->timer, hz, ixlv_local_timer, sc);

        sc->init_state = IXLV_RUNNING;

init_done:
        INIT_DBG_IF(ifp, "end");
        return;
}

/*
**  Init entry point for the stack
*/
void
ixlv_init(void *arg)
{
        struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
        struct ixlv_sc *sc = vsi->back;
        int retries = 0;

        mtx_lock(&sc->mtx);
        ixlv_init_locked(sc);
        mtx_unlock(&sc->mtx);

        /* Wait for init_locked to finish */
        while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
            && ++retries < IXLV_AQ_MAX_ERR) {
                i40e_msec_delay(10);
        }
        if (retries >= IXLV_AQ_MAX_ERR)
                if_printf(vsi->ifp,
                    "Init failed to complete in allotted time!\n");
}

/*
 * ixlv_attach() helper function; gathers information about
 * the (virtual) hardware for use elsewhere in the driver.
 */
static void
ixlv_init_hw(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;

        /* Save off the information about this board */
        hw->vendor_id = pci_get_vendor(dev);
        hw->device_id = pci_get_device(dev);
        hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
        hw->subsystem_vendor_id =
            pci_read_config(dev, PCIR_SUBVEND_0, 2);
        hw->subsystem_device_id =
            pci_read_config(dev, PCIR_SUBDEV_0, 2);

        hw->bus.device = pci_get_slot(dev);
        hw->bus.func = pci_get_function(dev);
}

/*
 * ixlv_attach() helper function; initializes the admin queue
 * and attempts to establish contact with the PF by
 * retrying the initial "API version" message several times
 * or until the PF responds.
 */
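/*
 * Returns 0 on success, or a small positive code identifying the step
 * that failed: 1 = adminq init, 2 = api_ver send, 3 = ASQ timeout,
 * 4 = api_ver verify timeout, 5 = api_ver verify error.
 */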
static int
ixlv_setup_vc(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;
        int error = 0, ret_error = 0, asq_retries = 0;
        bool send_api_ver_retried = FALSE;

        /* Need to set these AQ parameters before initializing AQ */
        hw->aq.num_arq_entries = IXL_AQ_LEN;
        hw->aq.num_asq_entries = IXL_AQ_LEN;
        hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
        hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

        for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
                /* Initialize admin queue */
                error = i40e_init_adminq(hw);
                if (error) {
                        device_printf(dev, "%s: init_adminq failed: %d\n",
                            __func__, error);
                        ret_error = 1;
                        continue;
                }

                INIT_DBG_DEV(dev, "Initialized Admin Queue, attempt %d", i+1);

retry_send:
                /* Send VF's API version */
                error = ixlv_send_api_ver(sc);
                if (error) {
                        i40e_shutdown_adminq(hw);
                        ret_error = 2;
                        device_printf(dev, "%s: unable to send api"
                            " version to PF on attempt %d, error %d\n",
                            __func__, i+1, error);
                }

                asq_retries = 0;
                while (!i40e_asq_done(hw)) {
                        if (++asq_retries > IXLV_AQ_MAX_ERR) {
                                i40e_shutdown_adminq(hw);
                                DDPRINTF(dev, "Admin Queue timeout "
                                    "(waiting for send_api_ver), %d more retries...",
                                    IXLV_AQ_MAX_ERR - (i + 1));
                                ret_error = 3;
                                break;
                        }
                        i40e_msec_delay(10);
                }
                if (asq_retries > IXLV_AQ_MAX_ERR)
                        continue;

                INIT_DBG_DEV(dev, "Sent API version message to PF");

                /* Verify that the VF accepts the PF's API version */
                error = ixlv_verify_api_ver(sc);
                if (error == ETIMEDOUT) {
                        if (!send_api_ver_retried) {
                                /* Resend message, one more time */
                                send_api_ver_retried = TRUE;
                                device_printf(dev,
                                    "%s: Timeout while verifying API version on first"
                                    " try!\n", __func__);
                                goto retry_send;
                        } else {
                                device_printf(dev,
                                    "%s: Timeout while verifying API version on second"
                                    " try!\n", __func__);
                                ret_error = 4;
                                break;
                        }
                }
                if (error) {
                        device_printf(dev,
                            "%s: Unable to verify API version,"
                            " error %d\n", __func__, error);
                        ret_error = 5;
                }
                break;
        }

        if (ret_error >= 4)
                i40e_shutdown_adminq(hw);
        return (ret_error);
}

/*
 * ixlv_attach() helper function; asks the PF for this VF's
 * configuration, and saves the information if it receives it.
 */
static int
ixlv_vf_config(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;
        int bufsz, error = 0, ret_error = 0;
        int asq_retries, retried = 0;

retry_config:
        error = ixlv_send_vf_config_msg(sc);
        if (error) {
                device_printf(dev,
                    "%s: Unable to send VF config request, attempt %d,"
                    " error %d\n", __func__, retried + 1, error);
                ret_error = 2;
        }

        asq_retries = 0;
        while (!i40e_asq_done(hw)) {
                if (++asq_retries > IXLV_AQ_MAX_ERR) {
                        device_printf(dev, "%s: Admin Queue timeout "
                            "(waiting for send_vf_config_msg), attempt %d\n",
                            __func__, retried + 1);
                        ret_error = 3;
                        goto fail;
                }
                i40e_msec_delay(10);
        }

        INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
            retried + 1);

        if (!sc->vf_res) {
                bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
                    (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
                sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
                if (!sc->vf_res) {
                        device_printf(dev,
                            "%s: Unable to allocate memory for VF configuration"
                            " message from PF on attempt %d\n", __func__, retried + 1);
                        ret_error = 1;
                        goto fail;
                }
        }

        /* Check for VF config response */
        error = ixlv_get_vf_config(sc);
        if (error == ETIMEDOUT) {
                /* The 1st time we timeout, send the configuration message again */
                if (!retried) {
                        retried++;
                        goto retry_config;
                }
        }
        if (error) {
                device_printf(dev,
                    "%s: Unable to get VF configuration from PF after %d tries!\n",
                    __func__, retried + 1);
                ret_error = 4;
        }
        goto done;

fail:
        free(sc->vf_res, M_DEVBUF);
done:
        return (ret_error);
}

/*
 * Allocate MSI/X vectors, setup the AQ vector early
 */
static int
ixlv_init_msix(struct ixlv_sc *sc)
{
        device_t dev = sc->dev;
        int rid, want, vectors, queues, available;

        rid = PCIR_BAR(IXL_BAR);
        sc->msix_mem = bus_alloc_resource_any(dev,
            SYS_RES_MEMORY, &rid, RF_ACTIVE);
        if (!sc->msix_mem) {
                /* May not be enabled */
                device_printf(sc->dev,
                    "Unable to map MSIX table\n");
                goto fail;
        }

        available = pci_msix_count(dev);
        if (available == 0) { /* system has msix disabled */
                bus_release_resource(dev, SYS_RES_MEMORY,
                    rid, sc->msix_mem);
                sc->msix_mem = NULL;
                goto fail;
        }

        /* Figure out a reasonable auto config value */
        queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
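        /* e.g. 8 CPUs but only 5 available vectors yields 4 queues */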

        /* Override with hardcoded value if sane */
        if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues))
                queues = ixlv_max_queues;
#ifdef RSS
        /* If we're doing RSS, clamp at the number of RSS buckets */
        if (queues > rss_getnumbuckets())
                queues = rss_getnumbuckets();
#endif
        /* Enforce the VF max value */
        if (queues > IXLV_MAX_QUEUES)
                queues = IXLV_MAX_QUEUES;

        /*
        ** Want one vector (RX/TX pair) per queue
        ** plus an additional for the admin queue.
        */
        want = queues + 1;
        if (want <= available)  /* Have enough */
                vectors = want;
        else {
                device_printf(sc->dev,
                    "MSIX Configuration Problem, "
                    "%d vectors available but %d wanted!\n",
                    available, want);
                goto fail;
        }

#ifdef RSS
        /*
        * If we're doing RSS, the number of queues needs to
        * match the number of RSS buckets that are configured.
        *
        * + If there are more queues than RSS buckets, we'll end
        *   up with queues that get no traffic.
        *
        * + If there are more RSS buckets than queues, we'll end
        *   up having multiple RSS buckets map to the same queue,
        *   so there'll be some contention.
        */
        if (queues != rss_getnumbuckets()) {
                device_printf(dev,
                    "%s: queues (%d) != RSS buckets (%d)"
                    "; performance will be impacted.\n",
                     __func__, queues, rss_getnumbuckets());
        }
#endif

        if (pci_alloc_msix(dev, &vectors) == 0) {
                device_printf(sc->dev,
                    "Using MSIX interrupts with %d vectors\n", vectors);
                sc->msix = vectors;
                sc->vsi.num_queues = queues;
        } else {
                /* Don't report a bogus vector count to the caller */
                device_printf(sc->dev, "MSIX allocation failed!\n");
                goto fail;
        }

        /*
        ** Explicitly set the guest PCI BUSMASTER capability,
        ** and rewrite the ENABLE bit in the MSIX control
        ** register at this point so the host will
        ** successfully initialize us.
        */
        {
                u16 pci_cmd_word;
                int msix_ctrl;
                pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
                pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
                pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
                pci_find_cap(dev, PCIY_MSIX, &rid);
                rid += PCIR_MSIX_CTRL;
                msix_ctrl = pci_read_config(dev, rid, 2);
                msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
                pci_write_config(dev, rid, msix_ctrl, 2);
        }

        /* Next we need to setup the vector for the Admin Queue */
        rid = 1;        // zero vector + 1
        sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &rid, RF_SHAREABLE | RF_ACTIVE);
        if (sc->res == NULL) {
                device_printf(dev, "Unable to allocate"
                    " bus resource: AQ interrupt\n");
                goto fail;
        }
        if (bus_setup_intr(dev, sc->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixlv_msix_adminq, sc, &sc->tag)) {
                sc->res = NULL;
                device_printf(dev, "Failed to register AQ handler\n");
                goto fail;
        }
        bus_describe_intr(dev, sc->res, sc->tag, "adminq");

        return (vectors);

fail:
        /* The VF driver MUST use MSIX */
        return (0);
}

static int
ixlv_allocate_pci_resources(struct ixlv_sc *sc)
{
        int             rid;
        device_t        dev = sc->dev;

        rid = PCIR_BAR(0);
        sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &rid, RF_ACTIVE);

        if (!(sc->pci_mem)) {
                device_printf(dev, "Unable to allocate bus resource: memory\n");
                return (ENXIO);
        }

        sc->osdep.mem_bus_space_tag =
                rman_get_bustag(sc->pci_mem);
        sc->osdep.mem_bus_space_handle =
                rman_get_bushandle(sc->pci_mem);
        sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
        sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
        sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
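        /*
         * Note: the shared i40e code is assumed to access registers
         * through the osdep bus-space tag/handle above, so hw_addr is
         * pointed at the handle rather than at a direct KVA mapping.
         */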

        sc->hw.back = &sc->osdep;

        /* Disable adminq interrupts */
        ixlv_disable_adminq_irq(&sc->hw);

        /*
        ** Now set up MSI/X; it will return
        ** the number of supported vectors.
        */
        sc->msix = ixlv_init_msix(sc);

        /* We fail without MSIX support */
        if (sc->msix == 0)
                return (ENXIO);

        return (0);
}

static void
ixlv_free_pci_resources(struct ixlv_sc *sc)
{
        struct ixl_vsi         *vsi = &sc->vsi;
        struct ixl_queue       *que = vsi->queues;
        device_t                dev = sc->dev;

        /* We may get here before stations are set up */
        if (que == NULL)
                goto early;

        /*
        **  Release all msix queue resources:
        */
        for (int i = 0; i < vsi->num_queues; i++, que++) {
                int rid = que->msix + 1;
                if (que->tag != NULL) {
                        bus_teardown_intr(dev, que->res, que->tag);
                        que->tag = NULL;
                }
                if (que->res != NULL)
                        bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
        }

early:
        /* Clean the AdminQ interrupt */
        if (sc->tag != NULL) {
                bus_teardown_intr(dev, sc->res, sc->tag);
                sc->tag = NULL;
        }
        if (sc->res != NULL)
                bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);

        pci_release_msi(dev);

        if (sc->msix_mem != NULL)
                bus_release_resource(dev, SYS_RES_MEMORY,
                    PCIR_BAR(IXL_BAR), sc->msix_mem);

        if (sc->pci_mem != NULL)
                bus_release_resource(dev, SYS_RES_MEMORY,
                    PCIR_BAR(0), sc->pci_mem);

        return;
}

/*
 * Create taskqueue and tasklet for Admin Queue interrupts.
 */
static int
ixlv_init_taskqueue(struct ixlv_sc *sc)
{
        int error = 0;

        TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);

        sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
            taskqueue_thread_enqueue, &sc->tq);
        taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
            device_get_nameunit(sc->dev));

        return (error);
}

/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers for the VSI queues
 *
 **********************************************************************/
static int
ixlv_assign_msix(struct ixlv_sc *sc)
{
        device_t        dev = sc->dev;
        struct          ixl_vsi *vsi = &sc->vsi;
        struct          ixl_queue *que = vsi->queues;
        struct          tx_ring  *txr;
        int             error, rid, vector = 1;

        for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
                int cpu_id = i;
                rid = vector + 1;
                txr = &que->txr;
                que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                    RF_SHAREABLE | RF_ACTIVE);
                if (que->res == NULL) {
                        device_printf(dev, "Unable to allocate"
                            " bus resource: que interrupt [%d]\n", vector);
                        return (ENXIO);
                }
                /* Set the handler function */
                error = bus_setup_intr(dev, que->res,
                    INTR_TYPE_NET | INTR_MPSAFE, NULL,
                    ixlv_msix_que, que, &que->tag);
                if (error) {
                        que->res = NULL;
                        device_printf(dev, "Failed to register que handler\n");
                        return (error);
                }
                bus_describe_intr(dev, que->res, que->tag, "que %d", i);
                /* Bind the vector to a CPU */
#ifdef RSS
                cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
                bus_bind_intr(dev, que->res, cpu_id);
                que->msix = vector;
                /* Shift a 64-bit one so the mask doesn't overflow an int */
                vsi->que_mask |= ((u64)1 << que->msix);
                TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
                TASK_INIT(&que->task, 0, ixlv_handle_que, que);
                que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
                    taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
                taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
                    cpu_id, "%s (bucket %d)",
                    device_get_nameunit(dev), cpu_id);
#else
                taskqueue_start_threads(&que->tq, 1, PI_NET,
                    "%s que", device_get_nameunit(dev));
#endif
        }

        return (0);
}
1424
1425 /*
1426 ** Requests a VF reset from the PF.
1427 **
1428 ** Requires the VF's Admin Queue to be initialized.
1429 */
1430 static int
1431 ixlv_reset(struct ixlv_sc *sc)
1432 {
1433         struct i40e_hw  *hw = &sc->hw;
1434         device_t        dev = sc->dev;
1435         int             error = 0;
1436
1437         /* Ask the PF to reset us if we are initiating */
1438         if (sc->init_state != IXLV_RESET_PENDING)
1439                 ixlv_request_reset(sc);
1440
1441         i40e_msec_delay(100);
1442         error = ixlv_reset_complete(hw);
1443         if (error) {
1444                 device_printf(dev, "%s: VF reset failed\n",
1445                     __func__);
1446                 return (error);
1447         }
1448
1449         error = i40e_shutdown_adminq(hw);
1450         if (error) {
1451                 device_printf(dev, "%s: shutdown_adminq failed: %d\n",
1452                     __func__, error);
1453                 return (error);
1454         }
1455
1456         error = i40e_init_adminq(hw);
1457         if (error) {
1458                 device_printf(dev, "%s: init_adminq failed: %d\n",
1459                     __func__, error);
1460                 return (error);
1461         }
1462
1463         return (0);
1464 }
1465
1466 static int
1467 ixlv_reset_complete(struct i40e_hw *hw)
1468 {
1469         u32 reg;
1470
1471         for (int i = 0; i < 100; i++) {
1472                 reg = rd32(hw, I40E_VFGEN_RSTAT) &
1473                     I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1474
1475                 if ((reg == I40E_VFR_VFACTIVE) ||
1476                     (reg == I40E_VFR_COMPLETED))
1477                         return (0);
1478                 i40e_msec_delay(100);
1479         }
1480
1481         return (EBUSY);
1482 }
1483
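     /*
      * Editor's note: the loop above polls VFGEN_RSTAT every 100 ms for up
      * to 100 iterations, i.e. a worst case of about 10 seconds. A minimal
      * sketch of the same pattern as a reusable helper (hypothetical, not
      * part of the driver):
      */
     static inline int
     example_poll_vfr_state(struct i40e_hw *hw, int tries, int delay_ms)
     {
             u32 reg;

             while (tries-- > 0) {
                     reg = rd32(hw, I40E_VFGEN_RSTAT) &
                         I40E_VFGEN_RSTAT_VFR_STATE_MASK;
                     if ((reg == I40E_VFR_VFACTIVE) ||
                         (reg == I40E_VFR_COMPLETED))
                             return (0);
                     i40e_msec_delay(delay_ms);
             }
             return (EBUSY);
     }
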
1484
1485 /*********************************************************************
1486  *
1487  *  Setup networking device structure and register an interface.
1488  *
1489  **********************************************************************/
1490 static int
1491 ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
1492 {
1493         struct ifnet            *ifp;
1494         struct ixl_vsi          *vsi = &sc->vsi;
1495         struct ixl_queue        *que = vsi->queues;
1496
1497         INIT_DBG_DEV(dev, "begin");
1498
1499         ifp = vsi->ifp = if_alloc(IFT_ETHER);
1500         if (ifp == NULL) {
1501                 device_printf(dev, "%s: could not allocate ifnet"
1502                     " structure!\n", __func__);
1503                 return (-1);
1504         }
1505
1506         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1507
1508         ifp->if_mtu = ETHERMTU;
1509         ifp->if_baudrate = 4000000000;  /* placeholder; the PF reports the real VF link speed */
1510         ifp->if_init = ixlv_init;
1511         ifp->if_softc = vsi;
1512         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1513         ifp->if_ioctl = ixlv_ioctl;
1514
1515 #if __FreeBSD_version >= 1100000
1516         if_setgetcounterfn(ifp, ixl_get_counter);
1517 #endif
1518
1519         ifp->if_transmit = ixl_mq_start;
1520
1521         ifp->if_qflush = ixl_qflush;
1522         ifp->if_snd.ifq_maxlen = que->num_desc - 2;
1523
1524         ether_ifattach(ifp, sc->hw.mac.addr);
1525
1526         vsi->max_frame_size =
1527             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1528             + ETHER_VLAN_ENCAP_LEN;
1529
1530         /*
1531          * Tell the upper layer(s) we support long frames.
1532          */
1533         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1534
1535         ifp->if_capabilities |= IFCAP_HWCSUM;
1536         ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1537         ifp->if_capabilities |= IFCAP_TSO;
1538         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1539
1540         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1541                              |  IFCAP_VLAN_HWTSO
1542                              |  IFCAP_VLAN_MTU
1543                              |  IFCAP_VLAN_HWCSUM
1544                              |  IFCAP_LRO;
1545         ifp->if_capenable = ifp->if_capabilities;
1546
1547         /*
1548         ** Don't turn this on by default: if vlans are
1549         ** created on another pseudo device (e.g. lagg),
1550         ** then vlan events are not passed through, breaking
1551         ** operation, but with HW FILTER off it works. If
1552         ** using vlans directly on the ixl driver you can
1553         ** enable this and get full hardware tag filtering.
1554         */
1555         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1556
1557         /*
1558          * Specify the media types supported by this adapter and register
1559          * callbacks to update media and link information
1560          */
1561         ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
1562                      ixlv_media_status);
1563
1564         // JFV Add media types later?
1565
1566         ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1567         ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1568
1569         INIT_DBG_DEV(dev, "end");
1570         return (0);
1571 }
1572
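     /*
      * Editor's sketch: the max_frame_size computation above, worked for
      * the default MTU: 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN
      * tag) = 1522 bytes.
      */
     static inline u32
     example_mtu_to_frame(u32 mtu)
     {
             return (mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
     }
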
1573 /*
1574 ** Allocate and setup the interface queues
1575 */
1576 static int
1577 ixlv_setup_queues(struct ixlv_sc *sc)
1578 {
1579         device_t                dev = sc->dev;
1580         struct ixl_vsi          *vsi;
1581         struct ixl_queue        *que;
1582         struct tx_ring          *txr;
1583         struct rx_ring          *rxr;
1584         int                     rsize, tsize;
1585         int                     error = I40E_SUCCESS;
1586
1587         vsi = &sc->vsi;
1588         vsi->back = (void *)sc;
1589         vsi->hw = &sc->hw;
1590         vsi->num_vlans = 0;
1591
1592         /* Get memory for the station queues */
1593         if (!(vsi->queues =
1594                 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
1595                 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1596                         device_printf(dev, "Unable to allocate queue memory\n");
1597                         error = ENOMEM;
1598                         goto early;
1599         }
1600
1601         for (int i = 0; i < vsi->num_queues; i++) {
1602                 que = &vsi->queues[i];
1603                 que->num_desc = ixlv_ringsz;
1604                 que->me = i;
1605                 que->vsi = vsi;
1606                 /* mark the queue as active */
1607                 vsi->active_queues |= (u64)1 << que->me;
1608
1609                 txr = &que->txr;
1610                 txr->que = que;
1611                 txr->tail = I40E_QTX_TAIL1(que->me);
1612                 /* Initialize the TX lock */
1613                 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
1614                     device_get_nameunit(dev), que->me);
1615                 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
1616                 /*
1617                 ** Create the TX descriptor ring, the extra int is
1618                 ** added as the location for HEAD WB.
1619                 */
1620                 tsize = roundup2((que->num_desc *
1621                     sizeof(struct i40e_tx_desc)) +
1622                     sizeof(u32), DBA_ALIGN);
1623                 if (i40e_allocate_dma_mem(&sc->hw,
1624                     &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
1625                         device_printf(dev,
1626                             "Unable to allocate TX Descriptor memory\n");
1627                         error = ENOMEM;
1628                         goto fail;
1629                 }
1630                 txr->base = (struct i40e_tx_desc *)txr->dma.va;
1631                 bzero((void *)txr->base, tsize);
1632                 /* Now allocate transmit soft structs for the ring */
1633                 if (ixl_allocate_tx_data(que)) {
1634                         device_printf(dev,
1635                             "Critical Failure setting up TX structures\n");
1636                         error = ENOMEM;
1637                         goto fail;
1638                 }
1639                 /* Allocate a buf ring */
1640                 txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
1641                     M_WAITOK, &txr->mtx);
1642                 if (txr->br == NULL) {
1643                         device_printf(dev,
1644                             "Critical Failure setting up TX buf ring\n");
1645                         error = ENOMEM;
1646                         goto fail;
1647                 }
1648
1649                 /*
1650                  * Next the RX queues...
1651                  */ 
1652                 rsize = roundup2(que->num_desc *
1653                     sizeof(union i40e_rx_desc), DBA_ALIGN);
1654                 rxr = &que->rxr;
1655                 rxr->que = que;
1656                 rxr->tail = I40E_QRX_TAIL1(que->me);
1657
1658                 /* Initialize the RX side lock */
1659                 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
1660                     device_get_nameunit(dev), que->me);
1661                 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
1662
1663                 if (i40e_allocate_dma_mem(&sc->hw,
1664                     &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
1665                         device_printf(dev,
1666                             "Unable to allocate RX Descriptor memory\n");
1667                         error = ENOMEM;
1668                         goto fail;
1669                 }
1670                 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
1671                 bzero((void *)rxr->base, rsize);
1672
1673                 /* Allocate receive soft structs for the ring*/
1674                 if (ixl_allocate_rx_data(que)) {
1675                         device_printf(dev,
1676                             "Critical Failure setting up receive structs\n");
1677                         error = ENOMEM;
1678                         goto fail;
1679                 }
1680         }
1681
1682         return (0);
1683
     fail:
             for (int i = 0; i < vsi->num_queues; i++) {
                     que = &vsi->queues[i];
                     rxr = &que->rxr;
                     txr = &que->txr;
                     if (rxr->base)
                             i40e_free_dma_mem(&sc->hw, &rxr->dma);
                     if (txr->base)
                             i40e_free_dma_mem(&sc->hw, &txr->dma);
             }
             /* Free the queue array only after the per-queue teardown above */
             free(vsi->queues, M_DEVBUF);
1695
1696 early:
1697         return (error);
1698 }
1699
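     /*
      * Editor's sketch: the TX ring sizing used above, with the extra u32
      * reserved for the head-writeback location. Assuming 16-byte TX
      * descriptors and DBA_ALIGN == 128, a 1024-descriptor ring needs
      * 1024 * 16 + 4 = 16388 bytes, rounded up to 16512.
      */
     static inline int
     example_txring_bytes(int ndesc)
     {
             return (roundup2(ndesc * (int)sizeof(struct i40e_tx_desc) +
                 (int)sizeof(u32), DBA_ALIGN));
     }
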
1700 /*
1701 ** This routine is run via a vlan config EVENT;
1702 ** it enables us to use the HW Filter table since
1703 ** we can get the vlan id. This just creates the
1704 ** entry in the soft version of the VFTA; init will
1705 ** repopulate the real table.
1706 */
1707 static void
1708 ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1709 {
1710         struct ixl_vsi                  *vsi = ifp->if_softc;
1711         struct ixlv_sc          *sc = vsi->back;
1712         struct ixlv_vlan_filter *v;
1713
1714
1715         if (ifp->if_softc !=  arg)   /* Not our event */
1716                 return;
1717
1718         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1719                 return;
1720
             /* Sanity check - make sure it doesn't already exist */
             SLIST_FOREACH(v, sc->vlan_filters, next) {
                     if (v->vlan == vtag)
                             return;
             }

             mtx_lock(&sc->mtx);
             v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
             if (v == NULL) {
                     /* M_NOWAIT allocation can fail; drop the event if it does */
                     mtx_unlock(&sc->mtx);
                     return;
             }
             ++vsi->num_vlans;
             v->vlan = vtag;
             v->flags = IXL_FILTER_ADD;
             SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
             ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
                 IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
             mtx_unlock(&sc->mtx);
             return;
1737 }
1738
1739 /*
1740 ** This routine is run via a vlan
1741 ** unconfig EVENT; it removes our entry
1742 ** from the soft vfta.
1743 */
1744 static void
1745 ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1746 {
1747         struct ixl_vsi                  *vsi = ifp->if_softc;
1748         struct ixlv_sc          *sc = vsi->back;
1749         struct ixlv_vlan_filter *v;
1750         int                             i = 0;
1751         
1752         if (ifp->if_softc !=  arg)
1753                 return;
1754
1755         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1756                 return;
1757
1758         mtx_lock(&sc->mtx);
1759         SLIST_FOREACH(v, sc->vlan_filters, next) {
1760                 if (v->vlan == vtag) {
1761                         v->flags = IXL_FILTER_DEL;
1762                         ++i;
1763                         --vsi->num_vlans;
1764                 }
1765         }
1766         if (i)
1767                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
1768                     IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
1769         mtx_unlock(&sc->mtx);
1770         return;
1771 }
1772
1773 /*
1774 ** Get a new filter and add it to the mac filter list.
1775 */
1776 static struct ixlv_mac_filter *
1777 ixlv_get_mac_filter(struct ixlv_sc *sc)
1778 {
1779         struct ixlv_mac_filter  *f;
1780
1781         f = malloc(sizeof(struct ixlv_mac_filter),
1782             M_DEVBUF, M_NOWAIT | M_ZERO);
1783         if (f)
1784                 SLIST_INSERT_HEAD(sc->mac_filters, f, next);
1785
1786         return (f);
1787 }
1788
1789 /*
1790 ** Find the filter with matching MAC address
1791 */
1792 static struct ixlv_mac_filter *
1793 ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
1794 {
1795         struct ixlv_mac_filter  *f;
1796         bool                            match = FALSE;
1797
1798         SLIST_FOREACH(f, sc->mac_filters, next) {
1799                 if (cmp_etheraddr(f->macaddr, macaddr)) {
1800                         match = TRUE;
1801                         break;
1802                 }
1803         }       
1804
1805         if (!match)
1806                 f = NULL;
1807         return (f);
1808 }
1809
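     /*
      * Editor's note: cmp_etheraddr() comes from the shared ixl headers
      * and is assumed to be a 6-byte MAC address compare; a minimal
      * equivalent for illustration:
      */
     static inline bool
     example_cmp_etheraddr(const u8 *ea1, const u8 *ea2)
     {
             return (bcmp(ea1, ea2, ETHER_ADDR_LEN) == 0);
     }
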
1810 /*
1811 ** Admin Queue interrupt handler
1812 */
1813 static void
1814 ixlv_msix_adminq(void *arg)
1815 {
1816         struct ixlv_sc  *sc = arg;
1817         struct i40e_hw  *hw = &sc->hw;
1818         device_t        dev = sc->dev;
1819         u32             reg, mask, oldreg;
1820
1821         reg = rd32(hw, I40E_VFINT_ICR01);
1822         mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
1823
1824         reg = rd32(hw, I40E_VFINT_DYN_CTL01);
1825         reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
1826         wr32(hw, I40E_VFINT_DYN_CTL01, reg);
1827
1828         /* check for Admin queue errors */
1829         oldreg = reg = rd32(hw, hw->aq.arq.len);
1830         if (reg & I40E_VF_ARQLEN_ARQVFE_MASK) {
1831                 device_printf(dev, "ARQ VF Error detected\n");
1832                 reg &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
1833         }
1834         if (reg & I40E_VF_ARQLEN_ARQOVFL_MASK) {
1835                 device_printf(dev, "ARQ Overflow Error detected\n");
1836                 reg &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
1837         }
1838         if (reg & I40E_VF_ARQLEN_ARQCRIT_MASK) {
1839                 device_printf(dev, "ARQ Critical Error detected\n");
1840                 reg &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
1841         }
1842         if (oldreg != reg)
1843                 wr32(hw, hw->aq.arq.len, reg);
1844
1845         oldreg = reg = rd32(hw, hw->aq.asq.len);
1846         if (reg & I40E_VF_ATQLEN_ATQVFE_MASK) {
1847                 device_printf(dev, "ASQ VF Error detected\n");
1848                 reg &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
1849         }
1850         if (reg & I40E_VF_ATQLEN_ATQOVFL_MASK) {
1851                 device_printf(dev, "ASQ Overflow Error detected\n");
1852                 reg &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
1853         }
1854         if (reg & I40E_VF_ATQLEN_ATQCRIT_MASK) {
1855                 device_printf(dev, "ASQ Critical Error detected\n");
1856                 reg &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
1857         }
1858         if (oldreg != reg)
1859                 wr32(hw, hw->aq.asq.len, reg);
1860
1861         /* re-enable interrupt causes */
1862         wr32(hw, I40E_VFINT_ICR0_ENA1, mask);
1863         wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);
1864
1865         /* schedule task */
1866         taskqueue_enqueue(sc->tq, &sc->aq_irq);
1867         return;
1868 }
1869
1870 void
1871 ixlv_enable_intr(struct ixl_vsi *vsi)
1872 {
1873         struct i40e_hw          *hw = vsi->hw;
1874         struct ixl_queue        *que = vsi->queues;
1875
1876         ixlv_enable_adminq_irq(hw);
1877         for (int i = 0; i < vsi->num_queues; i++, que++)
1878                 ixlv_enable_queue_irq(hw, que->me);
1879 }
1880
1881 void
1882 ixlv_disable_intr(struct ixl_vsi *vsi)
1883 {
1884         struct i40e_hw          *hw = vsi->hw;
1885         struct ixl_queue       *que = vsi->queues;
1886
1887         ixlv_disable_adminq_irq(hw);
1888         for (int i = 0; i < vsi->num_queues; i++, que++)
1889                 ixlv_disable_queue_irq(hw, que->me);
1890 }
1891
1892
1893 static void
1894 ixlv_disable_adminq_irq(struct i40e_hw *hw)
1895 {
1896         wr32(hw, I40E_VFINT_DYN_CTL01, 0);
1897         wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
1898         /* flush */
1899         rd32(hw, I40E_VFGEN_RSTAT);
1900         return;
1901 }
1902
1903 static void
1904 ixlv_enable_adminq_irq(struct i40e_hw *hw)
1905 {
1906         wr32(hw, I40E_VFINT_DYN_CTL01,
1907             I40E_VFINT_DYN_CTL01_INTENA_MASK |
1908             I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1909         wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
1910         /* flush */
1911         rd32(hw, I40E_VFGEN_RSTAT);
1912         return;
1913 }
1914
1915 static void
1916 ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
1917 {
1918         u32             reg;
1919
1920         reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1921             I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
1922         wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
1923 }
1924
1925 static void
1926 ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
1927 {
1928         wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
1929         rd32(hw, I40E_VFGEN_RSTAT);
1930         return;
1931 }
1932
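     /*
      * Editor's sketch: the local timer below nudges queues with
      * outstanding work by writing INTENA | SWINT_TRIG to the same
      * DYN_CTLN1 register, forcing a software-generated interrupt on that
      * vector. As a helper it would look like this (hypothetical, not in
      * the driver):
      */
     static inline void
     example_sw_trigger_queue(struct i40e_hw *hw, int id)
     {
             wr32(hw, I40E_VFINT_DYN_CTLN1(id),
                 I40E_VFINT_DYN_CTLN_INTENA_MASK |
                 I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK);
     }
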
1933
1934 /*
1935 ** Provide a update to the queue RX
1936 ** interrupt moderation value.
1937 */
1938 static void
1939 ixlv_set_queue_rx_itr(struct ixl_queue *que)
1940 {
1941         struct ixl_vsi  *vsi = que->vsi;
1942         struct i40e_hw  *hw = vsi->hw;
1943         struct rx_ring  *rxr = &que->rxr;
1944         u16             rx_itr;
1945         u16             rx_latency = 0;
1946         int             rx_bytes;
1947
1948
1949         /* Idle, do nothing */
1950         if (rxr->bytes == 0)
1951                 return;
1952
1953         if (ixlv_dynamic_rx_itr) {
1954                 rx_bytes = rxr->bytes/rxr->itr;
1955                 rx_itr = rxr->itr;
1956
1957                 /* Adjust latency range */
1958                 switch (rxr->latency) {
1959                 case IXL_LOW_LATENCY:
1960                         if (rx_bytes > 10) {
1961                                 rx_latency = IXL_AVE_LATENCY;
1962                                 rx_itr = IXL_ITR_20K;
1963                         }
1964                         break;
1965                 case IXL_AVE_LATENCY:
1966                         if (rx_bytes > 20) {
1967                                 rx_latency = IXL_BULK_LATENCY;
1968                                 rx_itr = IXL_ITR_8K;
1969                         } else if (rx_bytes <= 10) {
1970                                 rx_latency = IXL_LOW_LATENCY;
1971                                 rx_itr = IXL_ITR_100K;
1972                         }
1973                         break;
1974                 case IXL_BULK_LATENCY:
1975                         if (rx_bytes <= 20) {
1976                                 rx_latency = IXL_AVE_LATENCY;
1977                                 rx_itr = IXL_ITR_20K;
1978                         }
1979                         break;
1980                  }
1981
1982                 rxr->latency = rx_latency;
1983
1984                 if (rx_itr != rxr->itr) {
1985                         /* do an exponential smoothing */
1986                         rx_itr = (10 * rx_itr * rxr->itr) /
1987                             ((9 * rx_itr) + rxr->itr);
1988                         rxr->itr = rx_itr & IXL_MAX_ITR;
1989                         wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1990                             que->me), rxr->itr);
1991                 }
1992         } else { /* We may have toggled to non-dynamic */
1993                 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
1994                         vsi->rx_itr_setting = ixlv_rx_itr;
1995                 /* Update the hardware if needed */
1996                 if (rxr->itr != vsi->rx_itr_setting) {
1997                         rxr->itr = vsi->rx_itr_setting;
1998                         wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1999                             que->me), rxr->itr);
2000                 }
2001         }
2002         rxr->bytes = 0;
2003         rxr->packets = 0;
2004         return;
2005 }
2006
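     /*
      * Editor's sketch: the exponential smoothing above, worked with
      * concrete numbers. With a current ITR of 40 and a computed target
      * of 10: (10 * 10 * 40) / ((9 * 10) + 40) = 4000 / 130 = 30, so the
      * value moves most of the way toward the target in one step without
      * jumping. (The callers above only smooth nonzero values.)
      */
     static inline u16
     example_smooth_itr(u16 target, u16 current)
     {
             return ((u16)(((10 * (u32)target * current) /
                 ((9 * (u32)target) + current)) & IXL_MAX_ITR));
     }
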
2007
2008 /*
2009 ** Provide a update to the queue TX
2010 ** interrupt moderation value.
2011 */
2012 static void
2013 ixlv_set_queue_tx_itr(struct ixl_queue *que)
2014 {
2015         struct ixl_vsi  *vsi = que->vsi;
2016         struct i40e_hw  *hw = vsi->hw;
2017         struct tx_ring  *txr = &que->txr;
2018         u16             tx_itr;
2019         u16             tx_latency = 0;
2020         int             tx_bytes;
2021
2022
2023         /* Idle, do nothing */
2024         if (txr->bytes == 0)
2025                 return;
2026
2027         if (ixlv_dynamic_tx_itr) {
2028                 tx_bytes = txr->bytes/txr->itr;
2029                 tx_itr = txr->itr;
2030
2031                 switch (txr->latency) {
2032                 case IXL_LOW_LATENCY:
2033                         if (tx_bytes > 10) {
2034                                 tx_latency = IXL_AVE_LATENCY;
2035                                 tx_itr = IXL_ITR_20K;
2036                         }
2037                         break;
2038                 case IXL_AVE_LATENCY:
2039                         if (tx_bytes > 20) {
2040                                 tx_latency = IXL_BULK_LATENCY;
2041                                 tx_itr = IXL_ITR_8K;
2042                         } else if (tx_bytes <= 10) {
2043                                 tx_latency = IXL_LOW_LATENCY;
2044                                 tx_itr = IXL_ITR_100K;
2045                         }
2046                         break;
2047                 case IXL_BULK_LATENCY:
2048                         if (tx_bytes <= 20) {
2049                                 tx_latency = IXL_AVE_LATENCY;
2050                                 tx_itr = IXL_ITR_20K;
2051                         }
2052                         break;
2053                 }
2054
2055                 txr->latency = tx_latency;
2056
2057                 if (tx_itr != txr->itr) {
2058                         /* do an exponential smoothing */
2059                         tx_itr = (10 * tx_itr * txr->itr) /
2060                             ((9 * tx_itr) + txr->itr);
2061                         txr->itr = tx_itr & IXL_MAX_ITR;
2062                         wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2063                             que->me), txr->itr);
2064                 }
2065
2066         } else { /* We may have toggled to non-dynamic */
2067                 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2068                         vsi->tx_itr_setting = ixlv_tx_itr;
2069                 /* Update the hardware if needed */
2070                 if (txr->itr != vsi->tx_itr_setting) {
2071                         txr->itr = vsi->tx_itr_setting;
2072                         wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2073                             que->me), txr->itr);
2074                 }
2075         }
2076         txr->bytes = 0;
2077         txr->packets = 0;
2078         return;
2079 }
2080
2081
2082 /*
2083 **
2084 ** MSIX Interrupt Handlers and Tasklets
2085 **
2086 */
2087 static void
2088 ixlv_handle_que(void *context, int pending)
2089 {
2090         struct ixl_queue *que = context;
2091         struct ixl_vsi *vsi = que->vsi;
2092         struct i40e_hw  *hw = vsi->hw;
2093         struct tx_ring  *txr = &que->txr;
2094         struct ifnet    *ifp = vsi->ifp;
2095         bool            more;
2096
2097         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2098                 more = ixl_rxeof(que, IXL_RX_LIMIT);
2099                 mtx_lock(&txr->mtx);
2100                 ixl_txeof(que);
2101                 if (!drbr_empty(ifp, txr->br))
2102                         ixl_mq_start_locked(ifp, txr);
2103                 mtx_unlock(&txr->mtx);
2104                 if (more) {
2105                         taskqueue_enqueue(que->tq, &que->task);
2106                         return;
2107                 }
2108         }
2109
2110         /* Re-enable this interrupt */
2111         ixlv_enable_queue_irq(hw, que->me);
2112         return;
2113 }
2114
2115
2116 /*********************************************************************
2117  *
2118  *  MSIX Queue Interrupt Service routine
2119  *
2120  **********************************************************************/
2121 static void
2122 ixlv_msix_que(void *arg)
2123 {
2124         struct ixl_queue        *que = arg;
2125         struct ixl_vsi  *vsi = que->vsi;
2126         struct i40e_hw  *hw = vsi->hw;
2127         struct tx_ring  *txr = &que->txr;
2128         bool            more_tx, more_rx;
2129
2130         /* Spurious interrupts are ignored */
2131         if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
2132                 return;
2133
2134         ++que->irqs;
2135
2136         more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
2137
2138         mtx_lock(&txr->mtx);
2139         more_tx = ixl_txeof(que);
2140         /*
2141         ** Make certain that if the stack 
2142         ** has anything queued the task gets
2143         ** scheduled to handle it.
2144         */
2145         if (!drbr_empty(vsi->ifp, txr->br))
2146                 more_tx = 1;
2147         mtx_unlock(&txr->mtx);
2148
2149         ixlv_set_queue_rx_itr(que);
2150         ixlv_set_queue_tx_itr(que);
2151
2152         if (more_tx || more_rx)
2153                 taskqueue_enqueue(que->tq, &que->task);
2154         else
2155                 ixlv_enable_queue_irq(hw, que->me);
2156
2157         return;
2158 }
2159
2160
2161 /*********************************************************************
2162  *
2163  *  Media Ioctl callback
2164  *
2165  *  This routine is called whenever the user queries the status of
2166  *  the interface using ifconfig.
2167  *
2168  **********************************************************************/
2169 static void
2170 ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
2171 {
2172         struct ixl_vsi          *vsi = ifp->if_softc;
2173         struct ixlv_sc  *sc = vsi->back;
2174
2175         INIT_DBG_IF(ifp, "begin");
2176
2177         mtx_lock(&sc->mtx);
2178
2179         ixlv_update_link_status(sc);
2180
2181         ifmr->ifm_status = IFM_AVALID;
2182         ifmr->ifm_active = IFM_ETHER;
2183
2184         if (!vsi->link_up) {
2185                 mtx_unlock(&sc->mtx);
2186                 INIT_DBG_IF(ifp, "end: link not up");
2187                 return;
2188         }
2189
2190         ifmr->ifm_status |= IFM_ACTIVE;
2191         /* Hardware is always full-duplex */
2192         ifmr->ifm_active |= IFM_FDX;
2193         mtx_unlock(&sc->mtx);
2194         INIT_DBG_IF(ifp, "end");
2195         return;
2196 }
2197
2198 /*********************************************************************
2199  *
2200  *  Media Ioctl callback
2201  *
2202  *  This routine is called when the user changes speed/duplex using
2203  *  media/mediaopt options with ifconfig.
2204  *
2205  **********************************************************************/
2206 static int
2207 ixlv_media_change(struct ifnet * ifp)
2208 {
2209         struct ixl_vsi *vsi = ifp->if_softc;
2210         struct ifmedia *ifm = &vsi->media;
2211
2212         INIT_DBG_IF(ifp, "begin");
2213
2214         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2215                 return (EINVAL);
2216
2217         INIT_DBG_IF(ifp, "end");
2218         return (0);
2219 }
2220
2221
2222 /*********************************************************************
2223  *  Multicast Initialization
2224  *
2225  *  This routine is called by init to reset a fresh state.
2226  *
2227  **********************************************************************/
2228
2229 static void
2230 ixlv_init_multi(struct ixl_vsi *vsi)
2231 {
2232         struct ixlv_mac_filter *f;
2233         struct ixlv_sc  *sc = vsi->back;
2234         int                     mcnt = 0;
2235
2236         IOCTL_DBG_IF(vsi->ifp, "begin");
2237
2238         /* First clear any multicast filters */
2239         SLIST_FOREACH(f, sc->mac_filters, next) {
2240                 if ((f->flags & IXL_FILTER_USED)
2241                     && (f->flags & IXL_FILTER_MC)) {
2242                         f->flags |= IXL_FILTER_DEL;
2243                         mcnt++;
2244                 }
2245         }
2246         if (mcnt > 0)
2247                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2248                     IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2249                     sc);
2250
2251         IOCTL_DBG_IF(vsi->ifp, "end");
2252 }
2253
2254 static void
2255 ixlv_add_multi(struct ixl_vsi *vsi)
2256 {
2257         struct ifmultiaddr      *ifma;
2258         struct ifnet            *ifp = vsi->ifp;
2259         struct ixlv_sc  *sc = vsi->back;
2260         int                     mcnt = 0;
2261
2262         IOCTL_DBG_IF(ifp, "begin");
2263
2264         if_maddr_rlock(ifp);
2265         /*
2266         ** Get a count, to decide if we
2267         ** simply use multicast promiscuous.
2268         */
2269         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2270                 if (ifma->ifma_addr->sa_family != AF_LINK)
2271                         continue;
2272                 mcnt++;
2273         }
2274         if_maddr_runlock(ifp);
2275
2276         // TODO: Remove -- cannot set promiscuous mode in a VF
2277         if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
2278                 /* delete all multicast filters */
2279                 ixlv_init_multi(vsi);
2280                 sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
2281                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2282                     IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
2283                     sc);
2284                 IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
2285                 return;
2286         }
2287
2288         mcnt = 0;
2289         if_maddr_rlock(ifp);
2290         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2291                 if (ifma->ifma_addr->sa_family != AF_LINK)
2292                         continue;
2293                 if (!ixlv_add_mac_filter(sc,
2294                     (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2295                     IXL_FILTER_MC))
2296                         mcnt++;
2297         }
2298         if_maddr_runlock(ifp);
2299         /*
2300         ** Notify AQ task that sw filters need to be
2301         ** added to hw list
2302         */
2303         if (mcnt > 0)
2304                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2305                     IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
2306                     sc);
2307
2308         IOCTL_DBG_IF(ifp, "end");
2309 }
2310
2311 static void
2312 ixlv_del_multi(struct ixl_vsi *vsi)
2313 {
2314         struct ixlv_mac_filter *f;
2315         struct ifmultiaddr      *ifma;
2316         struct ifnet            *ifp = vsi->ifp;
2317         struct ixlv_sc  *sc = vsi->back;
2318         int                     mcnt = 0;
2319         bool            match = FALSE;
2320
2321         IOCTL_DBG_IF(ifp, "begin");
2322
2323         /* Search for removed multicast addresses */
2324         if_maddr_rlock(ifp);
2325         SLIST_FOREACH(f, sc->mac_filters, next) {
2326                 if ((f->flags & IXL_FILTER_USED)
2327                     && (f->flags & IXL_FILTER_MC)) {
2328                         /* check if mac address in filter is in sc's list */
2329                         match = FALSE;
2330                         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2331                                 if (ifma->ifma_addr->sa_family != AF_LINK)
2332                                         continue;
2333                                 u8 *mc_addr =
2334                                     (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2335                                 if (cmp_etheraddr(f->macaddr, mc_addr)) {
2336                                         match = TRUE;
2337                                         break;
2338                                 }
2339                         }
2340                         /* if this filter is not in the sc's list, remove it */
2341                         if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
2342                                 f->flags |= IXL_FILTER_DEL;
2343                                 mcnt++;
2344                                 IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
2345                                     MAC_FORMAT_ARGS(f->macaddr));
2346                         }
2347                         else if (match == FALSE)
2348                                 IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
2349                                     MAC_FORMAT_ARGS(f->macaddr));
2350                 }
2351         }
2352         if_maddr_runlock(ifp);
2353
2354         if (mcnt > 0)
2355                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2356                     IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2357                     sc);
2358
2359         IOCTL_DBG_IF(ifp, "end");
2360 }
2361
2362 /*********************************************************************
2363  *  Timer routine
2364  *
2365  *  This routine checks for link status, updates statistics,
2366  *  and runs the watchdog check.
2367  *
2368  **********************************************************************/
2369
2370 static void
2371 ixlv_local_timer(void *arg)
2372 {
2373         struct ixlv_sc  *sc = arg;
2374         struct i40e_hw          *hw = &sc->hw;
2375         struct ixl_vsi          *vsi = &sc->vsi;
2376         struct ixl_queue        *que = vsi->queues;
2377         device_t                dev = sc->dev;
2378         int                     hung = 0;
2379         u32                     mask, val;
2380
2381         IXLV_CORE_LOCK_ASSERT(sc);
2382
2383         /* If Reset is in progress just bail */
2384         if (sc->init_state == IXLV_RESET_PENDING)
2385                 return;
2386
2387         /* Check for when PF triggers a VF reset */
2388         val = rd32(hw, I40E_VFGEN_RSTAT) &
2389             I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2390
2391         if (val != I40E_VFR_VFACTIVE
2392             && val != I40E_VFR_COMPLETED) {
2393                 DDPRINTF(dev, "reset in progress! (%d)", val);
2394                 return;
2395         }
2396
2397         ixlv_request_stats(sc);
2398
2399         /* clean and process any events */
2400         taskqueue_enqueue(sc->tq, &sc->aq_irq);
2401
2402         /*
2403         ** Check status on the queues for a hang
2404         */
2405         mask = (I40E_VFINT_DYN_CTLN_INTENA_MASK |
2406             I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK);
2407
2408         for (int i = 0; i < vsi->num_queues; i++,que++) {
2409                 /* Any queues with outstanding work get a sw irq */
2410                 if (que->busy)
2411                         wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
2412                 /*
2413                 ** Each time txeof runs without cleaning, but there
2414                 ** are uncleaned descriptors it increments busy. If
2415                 ** we get to 5 we declare it hung.
2416                 */
2417                 if (que->busy == IXL_QUEUE_HUNG) {
2418                         ++hung;
2419                         /* Mark the queue as inactive */
2420                         vsi->active_queues &= ~((u64)1 << que->me);
2421                         continue;
2422                 } else {
2423                         /* Check if we've come back from hung */
2424                         if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
2425                                 vsi->active_queues |= ((u64)1 << que->me);
2426                 }
2427                 if (que->busy >= IXL_MAX_TX_BUSY) {
2428                         device_printf(dev,"Warning queue %d "
2429                             "appears to be hung!\n", i);
2430                         que->busy = IXL_QUEUE_HUNG;
2431                         ++hung;
2432                 }
2433         }
2434         /* Only reset when all queues show hung */
2435         if (hung == vsi->num_queues)
2436                 goto hung;
2437         callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
2438         return;
2439
2440 hung:
2441         device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
2442         sc->init_state = IXLV_RESET_REQUIRED;
2443         ixlv_init_locked(sc);
2444 }
2445
2446 /*
2447 ** Note: this routine updates the OS on the link state
2448 **      the real check of the hardware only happens with
2449 **      a link interrupt.
2450 */
2451 void
2452 ixlv_update_link_status(struct ixlv_sc *sc)
2453 {
2454         struct ixl_vsi          *vsi = &sc->vsi;
2455         struct ifnet            *ifp = vsi->ifp;
2456         device_t                 dev = sc->dev;
2457
2458         if (vsi->link_up) {
2459                 if (vsi->link_active == FALSE) {
2460                         if (bootverbose)
2461                                 device_printf(dev,"Link is Up, %d Gbps\n",
2462                                     (vsi->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
2463                         vsi->link_active = TRUE;
2464                         if_link_state_change(ifp, LINK_STATE_UP);
2465                 }
2466         } else { /* Link down */
2467                 if (vsi->link_active == TRUE) {
2468                         if (bootverbose)
2469                                 device_printf(dev,"Link is Down\n");
2470                         if_link_state_change(ifp, LINK_STATE_DOWN);
2471                         vsi->link_active = FALSE;
2472                 }
2473         }
2474
2475         return;
2476 }
2477
2478 /*********************************************************************
2479  *
2480  *  This routine disables all traffic on the adapter by issuing a
2481  *  global reset on the MAC and deallocates TX/RX buffers.
2482  *
2483  **********************************************************************/
2484
2485 static void
2486 ixlv_stop(struct ixlv_sc *sc)
2487 {
2488         struct ifnet *ifp;
2489         int start;
2490
2491         ifp = sc->vsi.ifp;
2492         INIT_DBG_IF(ifp, "begin");
2493
2494         IXLV_CORE_LOCK_ASSERT(sc);
2495
2496         ixl_vc_flush(&sc->vc_mgr);
2497         ixlv_disable_queues(sc);
2498
2499         start = ticks;
2500         while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
2501             ((ticks - start) < hz/10))
2502                 ixlv_do_adminq_locked(sc);
2503
2504         /* Stop the local timer */
2505         callout_stop(&sc->timer);
2506
2507         INIT_DBG_IF(ifp, "end");
2508 }
2509
2510
2511 /*********************************************************************
2512  *
2513  *  Free all station queue structs.
2514  *
2515  **********************************************************************/
2516 static void
2517 ixlv_free_queues(struct ixl_vsi *vsi)
2518 {
2519         struct ixlv_sc  *sc = (struct ixlv_sc *)vsi->back;
2520         struct ixl_queue        *que = vsi->queues;
2521
2522         for (int i = 0; i < vsi->num_queues; i++, que++) {
2523                 struct tx_ring *txr = &que->txr;
2524                 struct rx_ring *rxr = &que->rxr;
2525         
2526                 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2527                         continue;
2528                 IXL_TX_LOCK(txr);
2529                 ixl_free_que_tx(que);
2530                 if (txr->base)
2531                         i40e_free_dma_mem(&sc->hw, &txr->dma);
2532                 IXL_TX_UNLOCK(txr);
2533                 IXL_TX_LOCK_DESTROY(txr);
2534
2535                 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2536                         continue;
2537                 IXL_RX_LOCK(rxr);
2538                 ixl_free_que_rx(que);
2539                 if (rxr->base)
2540                         i40e_free_dma_mem(&sc->hw, &rxr->dma);
2541                 IXL_RX_UNLOCK(rxr);
2542                 IXL_RX_LOCK_DESTROY(rxr);
2543                 
2544         }
2545         free(vsi->queues, M_DEVBUF);
2546 }
2547
2548
2549 /*
2550 ** ixlv_config_rss - setup RSS 
2551 **
2552 ** RSS keys and table are cleared on VF reset.
2553 */
2554 static void
2555 ixlv_config_rss(struct ixlv_sc *sc)
2556 {
2557         struct i40e_hw  *hw = &sc->hw;
2558         struct ixl_vsi  *vsi = &sc->vsi;
2559         u32             lut = 0;
2560         u64             set_hena = 0, hena;
2561         int             i, j, que_id;
2562 #ifdef RSS
2563         u32             rss_hash_config;
2564         u32             rss_seed[IXL_KEYSZ];
2565 #else
2566         u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
2567                             0x183cfd8c, 0xce880440, 0x580cbc3c,
2568                             0x35897377, 0x328b25e1, 0x4fa98922,
2569                             0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
2570 #endif
2571         
2572         /* Don't set up RSS if using a single queue */
2573         if (vsi->num_queues == 1) {
2574                 wr32(hw, I40E_VFQF_HENA(0), 0);
2575                 wr32(hw, I40E_VFQF_HENA(1), 0);
2576                 ixl_flush(hw);
2577                 return;
2578         }
2579
2580 #ifdef RSS
2581         /* Fetch the configured RSS key */
2582         rss_getkey((uint8_t *) &rss_seed);
2583 #endif
2584         /* Fill out hash function seed */
2585         for (i = 0; i < IXL_KEYSZ; i++)
2586                 wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
2587
2588         /* Enable PCTYPES for RSS: */
2589 #ifdef RSS
2590         rss_hash_config = rss_gethashconfig();
2591         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2592                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2593         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2594                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2595         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2596                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2597         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2598                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2599         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2600                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2601         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2602                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
2603         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
2604                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
2605 #else
2606         set_hena =
2607                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
2608                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
2609                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
2610                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
2611                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
2612                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
2613                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
2614                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
2615                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
2616                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
2617                 ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
2618 #endif
2619         hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
2620             ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
2621         hena |= set_hena;
2622         wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
2623         wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
2624
2625         /* Populate the LUT with max no. of queues in round robin fashion */
             for (i = 0, j = 0; i < (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4; i++, j++) {
                     if (j == vsi->num_queues)
                             j = 0;
     #ifdef RSS
                     /*
                      * Fetch the RSS bucket id for the given indirection entry.
                      * Cap it at the number of configured buckets (which is
                      * num_queues.)
                      */
                     que_id = rss_get_indirection_to_bucket(i);
                     que_id = que_id % vsi->num_queues;
     #else
                     que_id = j;
     #endif
                     /* lut = 4-byte sliding window of 4 lut entries */
                     lut = (lut << 8) | (que_id & 0xF);
                     /* Each HLUT register holds 4 entries; write every 4th pass */
                     if ((i & 3) == 3) {
                             wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
                             DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i >> 2, lut);
                     }
             }
2648         ixl_flush(hw);
2649 }
2650
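     /*
      * Editor's sketch: how the 8-bit sliding-window packing above fills
      * one HLUT register. With four queues, the first four entries are
      * 0, 1, 2, 3 and accumulate as (((0 << 8 | 1) << 8 | 2) << 8 | 3)
      * == 0x00010203.
      */
     static inline u32
     example_pack_hlut(const u8 que_id[4])
     {
             return (((u32)que_id[0] << 24) | ((u32)que_id[1] << 16) |
                 ((u32)que_id[2] << 8) | que_id[3]);
     }
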
2651
2652 /*
2653 ** This routine refreshes vlan filters; called by init,
2654 ** it scans the filter table and then updates the AQ
2655 */
2656 static void
2657 ixlv_setup_vlan_filters(struct ixlv_sc *sc)
2658 {
2659         struct ixl_vsi                  *vsi = &sc->vsi;
2660         struct ixlv_vlan_filter *f;
2661         int                             cnt = 0;
2662
2663         if (vsi->num_vlans == 0)
2664                 return;
2665         /*
2666         ** Scan the filter table for vlan entries,
2667         ** and if found call for the AQ update.
2668         */
2669         SLIST_FOREACH(f, sc->vlan_filters, next)
2670                 if (f->flags & IXL_FILTER_ADD)
2671                         cnt++;
2672         if (cnt > 0)
2673                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
2674                     IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
2675 }
2676
2677
2678 /*
2679 ** This routine adds new MAC filters to the sc's list;
2680 ** these are later added in hardware by sending a virtual
2681 ** channel message.
2682 */
2683 static int
2684 ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
2685 {
2686         struct ixlv_mac_filter  *f;
2687         device_t                        dev = sc->dev;
2688
2689         /* Does one already exist? */
2690         f = ixlv_find_mac_filter(sc, macaddr);
2691         if (f != NULL) {
2692                 IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
2693                     MAC_FORMAT_ARGS(macaddr));
2694                 return (EEXIST);
2695         }
2696
2697         /* If not, get a new empty filter */
2698         f = ixlv_get_mac_filter(sc);
2699         if (f == NULL) {
2700                 device_printf(dev, "%s: no filters available!!\n",
2701                     __func__);
2702                 return (ENOMEM);
2703         }
2704
2705         IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
2706             MAC_FORMAT_ARGS(macaddr));
2707
2708         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2709         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
2710         f->flags |= flags;
2711         return (0);
2712 }
2713
2714 /*
2715 ** Marks a MAC filter for deletion.
2716 */
2717 static int
2718 ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
2719 {
2720         struct ixlv_mac_filter  *f;
2721
2722         f = ixlv_find_mac_filter(sc, macaddr);
2723         if (f == NULL)
2724                 return (ENOENT);
2725
2726         f->flags |= IXL_FILTER_DEL;
2727         return (0);
2728 }
2729
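     /*
      * Editor's sketch of the filter flag protocol assumed by the add/del
      * routines above: a filter is staged with IXL_FILTER_ADD |
      * IXL_FILTER_USED and retired by setting IXL_FILTER_DEL; the virtual
      * channel code is then expected to act on the staged flags when it
      * builds the AQ message.
      */
     static inline bool
     example_filter_pending(u16 flags)
     {
             return ((flags & (IXL_FILTER_ADD | IXL_FILTER_DEL)) != 0);
     }
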
2730 /*
2731 ** Tasklet handler for MSIX Adminq interrupts
2732 **  - done outside interrupt context since it might sleep
2733 */
2734 static void
2735 ixlv_do_adminq(void *context, int pending)
2736 {
2737         struct ixlv_sc          *sc = context;
2738
2739         mtx_lock(&sc->mtx);
2740         ixlv_do_adminq_locked(sc);
2741         mtx_unlock(&sc->mtx);
2742         return;
2743 }
2744
2745 static void
2746 ixlv_do_adminq_locked(struct ixlv_sc *sc)
2747 {
2748         struct i40e_hw                  *hw = &sc->hw;
2749         struct i40e_arq_event_info      event;
2750         struct i40e_virtchnl_msg        *v_msg;
2751         i40e_status                     ret;
2752         u16                             result = 0;
2753
2754         IXLV_CORE_LOCK_ASSERT(sc);
2755
2756         event.buf_len = IXL_AQ_BUF_SZ;
2757         event.msg_buf = sc->aq_buffer;
2758         v_msg = (struct i40e_virtchnl_msg *)&event.desc;
2759
2760         do {
2761                 ret = i40e_clean_arq_element(hw, &event, &result);
2762                 if (ret)
2763                         break;
2764                 ixlv_vc_completion(sc, v_msg->v_opcode,
2765                     v_msg->v_retval, event.msg_buf, event.msg_len);
2766                 if (result != 0)
2767                         bzero(event.msg_buf, IXL_AQ_BUF_SZ);
2768         } while (result);
2769
2770         ixlv_enable_adminq_irq(hw);
2771 }
2772
2773 static void
2774 ixlv_add_sysctls(struct ixlv_sc *sc)
2775 {
2776         device_t dev = sc->dev;
2777         struct ixl_vsi *vsi = &sc->vsi;
2778         struct i40e_eth_stats *es = &vsi->eth_stats;
2779
2780         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2781         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2782         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2783
2784         struct sysctl_oid *vsi_node, *queue_node;
2785         struct sysctl_oid_list *vsi_list, *queue_list;
2786
2787 #define QUEUE_NAME_LEN 32
2788         char queue_namebuf[QUEUE_NAME_LEN];
2789
2790         struct ixl_queue *queues = vsi->queues;
2791         struct tx_ring *txr;
2792         struct rx_ring *rxr;
2793
2794         /* Driver statistics sysctls */
2795         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2796                         CTLFLAG_RD, &sc->watchdog_events,
2797                         "Watchdog timeouts");
2798         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2799                         CTLFLAG_RD, &sc->admin_irq,
2800                         "Admin Queue IRQ Handled");
2801
2802         /* VSI statistics sysctls */
2803         vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
2804                                    CTLFLAG_RD, NULL, "VSI-specific statistics");
2805         vsi_list = SYSCTL_CHILDREN(vsi_node);
2806
2807         struct ixl_sysctl_info ctls[] =
2808         {
2809                 {&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2810                 {&es->rx_unicast, "ucast_pkts_rcvd",
2811                         "Unicast Packets Received"},
2812                 {&es->rx_multicast, "mcast_pkts_rcvd",
2813                         "Multicast Packets Received"},
2814                 {&es->rx_broadcast, "bcast_pkts_rcvd",
2815                         "Broadcast Packets Received"},
2816                 {&es->rx_discards, "rx_discards", "Discarded RX packets"},
2817                 {&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"},
2818                 {&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2819                 {&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2820                 {&es->tx_multicast, "mcast_pkts_txd",
2821                         "Multicast Packets Transmitted"},
2822                 {&es->tx_broadcast, "bcast_pkts_txd",
2823                         "Broadcast Packets Transmitted"},
2824                 {&es->tx_errors, "tx_errors", "TX packet errors"},
2825                 // end
2826                 {0,0,0}
2827         };
2828         struct ixl_sysctl_info *entry = ctls;
2829         while (entry->stat != 0)
2830         {
2831                 SYSCTL_ADD_UQUAD(ctx, vsi_list, OID_AUTO, entry->name,
2832                                 CTLFLAG_RD, entry->stat,
2833                                 entry->description);
2834                 entry++;
2835         }
2836
2837         /* Queue sysctls */
2838         for (int q = 0; q < vsi->num_queues; q++) {
2839                 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2840                 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
2841                                              CTLFLAG_RD, NULL, "Queue #");
2842                 queue_list = SYSCTL_CHILDREN(queue_node);
2843
2844                 txr = &(queues[q].txr);
2845                 rxr = &(queues[q].rxr);
2846
2847                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2848                                 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2849                                 "m_defrag() failed");
2850                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
2851                                 CTLFLAG_RD, &(queues[q].dropped_pkts),
2852                                 "Driver dropped packets");
2853                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2854                                 CTLFLAG_RD, &(queues[q].irqs),
2855                                 "irqs on this queue");
2856                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2857                                 CTLFLAG_RD, &(queues[q].tso),
2858                                 "TSO");
2859                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
2860                                 CTLFLAG_RD, &(queues[q].tx_dma_setup),
2861                                 "Driver tx dma failure in xmit");
2862                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2863                                 CTLFLAG_RD, &(txr->no_desc),
2864                                 "Queue No Descriptor Available");
2865                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2866                                 CTLFLAG_RD, &(txr->total_packets),
2867                                 "Queue Packets Transmitted");
2868                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2869                                 CTLFLAG_RD, &(txr->tx_bytes),
2870                                 "Queue Bytes Transmitted");
2871                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2872                                 CTLFLAG_RD, &(rxr->rx_packets),
2873                                 "Queue Packets Received");
2874                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2875                                 CTLFLAG_RD, &(rxr->rx_bytes),
2876                                 "Queue Bytes Received");
2877
2878                 /* Examine queue state */
2879                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
2880                                 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2881                                 sizeof(struct ixl_queue),
2882                                 ixlv_sysctl_qtx_tail_handler, "IU",
2883                                 "Queue Transmit Descriptor Tail");
2884                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
2885                                 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2886                                 sizeof(struct ixl_queue),
2887                                 ixlv_sysctl_qrx_tail_handler, "IU",
2888                                 "Queue Receive Descriptor Tail");
2889         }
2890 }
2891
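     /*
      * Editor's sketch (userland, not driver code): reading one of the
      * per-queue stats registered above with sysctlbyname(3). The node
      * name is an assumption based on unit 0 and the names created in
      * ixlv_add_sysctls().
      */
     #if 0  /* illustrative example */
     #include <sys/types.h>
     #include <sys/sysctl.h>
     #include <stdint.h>
     #include <stdio.h>

     int
     main(void)
     {
             uint64_t val;
             size_t len = sizeof(val);

             if (sysctlbyname("dev.ixlv.0.vsi.que0.rx_packets",
                 &val, &len, NULL, 0) == 0)
                     printf("rx_packets: %ju\n", (uintmax_t)val);
             return (0);
     }
     #endif
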
2892 static void
2893 ixlv_init_filters(struct ixlv_sc *sc)
2894 {
             /*
              * Attach-time allocation: M_WAITOK means the list heads cannot
              * be NULL when SLIST_INIT() dereferences them (this assumes
              * sleeping is permitted in this context).
              */
             sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
                 M_DEVBUF, M_WAITOK | M_ZERO);
             SLIST_INIT(sc->mac_filters);
             sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
                 M_DEVBUF, M_WAITOK | M_ZERO);
             SLIST_INIT(sc->vlan_filters);
2901         return;
2902 }
2903
2904 static void
2905 ixlv_free_filters(struct ixlv_sc *sc)
2906 {
2907         struct ixlv_mac_filter *f;
2908         struct ixlv_vlan_filter *v;
2909
2910         while (!SLIST_EMPTY(sc->mac_filters)) {
2911                 f = SLIST_FIRST(sc->mac_filters);
2912                 SLIST_REMOVE_HEAD(sc->mac_filters, next);
2913                 free(f, M_DEVBUF);
2914         }
2915         while (!SLIST_EMPTY(sc->vlan_filters)) {
2916                 v = SLIST_FIRST(sc->vlan_filters);
2917                 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
2918                 free(v, M_DEVBUF);
2919         }
2920         return;
2921 }
2922
2923 /**
2924  * ixlv_sysctl_qtx_tail_handler
2925  * Retrieves I40E_QTX_TAIL1 value from hardware
2926  * for a sysctl.
2927  */
2928 static int 
2929 ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
2930 {
2931         struct ixl_queue *que;
2932         int error;
2933         u32 val;
2934
2935         que = ((struct ixl_queue *)oidp->oid_arg1);
             if (que == NULL)
                     return (0);

             val = rd32(que->vsi->hw, que->txr.tail);
             error = sysctl_handle_int(oidp, &val, 0, req);
             if (error || !req->newptr)
                     return (error);
             return (0);
2943 }
2944
2945 /**
2946  * ixlv_sysctl_qrx_tail_handler
2947  * Retrieves I40E_QRX_TAIL1 value from hardware
2948  * for a sysctl.
2949  */
2950 static int 
2951 ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
2952 {
2953         struct ixl_queue *que;
2954         int error;
2955         u32 val;
2956
2957         que = ((struct ixl_queue *)oidp->oid_arg1);
             if (que == NULL)
                     return (0);

             val = rd32(que->vsi->hw, que->rxr.tail);
             error = sysctl_handle_int(oidp, &val, 0, req);
             if (error || !req->newptr)
                     return (error);
             return (0);
2965 }
2966