/******************************************************************************

  Copyright (c) 2013-2014, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "ixl.h"
#include "ixlv.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixlv_driver_version[] = "1.1.18";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixlv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
        /* required last entry */
        {0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixlv_strings[] = {
        "Intel(R) Ethernet Connection XL710 VF Driver"
};


/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixlv_probe(device_t);
static int      ixlv_attach(device_t);
static int      ixlv_detach(device_t);
static int      ixlv_shutdown(device_t);
static void     ixlv_init_locked(struct ixlv_sc *);
static int      ixlv_allocate_pci_resources(struct ixlv_sc *);
static void     ixlv_free_pci_resources(struct ixlv_sc *);
static int      ixlv_assign_msix(struct ixlv_sc *);
static int      ixlv_init_msix(struct ixlv_sc *);
static int      ixlv_init_taskqueue(struct ixlv_sc *);
static int      ixlv_setup_queues(struct ixlv_sc *);
static void     ixlv_config_rss(struct ixlv_sc *);
static void     ixlv_stop(struct ixlv_sc *);
static void     ixlv_add_multi(struct ixl_vsi *);
static void     ixlv_del_multi(struct ixl_vsi *);
static void     ixlv_free_queues(struct ixl_vsi *);
static int      ixlv_setup_interface(device_t, struct ixlv_sc *);

static int      ixlv_media_change(struct ifnet *);
static void     ixlv_media_status(struct ifnet *, struct ifmediareq *);

static void     ixlv_local_timer(void *);

static int      ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
static int      ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void     ixlv_init_filters(struct ixlv_sc *);
static void     ixlv_free_filters(struct ixlv_sc *);

static void     ixlv_msix_que(void *);
static void     ixlv_msix_adminq(void *);
static void     ixlv_do_adminq(void *, int);
static void     ixlv_do_adminq_locked(struct ixlv_sc *sc);
static void     ixlv_handle_que(void *, int);
static int      ixlv_reset(struct ixlv_sc *);
static int      ixlv_reset_complete(struct i40e_hw *);
static void     ixlv_set_queue_rx_itr(struct ixl_queue *);
static void     ixlv_set_queue_tx_itr(struct ixl_queue *);
static void     ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
                    enum i40e_status_code);

static void     ixlv_enable_adminq_irq(struct i40e_hw *);
static void     ixlv_disable_adminq_irq(struct i40e_hw *);
static void     ixlv_enable_queue_irq(struct i40e_hw *, int);
static void     ixlv_disable_queue_irq(struct i40e_hw *, int);

static void     ixlv_setup_vlan_filters(struct ixlv_sc *);
static void     ixlv_register_vlan(void *, struct ifnet *, u16);
static void     ixlv_unregister_vlan(void *, struct ifnet *, u16);

static void     ixlv_init_hw(struct ixlv_sc *);
static int      ixlv_setup_vc(struct ixlv_sc *);
static int      ixlv_vf_config(struct ixlv_sc *);

static void     ixlv_cap_txcsum_tso(struct ixl_vsi *,
                    struct ifnet *, int);

static void     ixlv_add_sysctls(struct ixlv_sc *);
static int      ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int      ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixlv_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, ixlv_probe),
        DEVMETHOD(device_attach, ixlv_attach),
        DEVMETHOD(device_detach, ixlv_detach),
        DEVMETHOD(device_shutdown, ixlv_shutdown),
        {0, 0}
};

static driver_t ixlv_driver = {
        "ixlv", ixlv_methods, sizeof(struct ixlv_sc),
};

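/*
** Register the driver with the PCI bus and declare its module
** dependencies; pci and ether must be present at load time.
*/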
devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);

MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
                   "IXLV driver parameters");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixlv_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixlv_ringsz, 0, "Descriptor Ring Size");

/* Set to zero to auto-calculate */
int ixlv_max_queues = 0;
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixlv_max_queues, 0, "Number of Queues");

/*
** Number of entries in Tx queue buf_ring.
** Increasing this will reduce the number of
** errors when transmitting fragmented UDP
** packets.
*/
static int ixlv_txbrsz = DEFAULT_TXBRSZ;
TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
    &ixlv_txbrsz, 0, "TX Buf Ring Size");

/*
** Controls for Interrupt Throttling
**      - true/false for dynamic adjustment
**      - default values for static ITR
*/
int ixlv_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixlv_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixlv_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixlv_rx_itr, 0, "RX Interrupt Rate");

int ixlv_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixlv_tx_itr, 0, "TX Interrupt Rate");

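/*
** Example (illustrative values only): since these are loader
** tunables, they can be set in /boot/loader.conf before the
** driver loads, e.g.:
**
**   hw.ixlv.ringsz="2048"
**   hw.ixlv.max_queues="4"
**   hw.ixlv.dynamic_rx_itr="1"
*/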

/*********************************************************************
 *  Device identification routine
 *
 *  ixlv_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixlv_probe(device_t dev)
{
        ixl_vendor_info_t *ent;

        u16     pci_vendor_id, pci_device_id;
        u16     pci_subvendor_id, pci_subdevice_id;
        char    device_name[256];

        INIT_DEBUGOUT("ixlv_probe: begin");

        pci_vendor_id = pci_get_vendor(dev);
        if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
                return (ENXIO);

        pci_device_id = pci_get_device(dev);
        pci_subvendor_id = pci_get_subvendor(dev);
        pci_subdevice_id = pci_get_subdevice(dev);

        ent = ixlv_vendor_info_array;
        while (ent->vendor_id != 0) {
                if ((pci_vendor_id == ent->vendor_id) &&
                    (pci_device_id == ent->device_id) &&
                    ((pci_subvendor_id == ent->subvendor_id) ||
                     (ent->subvendor_id == 0)) &&
                    ((pci_subdevice_id == ent->subdevice_id) ||
                     (ent->subdevice_id == 0))) {
                        snprintf(device_name, sizeof(device_name),
                            "%s, Version - %s",
                            ixlv_strings[ent->index],
                            ixlv_driver_version);
                        device_set_desc_copy(dev, device_name);
                        return (BUS_PROBE_DEFAULT);
                }
                ent++;
        }
        return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_attach(device_t dev)
{
        struct ixlv_sc  *sc;
        struct i40e_hw  *hw;
        struct ixl_vsi  *vsi;
        int             error = 0;

        INIT_DBG_DEV(dev, "begin");

        /* Allocate, clear, and link in our primary soft structure */
        sc = device_get_softc(dev);
        sc->dev = sc->osdep.dev = dev;
        hw = &sc->hw;
        vsi = &sc->vsi;
        vsi->dev = dev;

        /* Initialize hw struct */
        ixlv_init_hw(sc);

        /* Allocate filter lists */
        ixlv_init_filters(sc);

        /* Core Lock Init */
        mtx_init(&sc->mtx, device_get_nameunit(dev),
            "IXL SC Lock", MTX_DEF);

        /* Set up the timer callout */
        callout_init_mtx(&sc->timer, &sc->mtx, 0);

        /* Do PCI setup - map BAR0, etc */
        if (ixlv_allocate_pci_resources(sc)) {
                device_printf(dev, "%s: Allocation of PCI resources failed\n",
                    __func__);
                error = ENXIO;
                goto err_early;
        }

        INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");

        error = i40e_set_mac_type(hw);
        if (error) {
                device_printf(dev, "%s: set_mac_type failed: %d\n",
                    __func__, error);
                goto err_pci_res;
        }

        error = ixlv_reset_complete(hw);
        if (error) {
                device_printf(dev, "%s: Device is still being reset\n",
                    __func__);
                goto err_pci_res;
        }

        INIT_DBG_DEV(dev, "VF Device is ready for configuration");

        error = ixlv_setup_vc(sc);
        if (error) {
                device_printf(dev, "%s: Error setting up PF comms, %d\n",
                    __func__, error);
                goto err_pci_res;
        }

        INIT_DBG_DEV(dev, "PF API version verified");

        /* TODO: Figure out why MDD events occur when this reset is removed. */
        /* Need API version before sending reset message */
        error = ixlv_reset(sc);
        if (error) {
                device_printf(dev, "VF reset failed; reload the driver\n");
                goto err_aq;
        }

        INIT_DBG_DEV(dev, "VF reset complete");

        /* Ask for VF config from PF */
        error = ixlv_vf_config(sc);
        if (error) {
                device_printf(dev, "Error getting configuration from PF: %d\n",
                    error);
                goto err_aq;
        }

        INIT_DBG_DEV(dev, "VF config from PF:");
        INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
            sc->vf_res->num_vsis,
            sc->vf_res->num_queue_pairs,
            sc->vf_res->max_vectors,
            sc->vf_res->max_mtu);
        INIT_DBG_DEV(dev, "Offload flags: %#010x",
            sc->vf_res->vf_offload_flags);

        // TODO: Move this into ixlv_vf_config?
        /* Got the VF config message back from the PF; now we can parse it */
        for (int i = 0; i < sc->vf_res->num_vsis; i++) {
                if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
                        sc->vsi_res = &sc->vf_res->vsi_res[i];
        }
        if (!sc->vsi_res) {
                device_printf(dev, "%s: no LAN VSI found\n", __func__);
                error = EIO;
                goto err_res_buf;
        }

        INIT_DBG_DEV(dev, "Resource Acquisition complete");

        /* If no MAC address was assigned, just make a random one */
        if (!ixlv_check_ether_addr(hw->mac.addr)) {
                u8 addr[ETHER_ADDR_LEN];
                arc4rand(&addr, sizeof(addr), 0);
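                /*
                 * Clear the I/G (multicast) bit and set the U/L
                 * (locally administered) bit so the generated
                 * address is a valid software-assigned unicast MAC.
                 */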
                addr[0] &= 0xFE;
                addr[0] |= 0x02;
                bcopy(addr, hw->mac.addr, sizeof(addr));
        }

        vsi->id = sc->vsi_res->vsi_id;
        vsi->back = (void *)sc;
        vsi->link_up = TRUE;

        /* This allocates the memory and early settings */
        if (ixlv_setup_queues(sc) != 0) {
                device_printf(dev, "%s: setup queues failed!\n",
                    __func__);
                error = EIO;
                goto out;
        }

        /* Setup the stack interface */
        if (ixlv_setup_interface(dev, sc) != 0) {
                device_printf(dev, "%s: setup interface failed!\n",
                    __func__);
                error = EIO;
                goto out;
        }

        INIT_DBG_DEV(dev, "Queue memory and interface setup");

        /* Do queue interrupt setup */
        ixlv_assign_msix(sc);

        /* Start AdminQ taskqueue */
        ixlv_init_taskqueue(sc);

        /* Initialize stats */
        bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
        ixlv_add_sysctls(sc);

        /* Register for VLAN events */
        vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
            ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
        vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
            ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

        /* We want AQ enabled early */
        ixlv_enable_adminq_irq(hw);

        /* Set things up to run init */
        sc->init_state = IXLV_INIT_READY;

        ixl_vc_init_mgr(sc, &sc->vc_mgr);

        INIT_DBG_DEV(dev, "end");
        return (error);

out:
        ixlv_free_queues(vsi);
err_res_buf:
        free(sc->vf_res, M_DEVBUF);
err_aq:
        i40e_shutdown_adminq(hw);
err_pci_res:
        ixlv_free_pci_resources(sc);
err_early:
        mtx_destroy(&sc->mtx);
        ixlv_free_filters(sc);
        INIT_DBG_DEV(dev, "end: error %d", error);
        return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_detach(device_t dev)
{
        struct ixlv_sc  *sc = device_get_softc(dev);
        struct ixl_vsi  *vsi = &sc->vsi;

        INIT_DBG_DEV(dev, "begin");

        /* Make sure VLANs are not using the driver */
        if (vsi->ifp->if_vlantrunk != NULL) {
                device_printf(dev, "Vlan in use, detach first\n");
                INIT_DBG_DEV(dev, "end");
                return (EBUSY);
        }

        /* Stop driver */
        ether_ifdetach(vsi->ifp);
        if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
                mtx_lock(&sc->mtx);
                ixlv_stop(sc);
                mtx_unlock(&sc->mtx);
        }

        /* Unregister VLAN events */
        if (vsi->vlan_attach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
        if (vsi->vlan_detach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

        /* Drain VC mgr */
        callout_drain(&sc->vc_mgr.callout);

        i40e_shutdown_adminq(&sc->hw);
        taskqueue_free(sc->tq);
        if_free(vsi->ifp);
        free(sc->vf_res, M_DEVBUF);
        ixlv_free_pci_resources(sc);
        ixlv_free_queues(vsi);
        mtx_destroy(&sc->mtx);
        ixlv_free_filters(sc);

        bus_generic_detach(dev);
        INIT_DBG_DEV(dev, "end");
        return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixlv_shutdown(device_t dev)
{
        struct ixlv_sc  *sc = device_get_softc(dev);

        INIT_DBG_DEV(dev, "begin");

        mtx_lock(&sc->mtx);
        ixlv_stop(sc);
        mtx_unlock(&sc->mtx);

        INIT_DBG_DEV(dev, "end");
        return (0);
}

/*
 * Configure TXCSUM(IPV6) and TSO(4/6)
 *      - the hardware handles these together so we
 *        need to tweak them
 */
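/*
 * Illustrative example of the coupling (interface name assumed):
 *
 *   # ifconfig ixlv0 -txcsum   -> TSO4 is disabled too, KEEP_TSO4 set
 *   # ifconfig ixlv0 txcsum    -> TSO4 is restored along with txcsum
 */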
static void
ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
        /* Enable/disable TXCSUM/TSO4 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable |= IFCAP_TXCSUM;
                        /* enable TXCSUM, restore TSO if previously enabled */
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                                ifp->if_capenable |= IFCAP_TSO4;
                        }
                } else if (mask & IFCAP_TSO4) {
                        ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                        if_printf(ifp,
                            "TSO4 requires txcsum, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM)
                        ifp->if_capenable &= ~IFCAP_TXCSUM;
                else if (mask & IFCAP_TSO4)
                        ifp->if_capenable |= IFCAP_TSO4;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && (ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO4;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
                        if_printf(ifp,
                            "TSO4 requires txcsum, disabling both...\n");
                } else if (mask & IFCAP_TSO4)
                        ifp->if_capenable &= ~IFCAP_TSO4;
        }

        /* Enable/disable TXCSUM_IPV6/TSO6 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                                ifp->if_capenable |= IFCAP_TSO6;
                        }
                } else if (mask & IFCAP_TSO6) {
                        ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                        if_printf(ifp,
                            "TSO6 requires txcsum6, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6)
                        ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
                else if (mask & IFCAP_TSO6)
                        ifp->if_capenable |= IFCAP_TSO6;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && (ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO6;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        if_printf(ifp,
                            "TSO6 requires txcsum6, disabling both...\n");
                } else if (mask & IFCAP_TSO6)
                        ifp->if_capenable &= ~IFCAP_TSO6;
        }
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixlv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        struct ixl_vsi          *vsi = ifp->if_softc;
        struct ixlv_sc          *sc = vsi->back;
        struct ifreq            *ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
        struct ifaddr           *ifa = (struct ifaddr *)data;
        bool                    avoid_reset = FALSE;
#endif
        int                     error = 0;

        switch (command) {

        case SIOCSIFADDR:
#ifdef INET
                if (ifa->ifa_addr->sa_family == AF_INET)
                        avoid_reset = TRUE;
#endif
#ifdef INET6
                if (ifa->ifa_addr->sa_family == AF_INET6)
                        avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
                /*
                ** Calling init results in link renegotiation,
                ** so we avoid doing it when possible.
                */
                if (avoid_reset) {
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                                ixlv_init(vsi);
#ifdef INET
                        if (!(ifp->if_flags & IFF_NOARP))
                                arp_ifinit(ifp, ifa);
#endif
                } else
                        error = ether_ioctl(ifp, command, data);
#else
                error = ether_ioctl(ifp, command, data);
#endif
                break;
        case SIOCSIFMTU:
                IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
                mtx_lock(&sc->mtx);
                if (ifr->ifr_mtu > IXL_MAX_FRAME -
                    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
                        error = EINVAL;
                        IOCTL_DBG_IF(ifp, "mtu too large");
                } else {
                        IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", ifp->if_mtu, ifr->ifr_mtu);
                        // ERJ: Interestingly enough, these types don't match
                        ifp->if_mtu = (u_long)ifr->ifr_mtu;
                        vsi->max_frame_size =
                            ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
                            + ETHER_VLAN_ENCAP_LEN;
                        ixlv_init_locked(sc);
                }
                mtx_unlock(&sc->mtx);
                break;
        case SIOCSIFFLAGS:
                IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
                mtx_lock(&sc->mtx);
                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                                ixlv_init_locked(sc);
                } else
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                ixlv_stop(sc);
                sc->if_flags = ifp->if_flags;
                mtx_unlock(&sc->mtx);
                break;
        case SIOCADDMULTI:
                IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        mtx_lock(&sc->mtx);
                        ixlv_disable_intr(vsi);
                        ixlv_add_multi(vsi);
                        ixlv_enable_intr(vsi);
                        mtx_unlock(&sc->mtx);
                }
                break;
        case SIOCDELMULTI:
                IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
                if (sc->init_state == IXLV_RUNNING) {
                        mtx_lock(&sc->mtx);
                        ixlv_disable_intr(vsi);
                        ixlv_del_multi(vsi);
                        ixlv_enable_intr(vsi);
                        mtx_unlock(&sc->mtx);
                }
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
                error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
                break;
        case SIOCSIFCAP:
        {
                int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");

                ixlv_cap_txcsum_tso(vsi, ifp, mask);

                if (mask & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                if (mask & IFCAP_RXCSUM_IPV6)
                        ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
                if (mask & IFCAP_LRO)
                        ifp->if_capenable ^= IFCAP_LRO;
                if (mask & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                if (mask & IFCAP_VLAN_HWFILTER)
                        ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        ixlv_init(vsi);
                }
                VLAN_CAPABILITIES(ifp);

                break;
        }

        default:
                IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}

/*
** Reinitializing the VF is unfortunately more involved than for a
** physical device: the PF must more or less completely recreate our
** memory, so many things that traditional drivers do only once at
** attach must be redone here at each reinitialization. This function
** does that 'prelude' so we can then call the normal locked init code.
*/
int
ixlv_reinit_locked(struct ixlv_sc *sc)
{
        struct i40e_hw          *hw = &sc->hw;
        struct ixl_vsi          *vsi = &sc->vsi;
        struct ifnet            *ifp = vsi->ifp;
        struct ixlv_mac_filter  *mf, *mf_temp;
        struct ixlv_vlan_filter *vf;
        int                     error = 0;

        INIT_DBG_IF(ifp, "begin");

        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                ixlv_stop(sc);

        error = ixlv_reset(sc);

        INIT_DBG_IF(ifp, "VF was reset");

        /* set the state in case we went thru RESET */
        sc->init_state = IXLV_RUNNING;

        /*
        ** Resetting the VF drops all filters from hardware;
        ** we need to mark them to be re-added in init.
        */
        SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
                if (mf->flags & IXL_FILTER_DEL) {
                        SLIST_REMOVE(sc->mac_filters, mf,
                            ixlv_mac_filter, next);
                        free(mf, M_DEVBUF);
                } else
                        mf->flags |= IXL_FILTER_ADD;
        }
        if (vsi->num_vlans != 0)
                SLIST_FOREACH(vf, sc->vlan_filters, next)
                        vf->flags = IXL_FILTER_ADD;
        else { /* clean any stale filters */
                while (!SLIST_EMPTY(sc->vlan_filters)) {
                        vf = SLIST_FIRST(sc->vlan_filters);
                        SLIST_REMOVE_HEAD(sc->vlan_filters, next);
                        free(vf, M_DEVBUF);
                }
        }

        ixlv_enable_adminq_irq(hw);
        ixl_vc_flush(&sc->vc_mgr);

        INIT_DBG_IF(ifp, "end");
        return (error);
}

static void
ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
        enum i40e_status_code code)
{
        struct ixlv_sc *sc;

        sc = arg;

        /*
         * Ignore "Adapter Stopped" messages: that code is returned if an
         * ifconfig down arrives while a command is in progress, so we
         * don't want to print an error in that case.
         */
        if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
                if_printf(sc->vsi.ifp,
                    "Error %d waiting for PF to complete operation %d\n",
                    code, cmd->request);
        }
}

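/*
** Bring the interface up: (re)build the MAC/VLAN filter state,
** initialize the TX/RX rings, and enqueue the virtchnl commands
** asking the PF to configure, map, and enable the queues.
** The caller must hold the core lock.
*/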
static void
ixlv_init_locked(struct ixlv_sc *sc)
{
        struct i40e_hw          *hw = &sc->hw;
        struct ixl_vsi          *vsi = &sc->vsi;
        struct ixl_queue        *que = vsi->queues;
        struct ifnet            *ifp = vsi->ifp;
        int                      error = 0;

        INIT_DBG_IF(ifp, "begin");

        IXLV_CORE_LOCK_ASSERT(sc);

        /* Do a reinit first if an init has already been done */
        if ((sc->init_state == IXLV_RUNNING) ||
            (sc->init_state == IXLV_RESET_REQUIRED) ||
            (sc->init_state == IXLV_RESET_PENDING))
                error = ixlv_reinit_locked(sc);
        /* Don't bother with init if we failed reinit */
        if (error)
                goto init_done;

        /* Remove the existing MAC filter if a new MAC addr is set */
        if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
                error = ixlv_del_mac_filter(sc, hw->mac.addr);
                if (error == 0)
                        ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
                            IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
                            sc);
        }

        /* Adopt any administratively set (LAA) MAC address */
        bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);

        ifp->if_hwassist = 0;
        if (ifp->if_capenable & IFCAP_TSO)
                ifp->if_hwassist |= CSUM_TSO;
        if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
        if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
                ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;

        /* Add mac filter for this VF to PF */
        if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
                error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
                if (!error || error == EEXIST)
                        ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
                            IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
                            sc);
        }

        /* Setup vlans if needed */
        ixlv_setup_vlan_filters(sc);

        /* Prepare the queues for operation */
        for (int i = 0; i < vsi->num_queues; i++, que++) {
                struct  rx_ring *rxr = &que->rxr;

                ixl_init_tx_ring(que);

                if (vsi->max_frame_size <= 2048)
                        rxr->mbuf_sz = MCLBYTES;
                else
                        rxr->mbuf_sz = MJUMPAGESIZE;
                ixl_init_rx_ring(que);
        }

        /* Configure queues */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
            IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);

        /* Set up RSS */
        ixlv_config_rss(sc);

        /* Map vectors */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
            IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);

        /* Enable queues */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
            IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);

        /* Start the local timer */
        callout_reset(&sc->timer, hz, ixlv_local_timer, sc);

        sc->init_state = IXLV_RUNNING;

init_done:
        INIT_DBG_IF(ifp, "end");
        return;
}

/*
**  Init entry point for the stack
*/
void
ixlv_init(void *arg)
{
        struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
        struct ixlv_sc *sc = vsi->back;
        int retries = 0;

        mtx_lock(&sc->mtx);
        ixlv_init_locked(sc);
        mtx_unlock(&sc->mtx);

        /*
        ** Wait for init to finish; the interface is not marked
        ** running until the PF has enabled the queues.
        */
        while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
            && ++retries < 100) {
                i40e_msec_delay(10);
        }
        if (retries >= 100)
                if_printf(vsi->ifp,
                    "Init failed to complete in allotted time!\n");
}

/*
 * ixlv_attach() helper function; gathers information about
 * the (virtual) hardware for use elsewhere in the driver.
 */
static void
ixlv_init_hw(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;

        /* Save off the information about this board */
        hw->vendor_id = pci_get_vendor(dev);
        hw->device_id = pci_get_device(dev);
        hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
        hw->subsystem_vendor_id =
            pci_read_config(dev, PCIR_SUBVEND_0, 2);
        hw->subsystem_device_id =
            pci_read_config(dev, PCIR_SUBDEV_0, 2);

        hw->bus.device = pci_get_slot(dev);
        hw->bus.func = pci_get_function(dev);
}

/*
 * ixlv_attach() helper function; initializes the admin queue
 * and attempts to establish contact with the PF by retrying
 * the initial "API version" message until the PF responds or
 * the retry limit is reached.
 */
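/*
 * Returns 0 on success, or a small positive code identifying the step
 * that failed (1: init_adminq, 2: send_api_ver, 3: ASQ timeout,
 * 4: version verify timeout, 5: version verify error).
 */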
static int
ixlv_setup_vc(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;
        int error = 0, ret_error = 0, asq_retries = 0;
        bool send_api_ver_retried = false;

        /* Need to set these AQ parameters before initializing AQ */
        hw->aq.num_arq_entries = IXL_AQ_LEN;
        hw->aq.num_asq_entries = IXL_AQ_LEN;
        hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
        hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

        for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
                /* Initialize admin queue */
                error = i40e_init_adminq(hw);
                if (error) {
                        device_printf(dev, "%s: init_adminq failed: %d\n",
                            __func__, error);
                        ret_error = 1;
                        continue;
                }

                INIT_DBG_DEV(dev, "Initialized Admin Queue, attempt %d", i+1);

retry_send:
                /* Send the VF's API version */
                error = ixlv_send_api_ver(sc);
                if (error) {
                        i40e_shutdown_adminq(hw);
                        ret_error = 2;
                        device_printf(dev, "%s: unable to send api"
                            " version to PF on attempt %d, error %d\n",
                            __func__, i+1, error);
                }

                asq_retries = 0;
                while (!i40e_asq_done(hw)) {
                        if (++asq_retries > IXLV_AQ_MAX_ERR) {
                                i40e_shutdown_adminq(hw);
                                DDPRINTF(dev, "Admin Queue timeout "
                                    "(waiting for send_api_ver), %d more retries...",
                                    IXLV_AQ_MAX_ERR - (i + 1));
                                ret_error = 3;
                                break;
                        }
                        i40e_msec_delay(10);
                }
                if (asq_retries > IXLV_AQ_MAX_ERR)
                        continue;

                INIT_DBG_DEV(dev, "Sent API version message to PF");

                /* Verify that the PF's API version is compatible */
                error = ixlv_verify_api_ver(sc);
                if (error == ETIMEDOUT) {
                        if (!send_api_ver_retried) {
                                /* Resend message, one more time */
                                send_api_ver_retried = true;
                                device_printf(dev,
                                    "%s: Timeout while verifying API version on first"
                                    " try!\n", __func__);
                                goto retry_send;
                        } else {
                                device_printf(dev,
                                    "%s: Timeout while verifying API version on second"
                                    " try!\n", __func__);
                                ret_error = 4;
                                break;
                        }
                }
                if (error) {
                        device_printf(dev,
                            "%s: Unable to verify API version,"
                            " error %d\n", __func__, error);
                        ret_error = 5;
                }
                break;
        }

        if (ret_error >= 4)
                i40e_shutdown_adminq(hw);
        return (ret_error);
}

/*
 * ixlv_attach() helper function; asks the PF for this VF's
 * configuration, and saves the information if it receives it.
 */
static int
ixlv_vf_config(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;
        int bufsz, error = 0, ret_error = 0;
        int asq_retries, retried = 0;

retry_config:
        error = ixlv_send_vf_config_msg(sc);
        if (error) {
                device_printf(dev,
                    "%s: Unable to send VF config request, attempt %d,"
                    " error %d\n", __func__, retried + 1, error);
                ret_error = 2;
        }

        asq_retries = 0;
        while (!i40e_asq_done(hw)) {
                if (++asq_retries > IXLV_AQ_MAX_ERR) {
                        device_printf(dev, "%s: Admin Queue timeout "
                            "(waiting for send_vf_config_msg), attempt %d\n",
                            __func__, retried + 1);
                        ret_error = 3;
                        goto fail;
                }
                i40e_msec_delay(10);
        }

        INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
            retried + 1);

        if (!sc->vf_res) {
                bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
                    (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
                sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
                if (!sc->vf_res) {
                        device_printf(dev,
                            "%s: Unable to allocate memory for VF configuration"
                            " message from PF on attempt %d\n", __func__, retried + 1);
                        ret_error = 1;
                        goto fail;
                }
        }

        /* Check for VF config response */
        error = ixlv_get_vf_config(sc);
        if (error == ETIMEDOUT) {
                /* The 1st time we timeout, send the configuration message again */
                if (!retried) {
                        retried++;
                        goto retry_config;
                }
        }
        if (error) {
                device_printf(dev,
                    "%s: Unable to get VF configuration from PF after %d tries!\n",
                    __func__, retried + 1);
                ret_error = 4;
        }
        goto done;

fail:
        free(sc->vf_res, M_DEVBUF);
done:
        return (ret_error);
}

/*
 * Allocate MSIX vectors and set up the AQ vector early
 */
static int
ixlv_init_msix(struct ixlv_sc *sc)
{
        device_t dev = sc->dev;
        int rid, want, vectors, queues, available;

        rid = PCIR_BAR(IXL_BAR);
        sc->msix_mem = bus_alloc_resource_any(dev,
            SYS_RES_MEMORY, &rid, RF_ACTIVE);
        if (!sc->msix_mem) {
                /* May not be enabled */
                device_printf(sc->dev,
                    "Unable to map MSIX table\n");
                goto fail;
        }

        available = pci_msix_count(dev);
        if (available == 0) { /* system has msix disabled */
                bus_release_resource(dev, SYS_RES_MEMORY,
                    rid, sc->msix_mem);
                sc->msix_mem = NULL;
                goto fail;
        }

        /* Figure out a reasonable auto config value */
        queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;

        /* Override with the tunable value if it is sane */
        if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues))
                queues = ixlv_max_queues;

        /* Enforce the VF max value */
        if (queues > IXLV_MAX_QUEUES)
                queues = IXLV_MAX_QUEUES;

        /*
        ** Want one vector (RX/TX pair) per queue
        ** plus an additional for the admin queue.
        */
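        /*
         * Example: with 4 CPUs and 8 vectors available, queues = 4
         * and want = 5 (one per queue pair plus the admin queue).
         */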
        want = queues + 1;
        if (want <= available)  /* Have enough */
                vectors = want;
        else {
                device_printf(sc->dev,
                    "MSIX Configuration Problem, "
                    "%d vectors available but %d wanted!\n",
                    available, want);
                goto fail;
        }

        if (pci_alloc_msix(dev, &vectors) == 0) {
                device_printf(sc->dev,
                    "Using MSIX interrupts with %d vectors\n", vectors);
                sc->msix = vectors;
                sc->vsi.num_queues = queues;
        } else {
                /* Without the allocation we cannot continue */
                device_printf(sc->dev, "MSIX allocation failed!\n");
                goto fail;
        }

        /*
        ** Explicitly set the guest PCI BUSMASTER capability, and
        ** rewrite the ENABLE bit in the MSIX control register at
        ** this point, so that the host successfully initializes us.
        */
        {
                u16 pci_cmd_word;
                int msix_ctrl;
                pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
                pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
                pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
                pci_find_cap(dev, PCIY_MSIX, &rid);
                rid += PCIR_MSIX_CTRL;
                msix_ctrl = pci_read_config(dev, rid, 2);
                msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
                pci_write_config(dev, rid, msix_ctrl, 2);
        }

        /* Next we need to set up the vector for the Admin Queue */
        rid = 1;        // zero vector + 1
        sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &rid, RF_SHAREABLE | RF_ACTIVE);
        if (sc->res == NULL) {
                device_printf(dev, "Unable to allocate"
                    " bus resource: AQ interrupt\n");
                goto fail;
        }
        if (bus_setup_intr(dev, sc->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixlv_msix_adminq, sc, &sc->tag)) {
                sc->res = NULL;
                device_printf(dev, "Failed to register AQ handler\n");
                goto fail;
        }
        bus_describe_intr(dev, sc->res, sc->tag, "adminq");

        return (vectors);

fail:
        /* The VF driver MUST use MSIX */
        return (0);
}

static int
ixlv_allocate_pci_resources(struct ixlv_sc *sc)
{
        int             rid;
        device_t        dev = sc->dev;

        rid = PCIR_BAR(0);
        sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &rid, RF_ACTIVE);

        if (!(sc->pci_mem)) {
                device_printf(dev, "Unable to allocate bus resource: memory\n");
                return (ENXIO);
        }

        sc->osdep.mem_bus_space_tag =
                rman_get_bustag(sc->pci_mem);
        sc->osdep.mem_bus_space_handle =
                rman_get_bushandle(sc->pci_mem);
        sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
        sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
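        /*
         * Note: hw_addr holds a pointer to the bus-space handle rather
         * than a direct register mapping; the shared code's register
         * accessors go through the osdep bus_space tag/handle above.
         */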
        sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;

        sc->hw.back = &sc->osdep;

        /* Disable adminq interrupts */
        ixlv_disable_adminq_irq(&sc->hw);

        /*
        ** Now set up MSIX; ixlv_init_msix returns the number
        ** of vectors allocated, or zero on failure.
        */
        sc->msix = ixlv_init_msix(sc);

        /* We fail without MSIX support */
        if (sc->msix == 0)
                return (ENXIO);

        return (0);
}

static void
ixlv_free_pci_resources(struct ixlv_sc *sc)
{
        struct ixl_vsi         *vsi = &sc->vsi;
        struct ixl_queue       *que = vsi->queues;
        device_t                dev = sc->dev;

        /* We may get here before stations are setup */
        if (que == NULL)
                goto early;

        /*
        **  Release all msix queue resources:
        */
        for (int i = 0; i < vsi->num_queues; i++, que++) {
                int rid = que->msix + 1;
                if (que->tag != NULL) {
                        bus_teardown_intr(dev, que->res, que->tag);
                        que->tag = NULL;
                }
                if (que->res != NULL)
                        bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
        }

early:
        /* Clean the AdminQ interrupt */
        if (sc->tag != NULL) {
                bus_teardown_intr(dev, sc->res, sc->tag);
                sc->tag = NULL;
        }
        if (sc->res != NULL)
                bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);

        pci_release_msi(dev);

        if (sc->msix_mem != NULL)
                bus_release_resource(dev, SYS_RES_MEMORY,
                    PCIR_BAR(IXL_BAR), sc->msix_mem);

        if (sc->pci_mem != NULL)
                bus_release_resource(dev, SYS_RES_MEMORY,
                    PCIR_BAR(0), sc->pci_mem);

        return;
}

/*
 * Create the taskqueue and task for Admin Queue interrupts.
 */
static int
ixlv_init_taskqueue(struct ixlv_sc *sc)
{
        int error = 0;

        TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);

        sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
            taskqueue_thread_enqueue, &sc->tq);
        taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
            device_get_nameunit(sc->dev));

        return (error);
}

/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers for the VSI queues
 *
 **********************************************************************/
static int
ixlv_assign_msix(struct ixlv_sc *sc)
{
        device_t        dev = sc->dev;
        struct          ixl_vsi *vsi = &sc->vsi;
        struct          ixl_queue *que = vsi->queues;
        struct          tx_ring  *txr;
        int             error, rid, vector = 1;

        for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
                rid = vector + 1;
                txr = &que->txr;
                que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                    RF_SHAREABLE | RF_ACTIVE);
                if (que->res == NULL) {
                        device_printf(dev, "Unable to allocate"
                            " bus resource: que interrupt [%d]\n", vector);
                        return (ENXIO);
                }
                /* Set the handler function */
                error = bus_setup_intr(dev, que->res,
                    INTR_TYPE_NET | INTR_MPSAFE, NULL,
                    ixlv_msix_que, que, &que->tag);
                if (error) {
                        que->res = NULL;
                        device_printf(dev, "Failed to register que handler\n");
                        return (error);
                }
                bus_describe_intr(dev, que->res, que->tag, "que %d", i);
                /* Bind the vector to a CPU */
                bus_bind_intr(dev, que->res, i);
                que->msix = vector;
                vsi->que_mask |= ((u64)1 << que->msix);
                TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
                TASK_INIT(&que->task, 0, ixlv_handle_que, que);
                que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
                    taskqueue_thread_enqueue, &que->tq);
                taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
                    device_get_nameunit(sc->dev));
        }

        return (0);
}

/*
** Requests a VF reset from the PF.
**
** Requires the VF's Admin Queue to be initialized.
*/
static int
ixlv_reset(struct ixlv_sc *sc)
{
        struct i40e_hw  *hw = &sc->hw;
        device_t        dev = sc->dev;
        int             error = 0;

        /* Ask the PF to reset us if we are initiating */
        if (sc->init_state != IXLV_RESET_PENDING)
                ixlv_request_reset(sc);

        i40e_msec_delay(100);
        error = ixlv_reset_complete(hw);
        if (error) {
                device_printf(dev, "%s: VF reset failed\n",
                    __func__);
                return (error);
        }

        error = i40e_shutdown_adminq(hw);
        if (error) {
                device_printf(dev, "%s: shutdown_adminq failed: %d\n",
                    __func__, error);
                return (error);
        }

        error = i40e_init_adminq(hw);
        if (error) {
                device_printf(dev, "%s: init_adminq failed: %d\n",
                    __func__, error);
                return (error);
        }

        return (0);
}

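/*
** Poll the VF reset status register until the PF reports the reset
** as complete; gives up after ~10 seconds (100 polls of 100 ms).
*/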
1430 static int
1431 ixlv_reset_complete(struct i40e_hw *hw)
1432 {
1433         u32 reg;
1434
1435         for (int i = 0; i < 100; i++) {
1436                 reg = rd32(hw, I40E_VFGEN_RSTAT) &
1437                     I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1438
1439                 if ((reg == I40E_VFR_VFACTIVE) ||
1440                     (reg == I40E_VFR_COMPLETED))
1441                         return (0);
1442                 i40e_msec_delay(100);
1443         }
1444
1445         return (EBUSY);
1446 }
1447
1448
1449 /*********************************************************************
1450  *
1451  *  Setup networking device structure and register an interface.
1452  *
1453  **********************************************************************/
1454 static int
1455 ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
1456 {
1457         struct ifnet            *ifp;
1458         struct ixl_vsi          *vsi = &sc->vsi;
1459         struct ixl_queue        *que = vsi->queues;
1460
1461         INIT_DBG_DEV(dev, "begin");
1462
1463         ifp = vsi->ifp = if_alloc(IFT_ETHER);
1464         if (ifp == NULL) {
1465                 device_printf(dev, "%s: could not allocate ifnet"
1466                     " structure!\n", __func__);
1467                 return (-1);
1468         }
1469
1470         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1471
1472         ifp->if_mtu = ETHERMTU;
1473         ifp->if_baudrate = 4000000000;  /* XXX: placeholder; the PF reports the VF's actual link speed */
1474         ifp->if_init = ixlv_init;
1475         ifp->if_softc = vsi;
1476         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1477         ifp->if_ioctl = ixlv_ioctl;
1478
1479 #if __FreeBSD_version >= 1100000
1480         if_setgetcounterfn(ifp, ixl_get_counter);
1481 #endif
1482
1483         ifp->if_transmit = ixl_mq_start;
1484
1485         ifp->if_qflush = ixl_qflush;
1486         ifp->if_snd.ifq_maxlen = que->num_desc - 2;
1487
1488         ether_ifattach(ifp, sc->hw.mac.addr);
1489
1490         vsi->max_frame_size =
1491             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1492             + ETHER_VLAN_ENCAP_LEN;
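             /* e.g. 1500 + 14 + 4 + 4 = 1522 bytes with the default MTU */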
1493
1494         /*
1495          * Tell the upper layer(s) we support long frames.
1496          */
1497         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1498
1499         ifp->if_capabilities |= IFCAP_HWCSUM;
1500         ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1501         ifp->if_capabilities |= IFCAP_TSO;
1502         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1503
1504         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1505                              |  IFCAP_VLAN_HWTSO
1506                              |  IFCAP_VLAN_MTU
1507                              |  IFCAP_VLAN_HWCSUM
1508                              |  IFCAP_LRO;
1509         ifp->if_capenable = ifp->if_capabilities;
1510
1511         /*
1512         ** Don't turn this on by default, if vlans are
1513         ** created on another pseudo device (eg. lagg)
1514         ** then vlan events are not passed thru, breaking
1515         ** operation, but with HW FILTER off it works. If
1516         ** using vlans directly on the ixl driver you can
1517         ** enable this and get full hardware tag filtering.
1518         */
1519         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1520
1521         /*
1522          * Specify the media types supported by this adapter and register
1523          * callbacks to update media and link information
1524          */
1525         ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
1526                      ixlv_media_status);
1527
1528         // JFV Add media types later?
1529
1530         ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1531         ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1532
1533         INIT_DBG_DEV(dev, "end");
1534         return (0);
1535 }
1536
1537 /*
1538 ** Allocate and setup the interface queues
1539 */
1540 static int
1541 ixlv_setup_queues(struct ixlv_sc *sc)
1542 {
1543         device_t                dev = sc->dev;
1544         struct ixl_vsi          *vsi;
1545         struct ixl_queue        *que;
1546         struct tx_ring          *txr;
1547         struct rx_ring          *rxr;
1548         int                     rsize, tsize;
1549         int                     error = I40E_SUCCESS;
1550
1551         vsi = &sc->vsi;
1552         vsi->back = (void *)sc;
1553         vsi->hw = &sc->hw;
1554         vsi->num_vlans = 0;
1555
1556         /* Get memory for the station queues */
1557         if (!(vsi->queues =
1558                 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
1559                 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1560                         device_printf(dev, "Unable to allocate queue memory\n");
1561                         error = ENOMEM;
1562                         goto early;
1563         }
1564
1565         for (int i = 0; i < vsi->num_queues; i++) {
1566                 que = &vsi->queues[i];
1567                 que->num_desc = ixlv_ringsz;
1568                 que->me = i;
1569                 que->vsi = vsi;
1570                 /* mark the queue as active */
1571                 vsi->active_queues |= (u64)1 << que->me;
1572
1573                 txr = &que->txr;
1574                 txr->que = que;
1575                 txr->tail = I40E_QTX_TAIL1(que->me);
1576                 /* Initialize the TX lock */
1577                 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
1578                     device_get_nameunit(dev), que->me);
1579                 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
1580                 /*
1581                 ** Create the TX descriptor ring, the extra int is
1582                 ** added as the location for HEAD WB.
1583                 */
1584                 tsize = roundup2((que->num_desc *
1585                     sizeof(struct i40e_tx_desc)) +
1586                     sizeof(u32), DBA_ALIGN);
1587                 if (i40e_allocate_dma_mem(&sc->hw,
1588                     &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
1589                         device_printf(dev,
1590                             "Unable to allocate TX Descriptor memory\n");
1591                         error = ENOMEM;
1592                         goto fail;
1593                 }
1594                 txr->base = (struct i40e_tx_desc *)txr->dma.va;
1595                 bzero((void *)txr->base, tsize);
1596                 /* Now allocate transmit soft structs for the ring */
1597                 if (ixl_allocate_tx_data(que)) {
1598                         device_printf(dev,
1599                             "Critical Failure setting up TX structures\n");
1600                         error = ENOMEM;
1601                         goto fail;
1602                 }
1603                 /* Allocate a buf ring */
1604                 txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
1605                     M_WAITOK, &txr->mtx);
1606                 if (txr->br == NULL) {
1607                         device_printf(dev,
1608                             "Critical Failure setting up TX buf ring\n");
1609                         error = ENOMEM;
1610                         goto fail;
1611                 }
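                     /* NB: with M_WAITOK, buf_ring_alloc() should not fail, so the check above is defensive */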
1612
1613                 /*
1614                  * Next the RX queues...
1615                  */ 
1616                 rsize = roundup2(que->num_desc *
1617                     sizeof(union i40e_rx_desc), DBA_ALIGN);
1618                 rxr = &que->rxr;
1619                 rxr->que = que;
1620                 rxr->tail = I40E_QRX_TAIL1(que->me);
1621
1622                 /* Initialize the RX side lock */
1623                 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
1624                     device_get_nameunit(dev), que->me);
1625                 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
1626
1627                 if (i40e_allocate_dma_mem(&sc->hw,
1628                     &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
1629                         device_printf(dev,
1630                             "Unable to allocate RX Descriptor memory\n");
1631                         error = ENOMEM;
1632                         goto fail;
1633                 }
1634                 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
1635                 bzero((void *)rxr->base, rsize);
1636
1637                 /* Allocate receive soft structs for the ring*/
1638                 if (ixl_allocate_rx_data(que)) {
1639                         device_printf(dev,
1640                             "Critical Failure setting up receive structs\n");
1641                         error = ENOMEM;
1642                         goto fail;
1643                 }
1644         }
1645
1646         return (0);
1647
1648 fail:
1649         for (int i = 0; i < vsi->num_queues; i++) {
1650                 que = &vsi->queues[i];
1651                 rxr = &que->rxr;
1652                 txr = &que->txr;
1653                 if (rxr->base)
1654                         i40e_free_dma_mem(&sc->hw, &rxr->dma);
1655                 if (txr->base)
1656                         i40e_free_dma_mem(&sc->hw, &txr->dma);
1657         }
1658         free(vsi->queues, M_DEVBUF);
1659
1660 early:
1661         return (error);
1662 }
1663
1664 /*
1665 ** This routine is run via a VLAN config EVENT;
1666 ** it enables us to use the HW Filter table since
1667 ** we can get the VLAN id. This just creates the
1668 ** entry in the soft version of the VFTA; init will
1669 ** repopulate the real table.
1670 */
1671 static void
1672 ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1673 {
1674         struct ixl_vsi                  *vsi = ifp->if_softc;
1675         struct ixlv_sc          *sc = vsi->back;
1676         struct ixlv_vlan_filter *v;
1677
1678
1679         if (ifp->if_softc !=  arg)   /* Not our event */
1680                 return;
1681
1682         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1683                 return;
1684
1685         /* Sanity check - make sure it doesn't already exist */
1686         SLIST_FOREACH(v, sc->vlan_filters, next) {
1687                 if (v->vlan == vtag)
1688                         return;
1689         }
1690
1691         mtx_lock(&sc->mtx);
1692         v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
             if (v == NULL) {
                     /* No memory for the filter entry; drop the request */
                     mtx_unlock(&sc->mtx);
                     return;
             }
1693         ++vsi->num_vlans;
1694         SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
1695         v->vlan = vtag;
1696         v->flags = IXL_FILTER_ADD;
1697         ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
1698             IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
1699         mtx_unlock(&sc->mtx);
1700         return;
1701 }
1702
1703 /*
1704 ** This routine is run via a VLAN
1705 ** unconfig EVENT; it removes our
1706 ** entry in the soft VFTA.
1707 */
1708 static void
1709 ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1710 {
1711         struct ixl_vsi                  *vsi = ifp->if_softc;
1712         struct ixlv_sc          *sc = vsi->back;
1713         struct ixlv_vlan_filter *v;
1714         int                             i = 0;
1715         
1716         if (ifp->if_softc !=  arg)
1717                 return;
1718
1719         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1720                 return;
1721
1722         mtx_lock(&sc->mtx);
1723         SLIST_FOREACH(v, sc->vlan_filters, next) {
1724                 if (v->vlan == vtag) {
1725                         v->flags = IXL_FILTER_DEL;
1726                         ++i;
1727                         --vsi->num_vlans;
1728                 }
1729         }
1730         if (i)
1731                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
1732                     IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
1733         mtx_unlock(&sc->mtx);
1734         return;
1735 }
1736
1737 /*
1738 ** Get a new filter and add it to the mac filter list.
1739 */
1740 static struct ixlv_mac_filter *
1741 ixlv_get_mac_filter(struct ixlv_sc *sc)
1742 {
1743         struct ixlv_mac_filter  *f;
1744
1745         f = malloc(sizeof(struct ixlv_mac_filter),
1746             M_DEVBUF, M_NOWAIT | M_ZERO);
1747         if (f)
1748                 SLIST_INSERT_HEAD(sc->mac_filters, f, next);
1749
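             /* May return NULL when the M_NOWAIT allocation fails; callers must check */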
1750         return (f);
1751 }
1752
1753 /*
1754 ** Find the filter with matching MAC address
1755 */
1756 static struct ixlv_mac_filter *
1757 ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
1758 {
1759         struct ixlv_mac_filter  *f;
1760         bool                            match = FALSE;
1761
1762         SLIST_FOREACH(f, sc->mac_filters, next) {
1763                 if (cmp_etheraddr(f->macaddr, macaddr)) {
1764                         match = TRUE;
1765                         break;
1766                 }
1767         }       
1768
1769         if (!match)
1770                 f = NULL;
1771         return (f);
1772 }
1773
1774 /*
1775 ** Admin Queue interrupt handler
1776 */
1777 static void
1778 ixlv_msix_adminq(void *arg)
1779 {
1780         struct ixlv_sc  *sc = arg;
1781         struct i40e_hw  *hw = &sc->hw;
1782         device_t        dev = sc->dev;
1783         u32             reg, mask, oldreg;
1784
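             /* ICR01 is read-to-clear; this read acknowledges any pending causes */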
1785         reg = rd32(hw, I40E_VFINT_ICR01);
1786         mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
1787
1788         reg = rd32(hw, I40E_VFINT_DYN_CTL01);
1789         reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
1790         wr32(hw, I40E_VFINT_DYN_CTL01, reg);
1791
1792         /* check for Admin queue errors */
1793         oldreg = reg = rd32(hw, hw->aq.arq.len);
1794         if (reg & I40E_VF_ARQLEN_ARQVFE_MASK) {
1795                 device_printf(dev, "ARQ VF Error detected\n");
1796                 reg &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
1797         }
1798         if (reg & I40E_VF_ARQLEN_ARQOVFL_MASK) {
1799                 device_printf(dev, "ARQ Overflow Error detected\n");
1800                 reg &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
1801         }
1802         if (reg & I40E_VF_ARQLEN_ARQCRIT_MASK) {
1803                 device_printf(dev, "ARQ Critical Error detected\n");
1804                 reg &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
1805         }
1806         if (oldreg != reg)
1807                 wr32(hw, hw->aq.arq.len, reg);
1808
1809         oldreg = reg = rd32(hw, hw->aq.asq.len);
1810         if (reg & I40E_VF_ATQLEN_ATQVFE_MASK) {
1811                 device_printf(dev, "ASQ VF Error detected\n");
1812                 reg &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
1813         }
1814         if (reg & I40E_VF_ATQLEN_ATQOVFL_MASK) {
1815                 device_printf(dev, "ASQ Overflow Error detected\n");
1816                 reg &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
1817         }
1818         if (reg & I40E_VF_ATQLEN_ATQCRIT_MASK) {
1819                 device_printf(dev, "ASQ Critical Error detected\n");
1820                 reg &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
1821         }
1822         if (oldreg != reg)
1823                 wr32(hw, hw->aq.asq.len, reg);
1824
1825         /* re-enable interrupt causes */
1826         wr32(hw, I40E_VFINT_ICR0_ENA1, mask);
1827         wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);
1828
1829         /* schedule task */
1830         taskqueue_enqueue(sc->tq, &sc->aq_irq);
1831         return;
1832 }
1833
1834 void
1835 ixlv_enable_intr(struct ixl_vsi *vsi)
1836 {
1837         struct i40e_hw          *hw = vsi->hw;
1838         struct ixl_queue        *que = vsi->queues;
1839
1840         ixlv_enable_adminq_irq(hw);
1841         for (int i = 0; i < vsi->num_queues; i++, que++)
1842                 ixlv_enable_queue_irq(hw, que->me);
1843 }
1844
1845 void
1846 ixlv_disable_intr(struct ixl_vsi *vsi)
1847 {
1848         struct i40e_hw          *hw = vsi->hw;
1849         struct ixl_queue       *que = vsi->queues;
1850
1851         ixlv_disable_adminq_irq(hw);
1852         for (int i = 0; i < vsi->num_queues; i++, que++)
1853                 ixlv_disable_queue_irq(hw, que->me);
1854 }
1855
1856
1857 static void
1858 ixlv_disable_adminq_irq(struct i40e_hw *hw)
1859 {
1860         wr32(hw, I40E_VFINT_DYN_CTL01, 0);
1861         wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
1862         /* flush */
1863         rd32(hw, I40E_VFGEN_RSTAT);
1864         return;
1865 }
1866
1867 static void
1868 ixlv_enable_adminq_irq(struct i40e_hw *hw)
1869 {
1870         wr32(hw, I40E_VFINT_DYN_CTL01,
1871             I40E_VFINT_DYN_CTL01_INTENA_MASK |
1872             I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1873         wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
1874         /* flush */
1875         rd32(hw, I40E_VFGEN_RSTAT);
1876         return;
1877 }
1878
1879 static void
1880 ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
1881 {
1882         u32             reg;
1883
1884         reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1885             I40E_VFINT_DYN_CTLN_CLEARPBA_MASK; 
1886         wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
1887 }
1888
1889 static void
1890 ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
1891 {
1892         wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
1893         rd32(hw, I40E_VFGEN_RSTAT);
1894         return;
1895 }
1896
1897
1898 /*
1899 ** Provide an update to the queue RX
1900 ** interrupt moderation value.
1901 */
1902 static void
1903 ixlv_set_queue_rx_itr(struct ixl_queue *que)
1904 {
1905         struct ixl_vsi  *vsi = que->vsi;
1906         struct i40e_hw  *hw = vsi->hw;
1907         struct rx_ring  *rxr = &que->rxr;
1908         u16             rx_itr;
1909         u16             rx_latency = 0;
1910         int             rx_bytes;
1911
1912
1913         /* Idle, do nothing */
1914         if (rxr->bytes == 0)
1915                 return;
1916
1917         if (ixlv_dynamic_rx_itr) {
1918                 rx_bytes = rxr->bytes/rxr->itr;
1919                 rx_itr = rxr->itr;
1920
1921                 /* Adjust latency range */
1922                 switch (rxr->latency) {
1923                 case IXL_LOW_LATENCY:
1924                         if (rx_bytes > 10) {
1925                                 rx_latency = IXL_AVE_LATENCY;
1926                                 rx_itr = IXL_ITR_20K;
1927                         }
1928                         break;
1929                 case IXL_AVE_LATENCY:
1930                         if (rx_bytes > 20) {
1931                                 rx_latency = IXL_BULK_LATENCY;
1932                                 rx_itr = IXL_ITR_8K;
1933                         } else if (rx_bytes <= 10) {
1934                                 rx_latency = IXL_LOW_LATENCY;
1935                                 rx_itr = IXL_ITR_100K;
1936                         }
1937                         break;
1938                 case IXL_BULK_LATENCY:
1939                         if (rx_bytes <= 20) {
1940                                 rx_latency = IXL_AVE_LATENCY;
1941                                 rx_itr = IXL_ITR_20K;
1942                         }
1943                         break;
1944                  }
1945
1946                 rxr->latency = rx_latency;
1947
1948                 if (rx_itr != rxr->itr) {
1949                         /* do an exponential smoothing */
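                             /*
                             ** i.e. 1/new = 0.9*(1/cur) + 0.1*(1/target): a 90/10
                             ** weighted step in interrupt-rate terms, so the ITR
                             ** interval drifts toward the target instead of jumping.
                             */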
1950                         rx_itr = (10 * rx_itr * rxr->itr) /
1951                             ((9 * rx_itr) + rxr->itr);
1952                         rxr->itr = rx_itr & IXL_MAX_ITR;
1953                         wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1954                             que->me), rxr->itr);
1955                 }
1956         } else { /* We may have toggled to non-dynamic */
1957                 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
1958                         vsi->rx_itr_setting = ixlv_rx_itr;
1959                 /* Update the hardware if needed */
1960                 if (rxr->itr != vsi->rx_itr_setting) {
1961                         rxr->itr = vsi->rx_itr_setting;
1962                         wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1963                             que->me), rxr->itr);
1964                 }
1965         }
1966         rxr->bytes = 0;
1967         rxr->packets = 0;
1968         return;
1969 }
1970
1971
1972 /*
1973 ** Provide an update to the queue TX
1974 ** interrupt moderation value.
1975 */
1976 static void
1977 ixlv_set_queue_tx_itr(struct ixl_queue *que)
1978 {
1979         struct ixl_vsi  *vsi = que->vsi;
1980         struct i40e_hw  *hw = vsi->hw;
1981         struct tx_ring  *txr = &que->txr;
1982         u16             tx_itr;
1983         u16             tx_latency = 0;
1984         int             tx_bytes;
1985
1986
1987         /* Idle, do nothing */
1988         if (txr->bytes == 0)
1989                 return;
1990
1991         if (ixlv_dynamic_tx_itr) {
1992                 tx_bytes = txr->bytes/txr->itr;
1993                 tx_itr = txr->itr;
1994
1995                 switch (txr->latency) {
1996                 case IXL_LOW_LATENCY:
1997                         if (tx_bytes > 10) {
1998                                 tx_latency = IXL_AVE_LATENCY;
1999                                 tx_itr = IXL_ITR_20K;
2000                         }
2001                         break;
2002                 case IXL_AVE_LATENCY:
2003                         if (tx_bytes > 20) {
2004                                 tx_latency = IXL_BULK_LATENCY;
2005                                 tx_itr = IXL_ITR_8K;
2006                         } else if (tx_bytes <= 10) {
2007                                 tx_latency = IXL_LOW_LATENCY;
2008                                 tx_itr = IXL_ITR_100K;
2009                         }
2010                         break;
2011                 case IXL_BULK_LATENCY:
2012                         if (tx_bytes <= 20) {
2013                                 tx_latency = IXL_AVE_LATENCY;
2014                                 tx_itr = IXL_ITR_20K;
2015                         }
2016                         break;
2017                 }
2018
2019                 txr->latency = tx_latency;
2020
2021                 if (tx_itr != txr->itr) {
2022                  /* do an exponential smoothing */
2023                         tx_itr = (10 * tx_itr * txr->itr) /
2024                             ((9 * tx_itr) + txr->itr);
2025                         txr->itr = tx_itr & IXL_MAX_ITR;
2026                         wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2027                             que->me), txr->itr);
2028                 }
2029
2030         } else { /* We may have toggled to non-dynamic */
2031                 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2032                         vsi->tx_itr_setting = ixlv_tx_itr;
2033                 /* Update the hardware if needed */
2034                 if (txr->itr != vsi->tx_itr_setting) {
2035                         txr->itr = vsi->tx_itr_setting;
2036                         wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2037                             que->me), txr->itr);
2038                 }
2039         }
2040         txr->bytes = 0;
2041         txr->packets = 0;
2042         return;
2043 }
2044
2045
2046 /*
2047 **
2048 ** MSIX Interrupt Handlers and Tasklets
2049 **
2050 */
2051 static void
2052 ixlv_handle_que(void *context, int pending)
2053 {
2054         struct ixl_queue *que = context;
2055         struct ixl_vsi *vsi = que->vsi;
2056         struct i40e_hw  *hw = vsi->hw;
2057         struct tx_ring  *txr = &que->txr;
2058         struct ifnet    *ifp = vsi->ifp;
2059         bool            more;
2060
2061         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2062                 more = ixl_rxeof(que, IXL_RX_LIMIT);
2063                 mtx_lock(&txr->mtx);
2064                 ixl_txeof(que);
2065                 if (!drbr_empty(ifp, txr->br))
2066                         ixl_mq_start_locked(ifp, txr);
2067                 mtx_unlock(&txr->mtx);
2068                 if (more) {
2069                         taskqueue_enqueue(que->tq, &que->task);
2070                         return;
2071                 }
2072         }
2073
2074         /* Re-enable this queue's interrupt */
2075         ixlv_enable_queue_irq(hw, que->me);
2076         return;
2077 }
2078
2079
2080 /*********************************************************************
2081  *
2082  *  MSIX Queue Interrupt Service routine
2083  *
2084  **********************************************************************/
2085 static void
2086 ixlv_msix_que(void *arg)
2087 {
2088         struct ixl_queue        *que = arg;
2089         struct ixl_vsi  *vsi = que->vsi;
2090         struct i40e_hw  *hw = vsi->hw;
2091         struct tx_ring  *txr = &que->txr;
2092         bool            more_tx, more_rx;
2093
2094         /* Spurious interrupts are ignored */
2095         if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
2096                 return;
2097
2098         ++que->irqs;
2099
2100         more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
2101
2102         mtx_lock(&txr->mtx);
2103         more_tx = ixl_txeof(que);
2104         /*
2105         ** Make certain that if the stack 
2106         ** has anything queued the task gets
2107         ** scheduled to handle it.
2108         */
2109         if (!drbr_empty(vsi->ifp, txr->br))
2110                 more_tx = 1;
2111         mtx_unlock(&txr->mtx);
2112
2113         ixlv_set_queue_rx_itr(que);
2114         ixlv_set_queue_tx_itr(que);
2115
2116         if (more_tx || more_rx)
2117                 taskqueue_enqueue(que->tq, &que->task);
2118         else
2119                 ixlv_enable_queue_irq(hw, que->me);
2120
2121         return;
2122 }
2123
2124
2125 /*********************************************************************
2126  *
2127  *  Media Ioctl callback
2128  *
2129  *  This routine is called whenever the user queries the status of
2130  *  the interface using ifconfig.
2131  *
2132  **********************************************************************/
2133 static void
2134 ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
2135 {
2136         struct ixl_vsi          *vsi = ifp->if_softc;
2137         struct ixlv_sc  *sc = vsi->back;
2138
2139         INIT_DBG_IF(ifp, "begin");
2140
2141         mtx_lock(&sc->mtx);
2142
2143         ixlv_update_link_status(sc);
2144
2145         ifmr->ifm_status = IFM_AVALID;
2146         ifmr->ifm_active = IFM_ETHER;
2147
2148         if (!vsi->link_up) {
2149                 mtx_unlock(&sc->mtx);
2150                 INIT_DBG_IF(ifp, "end: link not up");
2151                 return;
2152         }
2153
2154         ifmr->ifm_status |= IFM_ACTIVE;
2155         /* Hardware is always full-duplex */
2156         ifmr->ifm_active |= IFM_FDX;
2157         mtx_unlock(&sc->mtx);
2158         INIT_DBG_IF(ifp, "end");
2159         return;
2160 }
2161
2162 /*********************************************************************
2163  *
2164  *  Media Ioctl callback
2165  *
2166  *  This routine is called when the user changes speed/duplex using
2167  *  the media/mediaopt options with ifconfig.
2168  *
2169  **********************************************************************/
2170 static int
2171 ixlv_media_change(struct ifnet * ifp)
2172 {
2173         struct ixl_vsi *vsi = ifp->if_softc;
             struct ixlv_sc *sc = vsi->back;
             /* The ifmedia struct lives on the softc (see ixlv_setup_interface) */
2174         struct ifmedia *ifm = &sc->media;
2175
2176         INIT_DBG_IF(ifp, "begin");
2177
2178         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2179                 return (EINVAL);
2180
2181         INIT_DBG_IF(ifp, "end");
2182         return (0);
2183 }
2184
2185
2186 /*********************************************************************
2187  *  Multicast Initialization
2188  *
2189  *  This routine is called by init to reset a fresh state.
2190  *
2191  **********************************************************************/
2192
2193 static void
2194 ixlv_init_multi(struct ixl_vsi *vsi)
2195 {
2196         struct ixlv_mac_filter *f;
2197         struct ixlv_sc  *sc = vsi->back;
2198         int                     mcnt = 0;
2199
2200         IOCTL_DBG_IF(vsi->ifp, "begin");
2201
2202         /* First clear any multicast filters */
2203         SLIST_FOREACH(f, sc->mac_filters, next) {
2204                 if ((f->flags & IXL_FILTER_USED)
2205                     && (f->flags & IXL_FILTER_MC)) {
2206                         f->flags |= IXL_FILTER_DEL;
2207                         mcnt++;
2208                 }
2209         }
2210         if (mcnt > 0)
2211                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2212                     IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2213                     sc);
2214
2215         IOCTL_DBG_IF(vsi->ifp, "end");
2216 }
2217
2218 static void
2219 ixlv_add_multi(struct ixl_vsi *vsi)
2220 {
2221         struct ifmultiaddr      *ifma;
2222         struct ifnet            *ifp = vsi->ifp;
2223         struct ixlv_sc  *sc = vsi->back;
2224         int                     mcnt = 0;
2225
2226         IOCTL_DBG_IF(ifp, "begin");
2227
2228         if_maddr_rlock(ifp);
2229         /*
2230         ** Get a count, to decide if we
2231         ** simply use multicast promiscuous.
2232         */
2233         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2234                 if (ifma->ifma_addr->sa_family != AF_LINK)
2235                         continue;
2236                 mcnt++;
2237         }
2238         if_maddr_runlock(ifp);
2239
2240         // TODO: Remove -- cannot set promiscuous mode in a VF
2241         if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
2242                 /* delete all multicast filters */
2243                 ixlv_init_multi(vsi);
2244                 sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
2245                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2246                     IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
2247                     sc);
2248                 IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
2249                 return;
2250         }
2251
2252         mcnt = 0;
2253         if_maddr_rlock(ifp);
2254         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2255                 if (ifma->ifma_addr->sa_family != AF_LINK)
2256                         continue;
2257                 if (!ixlv_add_mac_filter(sc,
2258                     (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2259                     IXL_FILTER_MC))
2260                         mcnt++;
2261         }
2262         if_maddr_runlock(ifp);
2263         /*
2264         ** Notify AQ task that sw filters need to be
2265         ** added to hw list
2266         */
2267         if (mcnt > 0)
2268                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2269                     IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
2270                     sc);
2271
2272         IOCTL_DBG_IF(ifp, "end");
2273 }
2274
2275 static void
2276 ixlv_del_multi(struct ixl_vsi *vsi)
2277 {
2278         struct ixlv_mac_filter *f;
2279         struct ifmultiaddr      *ifma;
2280         struct ifnet            *ifp = vsi->ifp;
2281         struct ixlv_sc  *sc = vsi->back;
2282         int                     mcnt = 0;
2283         bool            match = FALSE;
2284
2285         IOCTL_DBG_IF(ifp, "begin");
2286
2287         /* Search for removed multicast addresses */
2288         if_maddr_rlock(ifp);
2289         SLIST_FOREACH(f, sc->mac_filters, next) {
2290                 if ((f->flags & IXL_FILTER_USED)
2291                     && (f->flags & IXL_FILTER_MC)) {
2292                         /* check if mac address in filter is in sc's list */
2293                         match = FALSE;
2294                         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2295                                 if (ifma->ifma_addr->sa_family != AF_LINK)
2296                                         continue;
2297                                 u8 *mc_addr =
2298                                     (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2299                                 if (cmp_etheraddr(f->macaddr, mc_addr)) {
2300                                         match = TRUE;
2301                                         break;
2302                                 }
2303                         }
2304                         /* if this filter is not in the sc's list, remove it */
2305                         if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
2306                                 f->flags |= IXL_FILTER_DEL;
2307                                 mcnt++;
2308                                 IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
2309                                     MAC_FORMAT_ARGS(f->macaddr));
2310                         }
2311                         else if (match == FALSE)
2312                                 IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
2313                                     MAC_FORMAT_ARGS(f->macaddr));
2314                 }
2315         }
2316         if_maddr_runlock(ifp);
2317
2318         if (mcnt > 0)
2319                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2320                     IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2321                     sc);
2322
2323         IOCTL_DBG_IF(ifp, "end");
2324 }
2325
2326 /*********************************************************************
2327  *  Timer routine
2328  *
2329  *  This routine checks for link status, updates statistics,
2330  *  and runs the watchdog check.
2331  *
2332  **********************************************************************/
2333
2334 static void
2335 ixlv_local_timer(void *arg)
2336 {
2337         struct ixlv_sc  *sc = arg;
2338         struct i40e_hw          *hw = &sc->hw;
2339         struct ixl_vsi          *vsi = &sc->vsi;
2340         struct ixl_queue        *que = vsi->queues;
2341         device_t                dev = sc->dev;
2342         int                     hung = 0;
2343         u32                     mask, val;
2344
2345         IXLV_CORE_LOCK_ASSERT(sc);
2346
2347         /* If Reset is in progress just bail */
2348         if (sc->init_state == IXLV_RESET_PENDING)
2349                 return;
2350
2351         /* Check for when PF triggers a VF reset */
2352         val = rd32(hw, I40E_VFGEN_RSTAT) &
2353             I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2354
2355         if (val != I40E_VFR_VFACTIVE
2356             && val != I40E_VFR_COMPLETED) {
2357                 DDPRINTF(dev, "reset in progress! (%d)", val);
2358                 return;
2359         }
2360
2361         ixlv_request_stats(sc);
2362
2363         /* clean and process any events */
2364         taskqueue_enqueue(sc->tq, &sc->aq_irq);
2365
2366         /*
2367         ** Check status on the queues for a hang
2368         */
2369         mask = (I40E_VFINT_DYN_CTLN_INTENA_MASK |
2370             I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK);
2371
2372         for (int i = 0; i < vsi->num_queues; i++,que++) {
2373                 /* Any queues with outstanding work get a sw irq */
2374                 if (que->busy)
2375                         wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
2376                 /*
2377                 ** Each time txeof runs but leaves uncleaned
2378                 ** descriptors behind, busy is incremented; once it
2379                 ** reaches IXL_MAX_TX_BUSY we declare the queue hung.
2380                 */
2381                 if (que->busy == IXL_QUEUE_HUNG) {
2382                         ++hung;
2383                         /* Mark the queue as inactive */
2384                         vsi->active_queues &= ~((u64)1 << que->me);
2385                         continue;
2386                 } else {
2387                         /* Check if we've come back from hung */
2388                         if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
2389                                 vsi->active_queues |= ((u64)1 << que->me);
2390                 }
2391                 if (que->busy >= IXL_MAX_TX_BUSY) {
2392                         device_printf(dev,"Warning queue %d "
2393                             "appears to be hung!\n", i);
2394                         que->busy = IXL_QUEUE_HUNG;
2395                         ++hung;
2396                 }
2397         }
2398         /* Only reset when all queues show hung */
2399         if (hung == vsi->num_queues)
2400                 goto hung;
2401         callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
2402         return;
2403
2404 hung:
2405         device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
2406         sc->init_state = IXLV_RESET_REQUIRED;
2407         ixlv_init_locked(sc);
2408 }
2409
2410 /*
2411 ** Note: this routine updates the OS on the link state
2412 **      the real check of the hardware only happens with
2413 **      a link interrupt.
2414 */
2415 void
2416 ixlv_update_link_status(struct ixlv_sc *sc)
2417 {
2418         struct ixl_vsi          *vsi = &sc->vsi;
2419         struct ifnet            *ifp = vsi->ifp;
2420         device_t                 dev = sc->dev;
2421
2422         if (vsi->link_up) {
2423                 if (vsi->link_active == FALSE) {
2424                         if (bootverbose)
2425                                 device_printf(dev,"Link is Up, %d Gbps\n",
2426                                     (vsi->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
2427                         vsi->link_active = TRUE;
2428                         if_link_state_change(ifp, LINK_STATE_UP);
2429                 }
2430         } else { /* Link down */
2431                 if (vsi->link_active == TRUE) {
2432                         if (bootverbose)
2433                                 device_printf(dev,"Link is Down\n");
2434                         if_link_state_change(ifp, LINK_STATE_DOWN);
2435                         vsi->link_active = FALSE;
2436                 }
2437         }
2438
2439         return;
2440 }
2441
2442 /*********************************************************************
2443  *
2444  *  This routine disables all traffic on the adapter by issuing a
2445  *  global reset on the MAC and deallocates TX/RX buffers.
2446  *
2447  **********************************************************************/
2448
2449 static void
2450 ixlv_stop(struct ixlv_sc *sc)
2451 {
2452         struct ifnet *ifp;
2453         int start;
2454
2455         ifp = sc->vsi.ifp;
2456         INIT_DBG_IF(ifp, "begin");
2457
2458         IXLV_CORE_LOCK_ASSERT(sc);
2459
2460         ixl_vc_flush(&sc->vc_mgr);
2461         ixlv_disable_queues(sc);
2462
2463         start = ticks;
2464         while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
2465             ((ticks - start) < hz/10))
2466                 ixlv_do_adminq_locked(sc);
2467
2468         /* Stop the local timer */
2469         callout_stop(&sc->timer);
2470
2471         INIT_DBG_IF(ifp, "end");
2472 }
2473
2474
2475 /*********************************************************************
2476  *
2477  *  Free all station queue structs.
2478  *
2479  **********************************************************************/
2480 static void
2481 ixlv_free_queues(struct ixl_vsi *vsi)
2482 {
2483         struct ixlv_sc  *sc = (struct ixlv_sc *)vsi->back;
2484         struct ixl_queue        *que = vsi->queues;
2485
2486         for (int i = 0; i < vsi->num_queues; i++, que++) {
2487                 struct tx_ring *txr = &que->txr;
2488                 struct rx_ring *rxr = &que->rxr;
2489         
2490                 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2491                         continue;
2492                 IXL_TX_LOCK(txr);
2493                 ixl_free_que_tx(que);
2494                 if (txr->base)
2495                         i40e_free_dma_mem(&sc->hw, &txr->dma);
2496                 IXL_TX_UNLOCK(txr);
2497                 IXL_TX_LOCK_DESTROY(txr);
2498
2499                 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2500                         continue;
2501                 IXL_RX_LOCK(rxr);
2502                 ixl_free_que_rx(que);
2503                 if (rxr->base)
2504                         i40e_free_dma_mem(&sc->hw, &rxr->dma);
2505                 IXL_RX_UNLOCK(rxr);
2506                 IXL_RX_LOCK_DESTROY(rxr);
2507                 
2508         }
2509         free(vsi->queues, M_DEVBUF);
2510 }
2511
2512
2513 /*
2514 ** ixlv_config_rss - setup RSS 
2515 **
2516 ** RSS keys and table are cleared on VF reset.
2517 */
2518 static void
2519 ixlv_config_rss(struct ixlv_sc *sc)
2520 {
2521         struct i40e_hw  *hw = &sc->hw;
2522         struct ixl_vsi  *vsi = &sc->vsi;
2523         u32             lut = 0;
2524         u64             set_hena, hena;
2525         int             i, j, k;
2526
2527         /* set up random bits */
2528         static const u32 seed[I40E_VFQF_HKEY_MAX_INDEX + 1] = {
2529             0x794221b4, 0xbca0c5ab, 0x6cd5ebd9, 0x1ada6127,
2530             0x983b3aa1, 0x1c4e71eb, 0x7f6328b2, 0xfcdc0da0,
2531             0xc135cafa, 0x7a6f7e2d, 0xe7102d28, 0x163cd12e,
2532             0x4954b126 };
2533
2534         /* Don't set up RSS if using a single queue */
2535         if (vsi->num_queues == 1) {
2536                 wr32(hw, I40E_VFQF_HENA(0), 0);
2537                 wr32(hw, I40E_VFQF_HENA(1), 0);
2538                 ixl_flush(hw);
2539                 return;
2540         }
2541
2542         /* Fill out hash function seed */
2543         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
2544                 wr32(hw, I40E_VFQF_HKEY(i), seed[i]);
2545
2546         /* Enable PCTYPES for RSS: */
2547         set_hena =
2548                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
2549                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
2550                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
2551                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
2552                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
2553                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
2554                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
2555                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
2556                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
2557                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
2558                 ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
2559
2560         hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
2561             ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
2562         hena |= set_hena;
2563         wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
2564         wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
2565
2566         /* Populate the LUT with max no. of queues in round robin fashion */
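             /*
             ** Each 32-bit HLUT register packs four one-byte entries, earliest
             ** in the high byte; e.g. with 4 queues the first word written is
             ** 0x00010203. The separate counter k keeps the every-4th-entry
             ** write cadence even when num_queues is not a multiple of 4.
             */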
2567         for (i = 0, j = 0, k = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; j++, k++) {
2568                 if (j == vsi->num_queues)
2569                         j = 0;
2570                 /* lut = 4-byte sliding window of 4 lut entries */
2571                 lut = (lut << 8) | (j & 0xF);
2572                 /* Every 4th entry the window is full; write the register */
2573                 if ((k & 3) == 3) {
2574                         wr32(hw, I40E_VFQF_HLUT(i), lut);
2575                         DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
2576                         i++;
2577                 }
2578         }
2578         }
2579         ixl_flush(hw);
2580 }
2581
2582
2583 /*
2584 ** This routine refreshes VLAN filters; called by init,
2585 ** it scans the filter table and then updates the AQ.
2586 */
2587 static void
2588 ixlv_setup_vlan_filters(struct ixlv_sc *sc)
2589 {
2590         struct ixl_vsi                  *vsi = &sc->vsi;
2591         struct ixlv_vlan_filter *f;
2592         int                             cnt = 0;
2593
2594         if (vsi->num_vlans == 0)
2595                 return;
2596         /*
2597         ** Scan the filter table for vlan entries,
2598         ** and if found call for the AQ update.
2599         */
2600         SLIST_FOREACH(f, sc->vlan_filters, next)
2601                 if (f->flags & IXL_FILTER_ADD)
2602                         cnt++;
2603         if (cnt > 0)
2604                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
2605                     IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
2606 }
2607
2608
2609 /*
2610 ** This routine adds new MAC filters to the sc's list;
2611 ** these are later added in hardware by sending a virtual
2612 ** channel message.
2613 */
2614 static int
2615 ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
2616 {
2617         struct ixlv_mac_filter  *f;
2618         device_t                        dev = sc->dev;
2619
2620         /* Does one already exist? */
2621         f = ixlv_find_mac_filter(sc, macaddr);
2622         if (f != NULL) {
2623                 IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
2624                     MAC_FORMAT_ARGS(macaddr));
2625                 return (EEXIST);
2626         }
2627
2628         /* If not, get a new empty filter */
2629         f = ixlv_get_mac_filter(sc);
2630         if (f == NULL) {
2631                 device_printf(dev, "%s: no filters available!!\n",
2632                     __func__);
2633                 return (ENOMEM);
2634         }
2635
2636         IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
2637             MAC_FORMAT_ARGS(macaddr));
2638
2639         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2640         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
2641         f->flags |= flags;
2642         return (0);
2643 }
2644
2645 /*
2646 ** Marks a MAC filter for deletion.
2647 */
2648 static int
2649 ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
2650 {
2651         struct ixlv_mac_filter  *f;
2652
2653         f = ixlv_find_mac_filter(sc, macaddr);
2654         if (f == NULL)
2655                 return (ENOENT);
2656
2657         f->flags |= IXL_FILTER_DEL;
2658         return (0);
2659 }
2660
2661 /*
2662 ** Tasklet handler for MSIX Adminq interrupts
2663 **  - done outside interrupt context since it might sleep
2664 */
2665 static void
2666 ixlv_do_adminq(void *context, int pending)
2667 {
2668         struct ixlv_sc          *sc = context;
2669
2670         mtx_lock(&sc->mtx);
2671         ixlv_do_adminq_locked(sc);
2672         mtx_unlock(&sc->mtx);
2673         return;
2674 }
2675
2676 static void
2677 ixlv_do_adminq_locked(struct ixlv_sc *sc)
2678 {
2679         struct i40e_hw                  *hw = &sc->hw;
2680         struct i40e_arq_event_info      event;
2681         struct i40e_virtchnl_msg        *v_msg;
2682         i40e_status                     ret;
2683         u16                             result = 0;
2684
2685         IXLV_CORE_LOCK_ASSERT(sc);
2686
2687         event.buf_len = IXL_AQ_BUF_SZ;
2688         event.msg_buf = sc->aq_buffer;
2689         v_msg = (struct i40e_virtchnl_msg *)&event.desc;
2690
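             /* Drain the ARQ; 'result' holds the number of messages still pending after each clean */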
2691         do {
2692                 ret = i40e_clean_arq_element(hw, &event, &result);
2693                 if (ret)
2694                         break;
2695                 ixlv_vc_completion(sc, v_msg->v_opcode,
2696                     v_msg->v_retval, event.msg_buf, event.msg_len);
2697                 if (result != 0)
2698                         bzero(event.msg_buf, IXL_AQ_BUF_SZ);
2699         } while (result);
2700
2701         ixlv_enable_adminq_irq(hw);
2702 }
2703
2704 static void
2705 ixlv_add_sysctls(struct ixlv_sc *sc)
2706 {
2707         device_t dev = sc->dev;
2708         struct ixl_vsi *vsi = &sc->vsi;
2709         struct i40e_eth_stats *es = &vsi->eth_stats;
2710
2711         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2712         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2713         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2714
2715         struct sysctl_oid *vsi_node, *queue_node;
2716         struct sysctl_oid_list *vsi_list, *queue_list;
2717
2718 #define QUEUE_NAME_LEN 32
2719         char queue_namebuf[QUEUE_NAME_LEN];
2720
2721         struct ixl_queue *queues = vsi->queues;
2722         struct tx_ring *txr;
2723         struct rx_ring *rxr;
2724
2725         /* Driver statistics sysctls */
2726         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2727                         CTLFLAG_RD, &sc->watchdog_events,
2728                         "Watchdog timeouts");
2729         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2730                         CTLFLAG_RD, &sc->admin_irq,
2731                         "Admin Queue IRQ Handled");
2732
2733         /* VSI statistics sysctls */
2734         vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
2735                                    CTLFLAG_RD, NULL, "VSI-specific statistics");
2736         vsi_list = SYSCTL_CHILDREN(vsi_node);
2737
2738         struct ixl_sysctl_info ctls[] =
2739         {
2740                 {&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2741                 {&es->rx_unicast, "ucast_pkts_rcvd",
2742                         "Unicast Packets Received"},
2743                 {&es->rx_multicast, "mcast_pkts_rcvd",
2744                         "Multicast Packets Received"},
2745                 {&es->rx_broadcast, "bcast_pkts_rcvd",
2746                         "Broadcast Packets Received"},
2747                 {&es->rx_discards, "rx_discards", "Discarded RX packets"},
2748                 {&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"},
2749                 {&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2750                 {&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2751                 {&es->tx_multicast, "mcast_pkts_txd",
2752                         "Multicast Packets Transmitted"},
2753                 {&es->tx_broadcast, "bcast_pkts_txd",
2754                         "Broadcast Packets Transmitted"},
2755                 {&es->tx_errors, "tx_errors", "TX packet errors"},
2756                 // end
2757                 {0,0,0}
2758         };
2759         struct ixl_sysctl_info *entry = ctls;
2760         while (entry->stat != 0)
2761         {
2762                 SYSCTL_ADD_UQUAD(ctx, vsi_list, OID_AUTO, entry->name,
2763                                 CTLFLAG_RD, entry->stat,
2764                                 entry->description);
2765                 entry++;
2766         }
2767
2768         /* Queue sysctls */
2769         for (int q = 0; q < vsi->num_queues; q++) {
2770                 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2771                 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
2772                                              CTLFLAG_RD, NULL, "Queue Name");
2773                 queue_list = SYSCTL_CHILDREN(queue_node);
2774
2775                 txr = &(queues[q].txr);
2776                 rxr = &(queues[q].rxr);
2777
2778                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2779                                 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2780                                 "m_defrag() failed");
2781                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
2782                                 CTLFLAG_RD, &(queues[q].dropped_pkts),
2783                                 "Driver dropped packets");
2784                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2785                                 CTLFLAG_RD, &(queues[q].irqs),
2786                                 "irqs on this queue");
2787                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2788                                 CTLFLAG_RD, &(queues[q].tso),
2789                                 "TSO");
2790                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
2791                                 CTLFLAG_RD, &(queues[q].tx_dma_setup),
2792                                 "Driver tx dma failure in xmit");
2793                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2794                                 CTLFLAG_RD, &(txr->no_desc),
2795                                 "Queue No Descriptor Available");
2796                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2797                                 CTLFLAG_RD, &(txr->total_packets),
2798                                 "Queue Packets Transmitted");
2799                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2800                                 CTLFLAG_RD, &(txr->tx_bytes),
2801                                 "Queue Bytes Transmitted");
2802                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2803                                 CTLFLAG_RD, &(rxr->rx_packets),
2804                                 "Queue Packets Received");
2805                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2806                                 CTLFLAG_RD, &(rxr->rx_bytes),
2807                                 "Queue Bytes Received");
2808
2809                 /* Examine queue state */
2810                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
2811                                 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2812                                 sizeof(struct ixl_queue),
2813                                 ixlv_sysctl_qtx_tail_handler, "IU",
2814                                 "Queue Transmit Descriptor Tail");
2815                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
2816                                 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2817                                 sizeof(struct ixl_queue),
2818                                 ixlv_sysctl_qrx_tail_handler, "IU",
2819                                 "Queue Receive Descriptor Tail");
2820         }
2821 }
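     /*
     ** Example (unit 0): the nodes above appear under the device tree, e.g.
     **   sysctl dev.ixlv.0.admin_irq
     **   sysctl dev.ixlv.0.vsi.que0.rx_packets
     */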
2822
2823 static void
2824 ixlv_init_filters(struct ixlv_sc *sc)
2825 {
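             /*
             ** XXX: these M_NOWAIT allocations are unchecked; if either
             ** failed, the SLIST_INIT() below would dereference a NULL head.
             */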
2826         sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
2827             M_DEVBUF, M_NOWAIT | M_ZERO);
2828         SLIST_INIT(sc->mac_filters);
2829         sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
2830             M_DEVBUF, M_NOWAIT | M_ZERO);
2831         SLIST_INIT(sc->vlan_filters);
2832         return;
2833 }
2834
2835 static void
2836 ixlv_free_filters(struct ixlv_sc *sc)
2837 {
2838         struct ixlv_mac_filter *f;
2839         struct ixlv_vlan_filter *v;
2840
2841         while (!SLIST_EMPTY(sc->mac_filters)) {
2842                 f = SLIST_FIRST(sc->mac_filters);
2843                 SLIST_REMOVE_HEAD(sc->mac_filters, next);
2844                 free(f, M_DEVBUF);
2845         }
2846         while (!SLIST_EMPTY(sc->vlan_filters)) {
2847                 v = SLIST_FIRST(sc->vlan_filters);
2848                 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
2849                 free(v, M_DEVBUF);
2850         }
2851         return;
2852 }
2853
2854 /**
2855  * ixlv_sysctl_qtx_tail_handler
2856  * Retrieves I40E_QTX_TAIL1 value from hardware
2857  * for a sysctl.
2858  */
2859 static int 
2860 ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
2861 {
2862         struct ixl_queue *que;
2863         int error;
2864         u32 val;
2865
2866         que = ((struct ixl_queue *)oidp->oid_arg1);
2867         if (!que) return 0;
2868
2869         val = rd32(que->vsi->hw, que->txr.tail);
2870         error = sysctl_handle_int(oidp, &val, 0, req);
2871         if (error || !req->newptr)
2872                 return (error);
2873         return (0);
2874 }
2875
2876 /**
2877  * ixlv_sysctl_qrx_tail_handler
2878  * Retrieves I40E_QRX_TAIL1 value from hardware
2879  * for a sysctl.
2880  */
2881 static int 
2882 ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
2883 {
2884         struct ixl_queue *que;
2885         int error;
2886         u32 val;
2887
2888         que = ((struct ixl_queue *)oidp->oid_arg1);
2889         if (!que) return 0;
2890
2891         val = rd32(que->vsi->hw, que->rxr.tail);
2892         error = sysctl_handle_int(oidp, &val, 0, req);
2893         if (error || !req->newptr)
2894                 return (error);
2895         return (0);
2896 }
2897