/* sys/dev/ixl/if_ixlv.c — FreeBSD ixl(4) virtual-function (VF) driver.
 * Snapshot corresponds to MFC r313497 ("ixl(4): Update to 1.7.12-k"). */
1 /******************************************************************************
2
3   Copyright (c) 2013-2015, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 #include "ixl.h"
36 #include "ixlv.h"
37
/*********************************************************************
 *  Driver version
 *********************************************************************/
/* Reported in the probe string; the VF driver is versioned separately
 * from the PF driver (ixl), so this need not match 1.7.12-k. */
char ixlv_driver_version[] = "1.4.12-k";
42
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixlv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
	/* A subvendor/subdevice of 0 in an entry acts as a wildcard
	 * when matched in ixlv_probe(). */
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
61
/*********************************************************************
 *  Table of branding strings
 *  Indexed by the last field of ixlv_vendor_info_array entries.
 *********************************************************************/

static char	*ixlv_strings[] = {
	"Intel(R) Ethernet Connection XL710/X722 VF Driver"
};
69
70
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/* newbus device interface entry points */
static int	ixlv_probe(device_t);
static int	ixlv_attach(device_t);
static int	ixlv_detach(device_t);
static int	ixlv_shutdown(device_t);
/* resource/queue setup and teardown */
static void	ixlv_init_locked(struct ixlv_sc *);
static int	ixlv_allocate_pci_resources(struct ixlv_sc *);
static void	ixlv_free_pci_resources(struct ixlv_sc *);
static int	ixlv_assign_msix(struct ixlv_sc *);
static int	ixlv_init_msix(struct ixlv_sc *);
static int	ixlv_init_taskqueue(struct ixlv_sc *);
static int	ixlv_setup_queues(struct ixlv_sc *);
static void	ixlv_config_rss(struct ixlv_sc *);
static void	ixlv_stop(struct ixlv_sc *);
static void	ixlv_add_multi(struct ixl_vsi *);
static void	ixlv_del_multi(struct ixl_vsi *);
static void	ixlv_free_queues(struct ixl_vsi *);
static int	ixlv_setup_interface(device_t, struct ixlv_sc *);
static int	ixlv_teardown_adminq_msix(struct ixlv_sc *);

/* ifmedia callbacks */
static int	ixlv_media_change(struct ifnet *);
static void	ixlv_media_status(struct ifnet *, struct ifmediareq *);

static void	ixlv_local_timer(void *);

/* MAC filter management */
static int	ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
static int	ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void	ixlv_init_filters(struct ixlv_sc *);
static void	ixlv_free_filters(struct ixlv_sc *);

/* interrupt handlers, taskqueue handlers, and ITR control */
static void	ixlv_msix_que(void *);
static void	ixlv_msix_adminq(void *);
static void	ixlv_do_adminq(void *, int);
static void	ixlv_do_adminq_locked(struct ixlv_sc *sc);
static void	ixlv_handle_que(void *, int);
static int	ixlv_reset(struct ixlv_sc *);
static int	ixlv_reset_complete(struct i40e_hw *);
static void	ixlv_set_queue_rx_itr(struct ixl_queue *);
static void	ixlv_set_queue_tx_itr(struct ixl_queue *);
static void	ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
		    enum i40e_status_code);
static void	ixlv_configure_itr(struct ixlv_sc *);

static void	ixlv_enable_adminq_irq(struct i40e_hw *);
static void	ixlv_disable_adminq_irq(struct i40e_hw *);
static void	ixlv_enable_queue_irq(struct i40e_hw *, int);
static void	ixlv_disable_queue_irq(struct i40e_hw *, int);

/* VLAN event handling */
static void	ixlv_setup_vlan_filters(struct ixlv_sc *);
static void	ixlv_register_vlan(void *, struct ifnet *, u16);
static void	ixlv_unregister_vlan(void *, struct ifnet *, u16);

/* PF communication (virtchnl) setup */
static void	ixlv_init_hw(struct ixlv_sc *);
static int	ixlv_setup_vc(struct ixlv_sc *);
static int	ixlv_vf_config(struct ixlv_sc *);

static void	ixlv_cap_txcsum_tso(struct ixl_vsi *,
		    struct ifnet *, int);

static void	ixlv_add_sysctls(struct ixlv_sc *);
#ifdef IXL_DEBUG
static int	ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif
137
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixlv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixlv_probe),
	DEVMETHOD(device_attach, ixlv_attach),
	DEVMETHOD(device_detach, ixlv_detach),
	DEVMETHOD(device_shutdown, ixlv_shutdown),
	{0, 0}
};

static driver_t ixlv_driver = {
	"ixlv", ixlv_methods, sizeof(struct ixlv_sc),
};

devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);

/* Module load-order dependencies */
MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);
/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
		   "IXLV driver parameters");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixlv_ringsz = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixlv_ringsz, 0, "Descriptor Ring Size");

/* Set to zero to auto calculate  */
int ixlv_max_queues = 0;
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixlv_max_queues, 0, "Number of Queues");

/*
** Number of entries in Tx queue buf_ring.
** Increasing this will reduce the number of
** errors when transmitting fragmented UDP
** packets.
*/
static int ixlv_txbrsz = DEFAULT_TXBRSZ;
TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
    &ixlv_txbrsz, 0, "TX Buf Ring Size");

/*
** Controls for Interrupt Throttling
**      - true/false for dynamic adjustment
**      - default values for static ITR
*/
int ixlv_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixlv_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixlv_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixlv_rx_itr, 0, "RX Interrupt Rate");

int ixlv_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixlv_tx_itr, 0, "TX Interrupt Rate");
219         
220 /*********************************************************************
221  *  Device identification routine
222  *
223  *  ixlv_probe determines if the driver should be loaded on
224  *  the hardware based on PCI vendor/device id of the device.
225  *
226  *  return BUS_PROBE_DEFAULT on success, positive on failure
227  *********************************************************************/
228
229 static int
230 ixlv_probe(device_t dev)
231 {
232         ixl_vendor_info_t *ent;
233
234         u16     pci_vendor_id, pci_device_id;
235         u16     pci_subvendor_id, pci_subdevice_id;
236         char    device_name[256];
237
238 #if 0
239         INIT_DEBUGOUT("ixlv_probe: begin");
240 #endif
241
242         pci_vendor_id = pci_get_vendor(dev);
243         if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
244                 return (ENXIO);
245
246         pci_device_id = pci_get_device(dev);
247         pci_subvendor_id = pci_get_subvendor(dev);
248         pci_subdevice_id = pci_get_subdevice(dev);
249
250         ent = ixlv_vendor_info_array;
251         while (ent->vendor_id != 0) {
252                 if ((pci_vendor_id == ent->vendor_id) &&
253                     (pci_device_id == ent->device_id) &&
254
255                     ((pci_subvendor_id == ent->subvendor_id) ||
256                      (ent->subvendor_id == 0)) &&
257
258                     ((pci_subdevice_id == ent->subdevice_id) ||
259                      (ent->subdevice_id == 0))) {
260                         sprintf(device_name, "%s, Version - %s",
261                                 ixlv_strings[ent->index],
262                                 ixlv_driver_version);
263                         device_set_desc_copy(dev, device_name);
264                         return (BUS_PROBE_DEFAULT);
265                 }
266                 ent++;
267         }
268         return (ENXIO);
269 }
270
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_attach(device_t dev)
{
	struct ixlv_sc	*sc;
	struct i40e_hw	*hw;
	struct ixl_vsi	*vsi;
	int		error = 0;

	INIT_DBG_DEV(dev, "begin");

	/* Allocate, clear, and link in our primary soft structure */
	sc = device_get_softc(dev);
	sc->dev = sc->osdep.dev = dev;
	hw = &sc->hw;
	vsi = &sc->vsi;
	vsi->dev = dev;

	/* Initialize hw struct */
	ixlv_init_hw(sc);

	/* Allocate filter lists */
	ixlv_init_filters(sc);

	/* Core Lock Init */
	mtx_init(&sc->mtx, device_get_nameunit(dev),
	    "IXL SC Lock", MTX_DEF);

	/* Set up the timer callout */
	callout_init_mtx(&sc->timer, &sc->mtx, 0);

	/* Do PCI setup - map BAR0, etc */
	if (ixlv_allocate_pci_resources(sc)) {
		device_printf(dev, "%s: Allocation of PCI resources failed\n",
		    __func__);
		error = ENXIO;
		goto err_early;
	}

	INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");

	error = i40e_set_mac_type(hw);
	if (error) {
		device_printf(dev, "%s: set_mac_type failed: %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	/* The PF must finish resetting the VF before we can talk to it */
	error = ixlv_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: Device is still being reset\n",
		    __func__);
		goto err_pci_res;
	}

	INIT_DBG_DEV(dev, "VF Device is ready for configuration");

	/* Establish the admin queue / virtchnl link to the PF */
	error = ixlv_setup_vc(sc);
	if (error) {
		device_printf(dev, "%s: Error setting up PF comms, %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	INIT_DBG_DEV(dev, "PF API version verified");

	/* Need API version before sending reset message */
	error = ixlv_reset(sc);
	if (error) {
		device_printf(dev, "VF reset failed; reload the driver\n");
		goto err_aq;
	}

	INIT_DBG_DEV(dev, "VF reset complete");

	/* Ask for VF config from PF */
	error = ixlv_vf_config(sc);
	if (error) {
		device_printf(dev, "Error getting configuration from PF: %d\n",
		    error);
		goto err_aq;
	}

	device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n",
	    sc->vf_res->num_vsis,
	    sc->vf_res->num_queue_pairs,
	    sc->vf_res->max_vectors,
	    sc->vf_res->rss_key_size,
	    sc->vf_res->rss_lut_size);
#ifdef IXL_DEBUG
	device_printf(dev, "Offload flags: 0x%b\n",
	    sc->vf_res->vf_offload_flags, IXLV_PRINTF_VF_OFFLOAD_FLAGS);
#endif

	/* got VF config message back from PF, now we can parse it */
	for (int i = 0; i < sc->vf_res->num_vsis; i++) {
		/* Take the last SRIOV VSI in the list as the LAN VSI */
		if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
			sc->vsi_res = &sc->vf_res->vsi_res[i];
	}
	if (!sc->vsi_res) {
		device_printf(dev, "%s: no LAN VSI found\n", __func__);
		error = EIO;
		goto err_res_buf;
	}

	INIT_DBG_DEV(dev, "Resource Acquisition complete");

	/* If no mac address was assigned just make a random one */
	if (!ixlv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		/* Force a unicast (clear multicast bit), locally
		 * administered address */
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Now that the number of queues for this VF is known, set up interrupts */
	sc->msix = ixlv_init_msix(sc);
	/* We fail without MSIX support */
	if (sc->msix == 0) {
		error = ENXIO;
		goto err_res_buf;
	}

	vsi->id = sc->vsi_res->vsi_id;
	vsi->back = (void *)sc;
	sc->link_up = TRUE;

	/* This allocates the memory and early settings */
	if (ixlv_setup_queues(sc) != 0) {
		device_printf(dev, "%s: setup queues failed!\n",
		    __func__);
		error = EIO;
		goto out;
	}

	/* Setup the stack interface */
	if (ixlv_setup_interface(dev, sc) != 0) {
		device_printf(dev, "%s: setup interface failed!\n",
		    __func__);
		error = EIO;
		goto out;
	}

	INIT_DBG_DEV(dev, "Queue memory and interface setup");

	/* Do queue interrupt setup */
	if (ixlv_assign_msix(sc) != 0) {
		device_printf(dev, "%s: allocating queue interrupts failed!\n",
		    __func__);
		error = ENXIO;
		goto out;
	}

	/* Start AdminQ taskqueue */
	ixlv_init_taskqueue(sc);

	/* Initialize stats */
	bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
	ixlv_add_sysctls(sc);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

	/* We want AQ enabled early */
	ixlv_enable_adminq_irq(hw);

	/* Set things up to run init */
	sc->init_state = IXLV_INIT_READY;

	ixl_vc_init_mgr(sc, &sc->vc_mgr);

	INIT_DBG_DEV(dev, "end");
	return (error);

	/* Error unwind: each label releases everything acquired after
	 * the next-lower label, in reverse order of acquisition. */
out:
	ixlv_free_queues(vsi);
err_res_buf:
	free(sc->vf_res, M_DEVBUF);
err_aq:
	i40e_shutdown_adminq(hw);
err_pci_res:
	ixlv_free_pci_resources(sc);
err_early:
	mtx_destroy(&sc->mtx);
	ixlv_free_filters(sc);
	INIT_DBG_DEV(dev, "end: error %d", error);
	return (error);
}
472
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_detach(device_t dev)
{
	struct ixlv_sc	*sc = device_get_softc(dev);
	struct ixl_vsi	*vsi = &sc->vsi;
	struct i40e_hw	*hw = &sc->hw;
	enum i40e_status_code	status;

	INIT_DBG_DEV(dev, "begin");

	/* Make sure VLANS are not using driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		if_printf(vsi->ifp, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	/* Stop driver */
	ether_ifdetach(vsi->ifp);
	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		mtx_lock(&sc->mtx);
		ixlv_stop(sc);
		mtx_unlock(&sc->mtx);
	}

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	/* Drain VC mgr */
	callout_drain(&sc->vc_mgr.callout);

	/* Quiesce and tear down the admin queue interrupt before
	 * freeing its taskqueue and shutting the AQ down. */
	ixlv_disable_adminq_irq(hw);
	ixlv_teardown_adminq_msix(sc);
	/* Drain admin queue taskqueue */
	taskqueue_free(sc->tq);
	status = i40e_shutdown_adminq(&sc->hw);
	if (status != I40E_SUCCESS) {
		/* Log but continue detaching; nothing else to do here */
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));
	}

	/* Release remaining resources in reverse order of allocation */
	if_free(vsi->ifp);
	free(sc->vf_res, M_DEVBUF);
	ixlv_free_pci_resources(sc);
	ixlv_free_queues(vsi);
	ixlv_free_filters(sc);

	bus_generic_detach(dev);
	mtx_destroy(&sc->mtx);
	INIT_DBG_DEV(dev, "end");
	return (0);
}
538
539 /*********************************************************************
540  *
541  *  Shutdown entry point
542  *
543  **********************************************************************/
544
545 static int
546 ixlv_shutdown(device_t dev)
547 {
548         struct ixlv_sc  *sc = device_get_softc(dev);
549
550         INIT_DBG_DEV(dev, "begin");
551
552         mtx_lock(&sc->mtx);     
553         ixlv_stop(sc);
554         mtx_unlock(&sc->mtx);   
555
556         INIT_DBG_DEV(dev, "end");
557         return (0);
558 }
559
560 /*
561  * Configure TXCSUM(IPV6) and TSO(4/6)
562  *      - the hardware handles these together so we
563  *        need to tweak them 
564  */
565 static void
566 ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
567 {
568         /* Enable/disable TXCSUM/TSO4 */
569         if (!(ifp->if_capenable & IFCAP_TXCSUM)
570             && !(ifp->if_capenable & IFCAP_TSO4)) {
571                 if (mask & IFCAP_TXCSUM) {
572                         ifp->if_capenable |= IFCAP_TXCSUM;
573                         /* enable TXCSUM, restore TSO if previously enabled */
574                         if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
575                                 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
576                                 ifp->if_capenable |= IFCAP_TSO4;
577                         }
578                 }
579                 else if (mask & IFCAP_TSO4) {
580                         ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
581                         vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
582                         if_printf(ifp,
583                             "TSO4 requires txcsum, enabling both...\n");
584                 }
585         } else if((ifp->if_capenable & IFCAP_TXCSUM)
586             && !(ifp->if_capenable & IFCAP_TSO4)) {
587                 if (mask & IFCAP_TXCSUM)
588                         ifp->if_capenable &= ~IFCAP_TXCSUM;
589                 else if (mask & IFCAP_TSO4)
590                         ifp->if_capenable |= IFCAP_TSO4;
591         } else if((ifp->if_capenable & IFCAP_TXCSUM)
592             && (ifp->if_capenable & IFCAP_TSO4)) {
593                 if (mask & IFCAP_TXCSUM) {
594                         vsi->flags |= IXL_FLAGS_KEEP_TSO4;
595                         ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
596                         if_printf(ifp, 
597                             "TSO4 requires txcsum, disabling both...\n");
598                 } else if (mask & IFCAP_TSO4)
599                         ifp->if_capenable &= ~IFCAP_TSO4;
600         }
601
602         /* Enable/disable TXCSUM_IPV6/TSO6 */
603         if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
604             && !(ifp->if_capenable & IFCAP_TSO6)) {
605                 if (mask & IFCAP_TXCSUM_IPV6) {
606                         ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
607                         if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
608                                 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
609                                 ifp->if_capenable |= IFCAP_TSO6;
610                         }
611                 } else if (mask & IFCAP_TSO6) {
612                         ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
613                         vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
614                         if_printf(ifp,
615                             "TSO6 requires txcsum6, enabling both...\n");
616                 }
617         } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
618             && !(ifp->if_capenable & IFCAP_TSO6)) {
619                 if (mask & IFCAP_TXCSUM_IPV6)
620                         ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
621                 else if (mask & IFCAP_TSO6)
622                         ifp->if_capenable |= IFCAP_TSO6;
623         } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
624             && (ifp->if_capenable & IFCAP_TSO6)) {
625                 if (mask & IFCAP_TXCSUM_IPV6) {
626                         vsi->flags |= IXL_FLAGS_KEEP_TSO6;
627                         ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
628                         if_printf(ifp,
629                             "TSO6 requires txcsum6, disabling both...\n");
630                 } else if (mask & IFCAP_TSO6)
631                         ifp->if_capenable &= ~IFCAP_TSO6;
632         }
633 }
634
635 /*********************************************************************
636  *  Ioctl entry point
637  *
638  *  ixlv_ioctl is called when the user wants to configure the
639  *  interface.
640  *
641  *  return 0 on success, positive on failure
642  **********************************************************************/
643
644 static int
645 ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
646 {
647         struct ixl_vsi          *vsi = ifp->if_softc;
648         struct ixlv_sc  *sc = vsi->back;
649         struct ifreq            *ifr = (struct ifreq *)data;
650 #if defined(INET) || defined(INET6)
651         struct ifaddr           *ifa = (struct ifaddr *)data;
652         bool                    avoid_reset = FALSE;
653 #endif
654         int                     error = 0;
655
656
657         switch (command) {
658
659         case SIOCSIFADDR:
660 #ifdef INET
661                 if (ifa->ifa_addr->sa_family == AF_INET)
662                         avoid_reset = TRUE;
663 #endif
664 #ifdef INET6
665                 if (ifa->ifa_addr->sa_family == AF_INET6)
666                         avoid_reset = TRUE;
667 #endif
668 #if defined(INET) || defined(INET6)
669                 /*
670                 ** Calling init results in link renegotiation,
671                 ** so we avoid doing it when possible.
672                 */
673                 if (avoid_reset) {
674                         ifp->if_flags |= IFF_UP;
675                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
676                                 ixlv_init(vsi);
677 #ifdef INET
678                         if (!(ifp->if_flags & IFF_NOARP))
679                                 arp_ifinit(ifp, ifa);
680 #endif
681                 } else
682                         error = ether_ioctl(ifp, command, data);
683                 break;
684 #endif
685         case SIOCSIFMTU:
686                 IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
687                 mtx_lock(&sc->mtx);
688                 if (ifr->ifr_mtu > IXL_MAX_FRAME -
689                     ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
690                         error = EINVAL;
691                         IOCTL_DBG_IF(ifp, "mtu too large");
692                 } else {
693                         IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", (u_long)ifp->if_mtu, ifr->ifr_mtu);
694                         // ERJ: Interestingly enough, these types don't match
695                         ifp->if_mtu = (u_long)ifr->ifr_mtu;
696                         vsi->max_frame_size =
697                             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
698                             + ETHER_VLAN_ENCAP_LEN;
699                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
700                                 ixlv_init_locked(sc);
701                 }
702                 mtx_unlock(&sc->mtx);
703                 break;
704         case SIOCSIFFLAGS:
705                 IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
706                 mtx_lock(&sc->mtx);
707                 if (ifp->if_flags & IFF_UP) {
708                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
709                                 ixlv_init_locked(sc);
710                 } else
711                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
712                                 ixlv_stop(sc);
713                 sc->if_flags = ifp->if_flags;
714                 mtx_unlock(&sc->mtx);
715                 break;
716         case SIOCADDMULTI:
717                 IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
718                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
719                         mtx_lock(&sc->mtx);
720                         ixlv_disable_intr(vsi);
721                         ixlv_add_multi(vsi);
722                         ixlv_enable_intr(vsi);
723                         mtx_unlock(&sc->mtx);
724                 }
725                 break;
726         case SIOCDELMULTI:
727                 IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
728                 if (sc->init_state == IXLV_RUNNING) {
729                         mtx_lock(&sc->mtx);
730                         ixlv_disable_intr(vsi);
731                         ixlv_del_multi(vsi);
732                         ixlv_enable_intr(vsi);
733                         mtx_unlock(&sc->mtx);
734                 }
735                 break;
736         case SIOCSIFMEDIA:
737         case SIOCGIFMEDIA:
738                 IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
739                 error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
740                 break;
741         case SIOCSIFCAP:
742         {
743                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
744                 IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");
745
746                 ixlv_cap_txcsum_tso(vsi, ifp, mask);
747
748                 if (mask & IFCAP_RXCSUM)
749                         ifp->if_capenable ^= IFCAP_RXCSUM;
750                 if (mask & IFCAP_RXCSUM_IPV6)
751                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
752                 if (mask & IFCAP_LRO)
753                         ifp->if_capenable ^= IFCAP_LRO;
754                 if (mask & IFCAP_VLAN_HWTAGGING)
755                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
756                 if (mask & IFCAP_VLAN_HWFILTER)
757                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
758                 if (mask & IFCAP_VLAN_HWTSO)
759                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
760                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
761                         ixlv_init(vsi);
762                 }
763                 VLAN_CAPABILITIES(ifp);
764
765                 break;
766         }
767
768         default:
769                 IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
770                 error = ether_ioctl(ifp, command, data);
771                 break;
772         }
773
774         return (error);
775 }
776
/*
** To do a reinit on the VF is unfortunately more complicated
** than a physical device, we must have the PF more or less
** completely recreate our memory, so many things that were
** done only once at attach in traditional drivers now must be
** redone at each reinitialization. This function does that
** 'prelude' so we can then call the normal locked init code.
*/
int
ixlv_reinit_locked(struct ixlv_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ifnet		*ifp = vsi->ifp;
	struct ixlv_mac_filter	*mf, *mf_temp;
	struct ixlv_vlan_filter	*vf;
	int			error = 0;

	INIT_DBG_IF(ifp, "begin");

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ixlv_stop(sc);

	/* NOTE(review): error is captured but reinit proceeds even if
	 * the reset failed; the result is only returned to the caller. */
	error = ixlv_reset(sc);

	INIT_DBG_IF(ifp, "VF was reset");

	/* set the state in case we went thru RESET */
	sc->init_state = IXLV_RUNNING;

	/*
	** Resetting the VF drops all filters from hardware;
	** we need to mark them to be re-added in init.
	*/
	SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
		if (mf->flags & IXL_FILTER_DEL) {
			/* Pending deletes are moot after reset: drop them */
			SLIST_REMOVE(sc->mac_filters, mf,
			    ixlv_mac_filter, next);
			free(mf, M_DEVBUF);
		} else
			mf->flags |= IXL_FILTER_ADD;
	}
	if (vsi->num_vlans != 0)
		/* Re-add every VLAN filter after the reset */
		SLIST_FOREACH(vf, sc->vlan_filters, next)
			vf->flags = IXL_FILTER_ADD;
	else { /* clean any stale filters */
		while (!SLIST_EMPTY(sc->vlan_filters)) {
			vf = SLIST_FIRST(sc->vlan_filters);
			SLIST_REMOVE_HEAD(sc->vlan_filters, next);
			free(vf, M_DEVBUF);
		}
	}

	/* Re-arm the admin queue and discard any queued VC commands */
	ixlv_enable_adminq_irq(hw);
	ixl_vc_flush(&sc->vc_mgr);

	INIT_DBG_IF(ifp, "end");
	return (error);
}
836
837 static void
838 ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
839         enum i40e_status_code code)
840 {
841         struct ixlv_sc *sc;
842
843         sc = arg;
844
845         /*
846          * Ignore "Adapter Stopped" message as that happens if an ifconfig down
847          * happens while a command is in progress, so we don't print an error
848          * in that case.
849          */
850         if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
851                 if_printf(sc->vsi.ifp,
852                     "Error %s waiting for PF to complete operation %d\n",
853                     i40e_stat_str(&sc->hw, code), cmd->request);
854         }
855 }
856
/*
 * Bring the VF up: optionally reinit after a reset, reprogram MAC/VLAN
 * filters via the virtchnl command manager, prepare the rings, and queue
 * the configure/map/enable commands to the PF.  The core lock must be
 * held (asserted below).
 */
static void
ixlv_init_locked(struct ixlv_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	int			 error = 0;

	INIT_DBG_IF(ifp, "begin");

	IXLV_CORE_LOCK_ASSERT(sc);

	/* Do a reinit first if an init has already been done */
	if ((sc->init_state == IXLV_RUNNING) ||
	    (sc->init_state == IXLV_RESET_REQUIRED) ||
	    (sc->init_state == IXLV_RESET_PENDING))
		error = ixlv_reinit_locked(sc);
	/* Don't bother with init if we failed reinit */
	if (error)
		goto init_done;

	/* Remove existing MAC filter if new MAC addr is set */
	if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
		error = ixlv_del_mac_filter(sc, hw->mac.addr);
		/* Only tell the PF if the filter existed locally */
		if (error == 0)
			ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd, 
			    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
			    sc);
	}

	/* Check for an LAA mac address... */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);

	/* Translate enabled interface capabilities into hwassist flags */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;

	/* Add mac filter for this VF to PF */
	if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
		error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
		/* EEXIST means the filter is already in the local list;
		 * still ask the PF to (re)program it */
		if (!error || error == EEXIST)
			ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
			    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
			    sc);
	}

	/* Setup vlan's if needed */
	ixlv_setup_vlan_filters(sc);

	/* Prepare the queues for operation */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct  rx_ring *rxr = &que->rxr;

		ixl_init_tx_ring(que);

		/* Choose RX mbuf size from the current max frame size */
		if (vsi->max_frame_size <= MCLBYTES)
			rxr->mbuf_sz = MCLBYTES;
		else
			rxr->mbuf_sz = MJUMPAGESIZE;
		ixl_init_rx_ring(que);
	}

	/* Set initial ITR values */
	ixlv_configure_itr(sc);

	/* Configure queues */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
	    IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);

	/* Set up RSS */
	ixlv_config_rss(sc);

	/* Map vectors */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd, 
	    IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);

	/* Enable queues */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
	    IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);

	/* Start the local timer */
	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);

	sc->init_state = IXLV_RUNNING;

init_done:
	INIT_DBG_IF(ifp, "end");
	return;
}
951
952 /*
953 **  Init entry point for the stack
954 */
void
ixlv_init(void *arg)
{
	struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
	struct ixlv_sc *sc = vsi->back;
	int retries = 0;

	/* Prevent init from running again while waiting for AQ calls
	 * made in init_locked() to complete. */
	mtx_lock(&sc->mtx);
	if (sc->init_in_progress) {
		mtx_unlock(&sc->mtx);
		return;
	} else
		sc->init_in_progress = true;

	ixlv_init_locked(sc);
	mtx_unlock(&sc->mtx);

	/* Wait for init_locked to finish */
	/* NOTE(review): IFF_DRV_RUNNING is presumably set asynchronously
	 * once the PF acknowledges the enable-queues command queued by
	 * init_locked() — confirm against the virtchnl completion path. */
	while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
	    && ++retries < IXLV_MAX_INIT_WAIT) {
		i40e_msec_pause(25);
	}
	if (retries >= IXLV_MAX_INIT_WAIT) {
		if_printf(vsi->ifp,
		    "Init failed to complete in allotted time!\n");
	}

	/* Allow a subsequent init to run */
	mtx_lock(&sc->mtx);
	sc->init_in_progress = false;
	mtx_unlock(&sc->mtx);
}
988
989 /*
990  * ixlv_attach() helper function; gathers information about
991  * the (virtual) hardware for use elsewhere in the driver.
992  */
993 static void
994 ixlv_init_hw(struct ixlv_sc *sc)
995 {
996         struct i40e_hw *hw = &sc->hw;
997         device_t dev = sc->dev;
998         
999         /* Save off the information about this board */
1000         hw->vendor_id = pci_get_vendor(dev);
1001         hw->device_id = pci_get_device(dev);
1002         hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1003         hw->subsystem_vendor_id =
1004             pci_read_config(dev, PCIR_SUBVEND_0, 2);
1005         hw->subsystem_device_id =
1006             pci_read_config(dev, PCIR_SUBDEV_0, 2);
1007
1008         hw->bus.device = pci_get_slot(dev);
1009         hw->bus.func = pci_get_function(dev);
1010 }
1011
1012 /*
1013  * ixlv_attach() helper function; initalizes the admin queue
1014  * and attempts to establish contact with the PF by
1015  * retrying the initial "API version" message several times
1016  * or until the PF responds.
1017  */
1018 static int
1019 ixlv_setup_vc(struct ixlv_sc *sc)
1020 {
1021         struct i40e_hw *hw = &sc->hw;
1022         device_t dev = sc->dev;
1023         int error = 0, ret_error = 0, asq_retries = 0;
1024         bool send_api_ver_retried = 0;
1025
1026         /* Need to set these AQ paramters before initializing AQ */
1027         hw->aq.num_arq_entries = IXL_AQ_LEN;
1028         hw->aq.num_asq_entries = IXL_AQ_LEN;
1029         hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
1030         hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
1031
1032         for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
1033                 /* Initialize admin queue */
1034                 error = i40e_init_adminq(hw);
1035                 if (error) {
1036                         device_printf(dev, "%s: init_adminq failed: %d\n",
1037                             __func__, error);
1038                         ret_error = 1;
1039                         continue;
1040                 }
1041
1042                 INIT_DBG_DEV(dev, "Initialized Admin Queue; starting"
1043                     " send_api_ver attempt %d", i+1);
1044
1045 retry_send:
1046                 /* Send VF's API version */
1047                 error = ixlv_send_api_ver(sc);
1048                 if (error) {
1049                         i40e_shutdown_adminq(hw);
1050                         ret_error = 2;
1051                         device_printf(dev, "%s: unable to send api"
1052                             " version to PF on attempt %d, error %d\n",
1053                             __func__, i+1, error);
1054                 }
1055
1056                 asq_retries = 0;
1057                 while (!i40e_asq_done(hw)) {
1058                         if (++asq_retries > IXLV_AQ_MAX_ERR) {
1059                                 i40e_shutdown_adminq(hw);
1060                                 device_printf(dev, "Admin Queue timeout "
1061                                     "(waiting for send_api_ver), %d more tries...\n",
1062                                     IXLV_AQ_MAX_ERR - (i + 1));
1063                                 ret_error = 3;
1064                                 break;
1065                         } 
1066                         i40e_msec_pause(10);
1067                 }
1068                 if (asq_retries > IXLV_AQ_MAX_ERR)
1069                         continue;
1070
1071                 INIT_DBG_DEV(dev, "Sent API version message to PF");
1072
1073                 /* Verify that the VF accepts the PF's API version */
1074                 error = ixlv_verify_api_ver(sc);
1075                 if (error == ETIMEDOUT) {
1076                         if (!send_api_ver_retried) {
1077                                 /* Resend message, one more time */
1078                                 send_api_ver_retried++;
1079                                 device_printf(dev,
1080                                     "%s: Timeout while verifying API version on first"
1081                                     " try!\n", __func__);
1082                                 goto retry_send;
1083                         } else {
1084                                 device_printf(dev,
1085                                     "%s: Timeout while verifying API version on second"
1086                                     " try!\n", __func__);
1087                                 ret_error = 4;
1088                                 break;
1089                         }
1090                 }
1091                 if (error) {
1092                         device_printf(dev,
1093                             "%s: Unable to verify API version,"
1094                             " error %s\n", __func__, i40e_stat_str(hw, error));
1095                         ret_error = 5;
1096                 }
1097                 break;
1098         }
1099
1100         if (ret_error >= 4)
1101                 i40e_shutdown_adminq(hw);
1102         return (ret_error);
1103 }
1104
1105 /*
1106  * ixlv_attach() helper function; asks the PF for this VF's
1107  * configuration, and saves the information if it receives it.
1108  */
1109 static int
1110 ixlv_vf_config(struct ixlv_sc *sc)
1111 {
1112         struct i40e_hw *hw = &sc->hw;
1113         device_t dev = sc->dev;
1114         int bufsz, error = 0, ret_error = 0;
1115         int asq_retries, retried = 0;
1116
1117 retry_config:
1118         error = ixlv_send_vf_config_msg(sc);
1119         if (error) {
1120                 device_printf(dev,
1121                     "%s: Unable to send VF config request, attempt %d,"
1122                     " error %d\n", __func__, retried + 1, error);
1123                 ret_error = 2;
1124         }
1125
1126         asq_retries = 0;
1127         while (!i40e_asq_done(hw)) {
1128                 if (++asq_retries > IXLV_AQ_MAX_ERR) {
1129                         device_printf(dev, "%s: Admin Queue timeout "
1130                             "(waiting for send_vf_config_msg), attempt %d\n",
1131                             __func__, retried + 1);
1132                         ret_error = 3;
1133                         goto fail;
1134                 }
1135                 i40e_msec_pause(10);
1136         }
1137
1138         INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
1139             retried + 1);
1140
1141         if (!sc->vf_res) {
1142                 bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
1143                     (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
1144                 sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
1145                 if (!sc->vf_res) {
1146                         device_printf(dev,
1147                             "%s: Unable to allocate memory for VF configuration"
1148                             " message from PF on attempt %d\n", __func__, retried + 1);
1149                         ret_error = 1;
1150                         goto fail;
1151                 }
1152         }
1153
1154         /* Check for VF config response */
1155         error = ixlv_get_vf_config(sc);
1156         if (error == ETIMEDOUT) {
1157                 /* The 1st time we timeout, send the configuration message again */
1158                 if (!retried) {
1159                         retried++;
1160                         goto retry_config;
1161                 }
1162                 device_printf(dev,
1163                     "%s: ixlv_get_vf_config() timed out waiting for a response\n",
1164                     __func__);
1165         }
1166         if (error) {
1167                 device_printf(dev,
1168                     "%s: Unable to get VF configuration from PF after %d tries!\n",
1169                     __func__, retried + 1);
1170                 ret_error = 4;
1171         }
1172         goto done;
1173
1174 fail:
1175         free(sc->vf_res, M_DEVBUF);
1176 done:
1177         return (ret_error);
1178 }
1179
1180 /*
1181  * Allocate MSI/X vectors, setup the AQ vector early
1182  */
static int
ixlv_init_msix(struct ixlv_sc *sc)
{
	device_t dev = sc->dev;
	int rid, want, vectors, queues, available;
	int auto_max_queues;

	/* Map the BAR holding the MSIX table */
	rid = PCIR_BAR(IXL_MSIX_BAR);
	sc->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!sc->msix_mem) {
		/* May not be enabled */
		device_printf(sc->dev,
		    "Unable to map MSIX table\n");
		goto fail;
	}

	available = pci_msix_count(dev); 
	if (available == 0) { /* system has msix disabled */
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, sc->msix_mem);
		sc->msix_mem = NULL;
		goto fail;
	}

	/* Clamp queues to number of CPUs and # of MSI-X vectors available */
	auto_max_queues = min(mp_ncpus, available - 1);
	/* Clamp queues to # assigned to VF by PF */
	auto_max_queues = min(auto_max_queues, sc->vf_res->num_queue_pairs);

	/* Override with tunable value if tunable is less than autoconfig count */
	if ((ixlv_max_queues != 0) && (ixlv_max_queues <= auto_max_queues))
		queues = ixlv_max_queues;
	/* Use autoconfig amount if that's lower */
	else if ((ixlv_max_queues != 0) && (ixlv_max_queues > auto_max_queues)) {
		device_printf(dev, "ixlv_max_queues (%d) is too large, using "
		    "autoconfig amount (%d)...\n",
		    ixlv_max_queues, auto_max_queues);
		queues = auto_max_queues;
	}
	/* Limit maximum auto-configured queues to 8 if no user value is set */
	else
		queues = min(auto_max_queues, 8);

#ifdef	RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for the admin queue.
	*/
	want = queues + 1;
	if (want <= available)	/* Have enough */
		vectors = want;
	else {
		device_printf(sc->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors available but %d wanted!\n",
		    available, want);
		goto fail;
	}

#ifdef RSS
	/*
	* If we're doing RSS, the number of queues needs to
	* match the number of RSS buckets that are configured.
	*
	* + If there's more queues than RSS buckets, we'll end
	*   up with queues that get no traffic.
	*
	* + If there's more RSS buckets than queues, we'll end
	*   up having multiple RSS buckets map to the same queue,
	*   so there'll be some contention.
	*/
	if (queues != rss_getnumbuckets()) {
		device_printf(dev,
		    "%s: queues (%d) != RSS buckets (%d)"
		    "; performance will be impacted.\n",
		     __func__, queues, rss_getnumbuckets());
	}
#endif

	/* NOTE(review): if pci_alloc_msix() fails here, sc->msix and
	 * sc->vsi.num_queues are left unset yet the function still
	 * returns a nonzero 'vectors' below — confirm callers detect
	 * that case. */
	if (pci_alloc_msix(dev, &vectors) == 0) {
		device_printf(sc->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		sc->msix = vectors;
		sc->vsi.num_queues = queues;
	}

	/* Next we need to setup the vector for the Admin Queue */
	rid = 1;	/* zero vector + 1 */
	sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->res == NULL) {
		device_printf(dev, "Unable to allocate"
		    " bus resource: AQ interrupt \n");
		goto fail;
	}
	if (bus_setup_intr(dev, sc->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixlv_msix_adminq, sc, &sc->tag)) {
		sc->res = NULL;
		device_printf(dev, "Failed to register AQ handler");
		goto fail;
	}
	bus_describe_intr(dev, sc->res, sc->tag, "adminq");

	return (vectors);

fail:
	/* The VF driver MUST use MSIX */
	return (0);
}
1299
/*
 * Map BAR 0 (device registers), enable bus mastering and MSIX, and
 * record the bus-space handles used by the shared i40e code.
 */
static int
ixlv_allocate_pci_resources(struct ixlv_sc *sc)
{
	int		rid;
	device_t	dev = sc->dev;

	rid = PCIR_BAR(0);
	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(sc->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	/* Save handles so shared code can do register access */
	sc->osdep.mem_bus_space_tag =
		rman_get_bustag(sc->pci_mem);
	sc->osdep.mem_bus_space_handle =
		rman_get_bushandle(sc->pci_mem);
	sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
	sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
	sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;

	sc->hw.back = &sc->osdep;

	/*
	** Explicitly set the guest PCI BUSMASTER capability
	** and we must rewrite the ENABLE in the MSIX control
	** register again at this point to cause the host to
	** successfully initialize us.
	**
	** This must be set before accessing any registers.
	*/
	{
		u16 pci_cmd_word;
		int msix_ctrl;
		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
		/* NOTE(review): the return of pci_find_cap() is unchecked —
		 * assumes the MSIX capability is always present on the VF;
		 * if absent, rid would keep its prior value. Confirm. */
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	/* Disable adminq interrupts (just in case) */
	ixlv_disable_adminq_irq(&sc->hw);

	return (0);
}
1351
1352 static void
1353 ixlv_free_pci_resources(struct ixlv_sc *sc)
1354 {
1355         struct ixl_vsi         *vsi = &sc->vsi;
1356         struct ixl_queue       *que = vsi->queues;
1357         device_t                dev = sc->dev;
1358
1359         /* We may get here before stations are setup */
1360         if (que == NULL)
1361                 goto early;
1362
1363         /*
1364         **  Release all msix queue resources:
1365         */
1366         for (int i = 0; i < vsi->num_queues; i++, que++) {
1367                 int rid = que->msix + 1;
1368                 if (que->tag != NULL) {
1369                         bus_teardown_intr(dev, que->res, que->tag);
1370                         que->tag = NULL;
1371                 }
1372                 if (que->res != NULL) {
1373                         bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1374                         que->res = NULL;
1375                 }
1376         }
1377         
1378 early:
1379         pci_release_msi(dev);
1380
1381         if (sc->msix_mem != NULL)
1382                 bus_release_resource(dev, SYS_RES_MEMORY,
1383                     PCIR_BAR(IXL_MSIX_BAR), sc->msix_mem);
1384
1385         if (sc->pci_mem != NULL)
1386                 bus_release_resource(dev, SYS_RES_MEMORY,
1387                     PCIR_BAR(0), sc->pci_mem);
1388 }
1389
1390 /*
1391  * Create taskqueue and tasklet for Admin Queue interrupts.
1392  */
1393 static int
1394 ixlv_init_taskqueue(struct ixlv_sc *sc)
1395 {
1396         int error = 0;
1397
1398         TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);
1399
1400         sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1401             taskqueue_thread_enqueue, &sc->tq);
1402         taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
1403             device_get_nameunit(sc->dev));
1404
1405         return (error);
1406 }
1407
1408 /*********************************************************************
1409  *
1410  *  Setup MSIX Interrupt resources and handlers for the VSI queues
1411  *
1412  **********************************************************************/
static int
ixlv_assign_msix(struct ixlv_sc *sc)
{
	device_t	dev = sc->dev;
	struct		ixl_vsi *vsi = &sc->vsi;
	struct		ixl_queue *que = vsi->queues;
	struct		tx_ring	 *txr;
	int		error, rid, vector = 1;
#ifdef	RSS
	cpuset_t	cpu_mask;
#endif

	/* Vector 0 belongs to the admin queue; queue vectors start at 1 */
	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
		int cpu_id = i;
		rid = vector + 1;
		/* NOTE(review): txr is assigned but not otherwise used in
		 * this function — confirm whether it can be removed. */
		txr = &que->txr;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev,"Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixlv_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register que handler");
			return (error);
		}
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
		/* Bind the vector to a CPU */
#ifdef RSS
		cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
		bus_bind_intr(dev, que->res, cpu_id);
		que->msix = vector;
		/* Deferred-work tasks and a per-queue taskqueue to run them */
		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
		TASK_INIT(&que->task, 0, ixlv_handle_que, que);
		que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
		/* Pin the taskqueue thread to the same CPU as the vector */
		CPU_SETOF(cpu_id, &cpu_mask);
		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
		    &cpu_mask, "%s (bucket %d)",
		    device_get_nameunit(dev), cpu_id);
#else
		taskqueue_start_threads(&que->tq, 1, PI_NET,
		    "%s que", device_get_nameunit(dev));
#endif

	}

	return (0);
}
1470
1471 /*
1472 ** Requests a VF reset from the PF.
1473 **
1474 ** Requires the VF's Admin Queue to be initialized.
1475 */
1476 static int
1477 ixlv_reset(struct ixlv_sc *sc)
1478 {
1479         struct i40e_hw  *hw = &sc->hw;
1480         device_t        dev = sc->dev;
1481         int             error = 0;
1482
1483         /* Ask the PF to reset us if we are initiating */
1484         if (sc->init_state != IXLV_RESET_PENDING)
1485                 ixlv_request_reset(sc);
1486
1487         i40e_msec_pause(100);
1488         error = ixlv_reset_complete(hw);
1489         if (error) {
1490                 device_printf(dev, "%s: VF reset failed\n",
1491                     __func__);
1492                 return (error);
1493         }
1494
1495         error = i40e_shutdown_adminq(hw);
1496         if (error) {
1497                 device_printf(dev, "%s: shutdown_adminq failed: %d\n",
1498                     __func__, error);
1499                 return (error);
1500         }
1501
1502         error = i40e_init_adminq(hw);
1503         if (error) {
1504                 device_printf(dev, "%s: init_adminq failed: %d\n",
1505                     __func__, error);
1506                 return(error);
1507         }
1508
1509         return (0);
1510 }
1511
1512 static int
1513 ixlv_reset_complete(struct i40e_hw *hw)
1514 {
1515         u32 reg;
1516
1517         /* Wait up to ~10 seconds */
1518         for (int i = 0; i < 100; i++) {
1519                 reg = rd32(hw, I40E_VFGEN_RSTAT) &
1520                     I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1521
1522                 if ((reg == I40E_VFR_VFACTIVE) ||
1523                     (reg == I40E_VFR_COMPLETED))
1524                         return (0);
1525                 i40e_msec_pause(100);
1526         }
1527
1528         return (EBUSY);
1529 }
1530
1531
1532 /*********************************************************************
1533  *
1534  *  Setup networking device structure and register an interface.
1535  *
1536  **********************************************************************/
static int
ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
{
	struct ifnet		*ifp;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;

	INIT_DBG_DEV(dev, "begin");

	ifp = vsi->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "%s: could not allocate ifnet"
		    " structure!\n", __func__);
		return (-1);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/* Basic ifnet setup: defaults and driver entry points */
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = IF_Gbps(40);
	ifp->if_init = ixlv_init;
	ifp->if_softc = vsi;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixlv_ioctl;

#if __FreeBSD_version >= 1100000
	if_setgetcounterfn(ifp, ixl_get_counter);
#endif

	ifp->if_transmit = ixl_mq_start;

	ifp->if_qflush = ixl_qflush;
	/* Leave slack in the legacy send queue below the ring size */
	ifp->if_snd.ifq_maxlen = que->num_desc - 2;

	ether_ifattach(ifp, sc->hw.mac.addr);

	vsi->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Advertise offload capabilities; all enabled by default below */
	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_TSO;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;

	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU
			     |  IFCAP_VLAN_HWCSUM
			     |  IFCAP_LRO;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	** Don't turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the ixl driver you can
	** enable this and get full hardware tag filtering.
	*/
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
		     ixlv_media_status);

	// JFV Add media types later?

	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	INIT_DBG_DEV(dev, "end");
	return (0);
}
1619
1620 /*
1621 ** Allocate and setup the interface queues
1622 */
1623 static int
1624 ixlv_setup_queues(struct ixlv_sc *sc)
1625 {
1626         device_t                dev = sc->dev;
1627         struct ixl_vsi          *vsi;
1628         struct ixl_queue        *que;
1629         struct tx_ring          *txr;
1630         struct rx_ring          *rxr;
1631         int                     rsize, tsize;
1632         int                     error = I40E_SUCCESS;
1633
1634         vsi = &sc->vsi;
1635         vsi->back = (void *)sc;
1636         vsi->hw = &sc->hw;
1637         vsi->num_vlans = 0;
1638
1639         /* Get memory for the station queues */
1640         if (!(vsi->queues =
1641                 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
1642                 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1643                         device_printf(dev, "Unable to allocate queue memory\n");
1644                         error = ENOMEM;
1645                         goto early;
1646         }
1647
1648         for (int i = 0; i < vsi->num_queues; i++) {
1649                 que = &vsi->queues[i];
1650                 que->num_desc = ixlv_ringsz;
1651                 que->me = i;
1652                 que->vsi = vsi;
1653
1654                 txr = &que->txr;
1655                 txr->que = que;
1656                 txr->tail = I40E_QTX_TAIL1(que->me);
1657                 /* Initialize the TX lock */
1658                 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
1659                     device_get_nameunit(dev), que->me);
1660                 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
1661                 /*
1662                 ** Create the TX descriptor ring, the extra int is
1663                 ** added as the location for HEAD WB.
1664                 */
1665                 tsize = roundup2((que->num_desc *
1666                     sizeof(struct i40e_tx_desc)) +
1667                     sizeof(u32), DBA_ALIGN);
1668                 if (i40e_allocate_dma_mem(&sc->hw,
1669                     &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
1670                         device_printf(dev,
1671                             "Unable to allocate TX Descriptor memory\n");
1672                         error = ENOMEM;
1673                         goto fail;
1674                 }
1675                 txr->base = (struct i40e_tx_desc *)txr->dma.va;
1676                 bzero((void *)txr->base, tsize);
1677                 /* Now allocate transmit soft structs for the ring */
1678                 if (ixl_allocate_tx_data(que)) {
1679                         device_printf(dev,
1680                             "Critical Failure setting up TX structures\n");
1681                         error = ENOMEM;
1682                         goto fail;
1683                 }
1684                 /* Allocate a buf ring */
1685                 txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
1686                     M_WAITOK, &txr->mtx);
1687                 if (txr->br == NULL) {
1688                         device_printf(dev,
1689                             "Critical Failure setting up TX buf ring\n");
1690                         error = ENOMEM;
1691                         goto fail;
1692                 }
1693
1694                 /*
1695                  * Next the RX queues...
1696                  */ 
1697                 rsize = roundup2(que->num_desc *
1698                     sizeof(union i40e_rx_desc), DBA_ALIGN);
1699                 rxr = &que->rxr;
1700                 rxr->que = que;
1701                 rxr->tail = I40E_QRX_TAIL1(que->me);
1702
1703                 /* Initialize the RX side lock */
1704                 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
1705                     device_get_nameunit(dev), que->me);
1706                 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
1707
1708                 if (i40e_allocate_dma_mem(&sc->hw,
1709                     &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
1710                         device_printf(dev,
1711                             "Unable to allocate RX Descriptor memory\n");
1712                         error = ENOMEM;
1713                         goto fail;
1714                 }
1715                 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
1716                 bzero((void *)rxr->base, rsize);
1717
1718                 /* Allocate receive soft structs for the ring */
1719                 if (ixl_allocate_rx_data(que)) {
1720                         device_printf(dev,
1721                             "Critical Failure setting up receive structs\n");
1722                         error = ENOMEM;
1723                         goto fail;
1724                 }
1725         }
1726
1727         return (0);
1728
1729 fail:
1730         for (int i = 0; i < vsi->num_queues; i++) {
1731                 que = &vsi->queues[i];
1732                 rxr = &que->rxr;
1733                 txr = &que->txr;
1734                 if (rxr->base)
1735                         i40e_free_dma_mem(&sc->hw, &rxr->dma);
1736                 if (txr->base)
1737                         i40e_free_dma_mem(&sc->hw, &txr->dma);
1738         }
1739         free(vsi->queues, M_DEVBUF);
1740
1741 early:
1742         return (error);
1743 }
1744
1745 /*
1746 ** This routine is run via an vlan config EVENT,
1747 ** it enables us to use the HW Filter table since
1748 ** we can get the vlan id. This just creates the
1749 ** entry in the soft version of the VFTA, init will
1750 ** repopulate the real table.
1751 */
1752 static void
1753 ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1754 {
1755         struct ixl_vsi          *vsi = arg;
1756         struct ixlv_sc          *sc = vsi->back;
1757         struct ixlv_vlan_filter *v;
1758
1759
1760         if (ifp->if_softc != arg)   /* Not our event */
1761                 return;
1762
1763         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1764                 return;
1765
1766         /* Sanity check - make sure it doesn't already exist */
1767         SLIST_FOREACH(v, sc->vlan_filters, next) {
1768                 if (v->vlan == vtag)
1769                         return;
1770         }
1771
1772         mtx_lock(&sc->mtx);
1773         ++vsi->num_vlans;
1774         v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
1775         SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
1776         v->vlan = vtag;
1777         v->flags = IXL_FILTER_ADD;
1778         ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
1779             IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
1780         mtx_unlock(&sc->mtx);
1781         return;
1782 }
1783
1784 /*
1785 ** This routine is run via an vlan
1786 ** unconfig EVENT, remove our entry
1787 ** in the soft vfta.
1788 */
1789 static void
1790 ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1791 {
1792         struct ixl_vsi          *vsi = arg;
1793         struct ixlv_sc          *sc = vsi->back;
1794         struct ixlv_vlan_filter *v;
1795         int                     i = 0;
1796         
1797         if (ifp->if_softc != arg)
1798                 return;
1799
1800         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1801                 return;
1802
1803         mtx_lock(&sc->mtx);
1804         SLIST_FOREACH(v, sc->vlan_filters, next) {
1805                 if (v->vlan == vtag) {
1806                         v->flags = IXL_FILTER_DEL;
1807                         ++i;
1808                         --vsi->num_vlans;
1809                 }
1810         }
1811         if (i)
1812                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
1813                     IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
1814         mtx_unlock(&sc->mtx);
1815         return;
1816 }
1817
1818 /*
1819 ** Get a new filter and add it to the mac filter list.
1820 */
1821 static struct ixlv_mac_filter *
1822 ixlv_get_mac_filter(struct ixlv_sc *sc)
1823 {
1824         struct ixlv_mac_filter  *f;
1825
1826         f = malloc(sizeof(struct ixlv_mac_filter),
1827             M_DEVBUF, M_NOWAIT | M_ZERO);
1828         if (f)
1829                 SLIST_INSERT_HEAD(sc->mac_filters, f, next);
1830
1831         return (f);
1832 }
1833
1834 /*
1835 ** Find the filter with matching MAC address
1836 */
1837 static struct ixlv_mac_filter *
1838 ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
1839 {
1840         struct ixlv_mac_filter  *f;
1841         bool                            match = FALSE;
1842
1843         SLIST_FOREACH(f, sc->mac_filters, next) {
1844                 if (cmp_etheraddr(f->macaddr, macaddr)) {
1845                         match = TRUE;
1846                         break;
1847                 }
1848         }       
1849
1850         if (!match)
1851                 f = NULL;
1852         return (f);
1853 }
1854
1855 static int
1856 ixlv_teardown_adminq_msix(struct ixlv_sc *sc)
1857 {
1858         device_t                dev = sc->dev;
1859         int                     error = 0;
1860
1861         if (sc->tag != NULL) {
1862                 bus_teardown_intr(dev, sc->res, sc->tag);
1863                 if (error) {
1864                         device_printf(dev, "bus_teardown_intr() for"
1865                             " interrupt 0 failed\n");
1866                         // return (ENXIO);
1867                 }
1868                 sc->tag = NULL;
1869         }
1870         if (sc->res != NULL) {
1871                 bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
1872                 if (error) {
1873                         device_printf(dev, "bus_release_resource() for"
1874                             " interrupt 0 failed\n");
1875                         // return (ENXIO);
1876                 }
1877                 sc->res = NULL;
1878         }
1879
1880         return (0);
1881
1882 }
1883
/*
** Admin Queue interrupt handler
**
** Acknowledges the interrupt and defers the actual admin queue
** processing to the sc->aq_irq task on the softc taskqueue.
*/
static void
ixlv_msix_adminq(void *arg)
{
        struct ixlv_sc  *sc = arg;
        struct i40e_hw  *hw = &sc->hw;
        u32             reg, mask;

        /* NOTE(review): these two reads appear to be acknowledge/
        ** read-to-clear accesses — neither value is used afterwards
        ** ('mask' in particular); confirm against the i40e datasheet */
        reg = rd32(hw, I40E_VFINT_ICR01);
        mask = rd32(hw, I40E_VFINT_ICR0_ENA1);

        /* Re-arm the vector, clearing the pending-bit array */
        reg = rd32(hw, I40E_VFINT_DYN_CTL01);
        reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
        wr32(hw, I40E_VFINT_DYN_CTL01, reg);

        /* schedule task */
        taskqueue_enqueue(sc->tq, &sc->aq_irq);
        return;
}
1905
1906 void
1907 ixlv_enable_intr(struct ixl_vsi *vsi)
1908 {
1909         struct i40e_hw          *hw = vsi->hw;
1910         struct ixl_queue        *que = vsi->queues;
1911
1912         ixlv_enable_adminq_irq(hw);
1913         for (int i = 0; i < vsi->num_queues; i++, que++)
1914                 ixlv_enable_queue_irq(hw, que->me);
1915 }
1916
1917 void
1918 ixlv_disable_intr(struct ixl_vsi *vsi)
1919 {
1920         struct i40e_hw          *hw = vsi->hw;
1921         struct ixl_queue       *que = vsi->queues;
1922
1923         ixlv_disable_adminq_irq(hw);
1924         for (int i = 0; i < vsi->num_queues; i++, que++)
1925                 ixlv_disable_queue_irq(hw, que->me);
1926 }
1927
1928
/*
** Mask off the admin queue interrupt: zero the dynamic control
** register and the cause-enable register, then flush posted
** writes with a read.
*/
static void
ixlv_disable_adminq_irq(struct i40e_hw *hw)
{
        wr32(hw, I40E_VFINT_DYN_CTL01, 0);
        wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
        /* flush */
        rd32(hw, I40E_VFGEN_RSTAT);
        return;
}
1938
/*
** Unmask the admin queue interrupt, enable the ADMINQ cause,
** and flush the posted writes with a read.
*/
static void
ixlv_enable_adminq_irq(struct i40e_hw *hw)
{
        wr32(hw, I40E_VFINT_DYN_CTL01,
            I40E_VFINT_DYN_CTL01_INTENA_MASK |
            I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
        wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
        /* flush */
        rd32(hw, I40E_VFGEN_RSTAT);
        return;
}
1950
1951 static void
1952 ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
1953 {
1954         u32             reg;
1955
1956         reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1957             I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
1958             I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
1959         wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
1960 }
1961
/*
** Mask off the interrupt vector for queue 'id' and flush
** the posted write with a read.
*/
static void
ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
{
        wr32(hw, I40E_VFINT_DYN_CTLN1(id),
            I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
        rd32(hw, I40E_VFGEN_RSTAT);
        return;
}
1970
1971 /*
1972  * Get initial ITR values from tunable values.
1973  */
1974 static void
1975 ixlv_configure_itr(struct ixlv_sc *sc)
1976 {
1977         struct i40e_hw          *hw = &sc->hw;
1978         struct ixl_vsi          *vsi = &sc->vsi;
1979         struct ixl_queue        *que = vsi->queues;
1980
1981         vsi->rx_itr_setting = ixlv_rx_itr;
1982         vsi->tx_itr_setting = ixlv_tx_itr;
1983
1984         for (int i = 0; i < vsi->num_queues; i++, que++) {
1985                 struct tx_ring  *txr = &que->txr;
1986                 struct rx_ring  *rxr = &que->rxr;
1987
1988                 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i),
1989                     vsi->rx_itr_setting);
1990                 rxr->itr = vsi->rx_itr_setting;
1991                 rxr->latency = IXL_AVE_LATENCY;
1992
1993                 wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i),
1994                     vsi->tx_itr_setting);
1995                 txr->itr = vsi->tx_itr_setting;
1996                 txr->latency = IXL_AVE_LATENCY;
1997         }
1998 }
1999
/*
** Provide a update to the queue RX
** interrupt moderation value, based on the traffic seen
** since the last invocation (rxr->bytes / rxr->packets).
*/
static void
ixlv_set_queue_rx_itr(struct ixl_queue *que)
{
        struct ixl_vsi  *vsi = que->vsi;
        struct i40e_hw  *hw = vsi->hw;
        struct rx_ring  *rxr = &que->rxr;
        u16             rx_itr;
        u16             rx_latency = 0;
        int             rx_bytes;


        /* Idle, do nothing */
        if (rxr->bytes == 0)
                return;

        if (ixlv_dynamic_rx_itr) {
                /* Bytes normalized by the current interval drive the
                ** latency-class transitions below */
                rx_bytes = rxr->bytes/rxr->itr;
                rx_itr = rxr->itr;

                /* Adjust latency range */
                switch (rxr->latency) {
                case IXL_LOW_LATENCY:
                        if (rx_bytes > 10) {
                                rx_latency = IXL_AVE_LATENCY;
                                rx_itr = IXL_ITR_20K;
                        }
                        break;
                case IXL_AVE_LATENCY:
                        if (rx_bytes > 20) {
                                rx_latency = IXL_BULK_LATENCY;
                                rx_itr = IXL_ITR_8K;
                        } else if (rx_bytes <= 10) {
                                rx_latency = IXL_LOW_LATENCY;
                                rx_itr = IXL_ITR_100K;
                        }
                        break;
                case IXL_BULK_LATENCY:
                        if (rx_bytes <= 20) {
                                rx_latency = IXL_AVE_LATENCY;
                                rx_itr = IXL_ITR_20K;
                        }
                        break;
                 }

                rxr->latency = rx_latency;

                if (rx_itr != rxr->itr) {
                        /* do an exponential smoothing so the ITR does
                        ** not jump abruptly between classes */
                        rx_itr = (10 * rx_itr * rxr->itr) /
                            ((9 * rx_itr) + rxr->itr);
                        rxr->itr = min(rx_itr, IXL_MAX_ITR);
                        wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
                            que->me), rxr->itr);
                }
        } else { /* We may have toggled to non-dynamic */
                if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
                        vsi->rx_itr_setting = ixlv_rx_itr;
                /* Update the hardware if needed */
                if (rxr->itr != vsi->rx_itr_setting) {
                        rxr->itr = vsi->rx_itr_setting;
                        wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
                            que->me), rxr->itr);
                }
        }
        /* Restart the byte/packet accounting for the next interval */
        rxr->bytes = 0;
        rxr->packets = 0;
        return;
}
2072
2073
/*
** Provide a update to the queue TX
** interrupt moderation value; mirrors the RX path above,
** driven by txr->bytes accumulated since the last call.
*/
static void
ixlv_set_queue_tx_itr(struct ixl_queue *que)
{
        struct ixl_vsi  *vsi = que->vsi;
        struct i40e_hw  *hw = vsi->hw;
        struct tx_ring  *txr = &que->txr;
        u16             tx_itr;
        u16             tx_latency = 0;
        int             tx_bytes;


        /* Idle, do nothing */
        if (txr->bytes == 0)
                return;

        if (ixlv_dynamic_tx_itr) {
                /* Bytes normalized by the current interval drive the
                ** latency-class transitions below */
                tx_bytes = txr->bytes/txr->itr;
                tx_itr = txr->itr;

                switch (txr->latency) {
                case IXL_LOW_LATENCY:
                        if (tx_bytes > 10) {
                                tx_latency = IXL_AVE_LATENCY;
                                tx_itr = IXL_ITR_20K;
                        }
                        break;
                case IXL_AVE_LATENCY:
                        if (tx_bytes > 20) {
                                tx_latency = IXL_BULK_LATENCY;
                                tx_itr = IXL_ITR_8K;
                        } else if (tx_bytes <= 10) {
                                tx_latency = IXL_LOW_LATENCY;
                                tx_itr = IXL_ITR_100K;
                        }
                        break;
                case IXL_BULK_LATENCY:
                        if (tx_bytes <= 20) {
                                tx_latency = IXL_AVE_LATENCY;
                                tx_itr = IXL_ITR_20K;
                        }
                        break;
                }

                txr->latency = tx_latency;

                if (tx_itr != txr->itr) {
                        /* do an exponential smoothing so the ITR does
                        ** not jump abruptly between classes */
                        tx_itr = (10 * tx_itr * txr->itr) /
                            ((9 * tx_itr) + txr->itr);
                        txr->itr = min(tx_itr, IXL_MAX_ITR);
                        wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
                            que->me), txr->itr);
                }

        } else { /* We may have toggled to non-dynamic */
                if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
                        vsi->tx_itr_setting = ixlv_tx_itr;
                /* Update the hardware if needed */
                if (txr->itr != vsi->tx_itr_setting) {
                        txr->itr = vsi->tx_itr_setting;
                        wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
                            que->me), txr->itr);
                }
        }
        /* Restart the byte/packet accounting for the next interval */
        txr->bytes = 0;
        txr->packets = 0;
        return;
}
2146
2147
/*
**
** MSIX Interrupt Handlers and Tasklets
**
** ixlv_handle_que: deferred-work task for a queue pair.  Drains
** RX and TX completions; if more RX work remains it reschedules
** itself (leaving the vector masked), otherwise it re-enables
** the queue interrupt.
*/
static void
ixlv_handle_que(void *context, int pending)
{
        struct ixl_queue *que = context;
        struct ixl_vsi *vsi = que->vsi;
        struct i40e_hw  *hw = vsi->hw;
        struct tx_ring  *txr = &que->txr;
        struct ifnet    *ifp = vsi->ifp;
        bool            more;

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                /* more == TRUE when the RX limit was hit */
                more = ixl_rxeof(que, IXL_RX_LIMIT);
                mtx_lock(&txr->mtx);
                ixl_txeof(que);
                /* Push out anything the stack queued meanwhile */
                if (!drbr_empty(ifp, txr->br))
                        ixl_mq_start_locked(ifp, txr);
                mtx_unlock(&txr->mtx);
                if (more) {
                        /* Keep the vector masked; run again */
                        taskqueue_enqueue(que->tq, &que->task);
                        return;
                }
        }

        /* Reenable this interrupt - hmmm */
        ixlv_enable_queue_irq(hw, que->me);
        return;
}
2180
2181
/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 *  Processes RX/TX completions inline; when work remains, defers
 *  the rest to the queue task (vector stays masked), otherwise
 *  re-enables the vector.  Also updates the dynamic ITR values.
 *
 **********************************************************************/
static void
ixlv_msix_que(void *arg)
{
        struct ixl_queue        *que = arg;
        struct ixl_vsi  *vsi = que->vsi;
        struct i40e_hw  *hw = vsi->hw;
        struct tx_ring  *txr = &que->txr;
        bool            more_tx, more_rx;

        /* Spurious interrupts are ignored */
        if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
                return;

        ++que->irqs;

        more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

        mtx_lock(&txr->mtx);
        more_tx = ixl_txeof(que);
        /*
        ** Make certain that if the stack 
        ** has anything queued the task gets
        ** scheduled to handle it.
        */
        if (!drbr_empty(vsi->ifp, txr->br))
                more_tx = 1;
        mtx_unlock(&txr->mtx);

        /* Re-tune interrupt moderation from this interval's stats */
        ixlv_set_queue_rx_itr(que);
        ixlv_set_queue_tx_itr(que);

        if (more_tx || more_rx)
                taskqueue_enqueue(que->tq, &que->task);
        else
                ixlv_enable_queue_irq(hw, que->me);

        return;
}
2225
2226
2227 /*********************************************************************
2228  *
2229  *  Media Ioctl callback
2230  *
2231  *  This routine is called whenever the user queries the status of
2232  *  the interface using ifconfig.
2233  *
2234  **********************************************************************/
2235 static void
2236 ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
2237 {
2238         struct ixl_vsi          *vsi = ifp->if_softc;
2239         struct ixlv_sc  *sc = vsi->back;
2240
2241         INIT_DBG_IF(ifp, "begin");
2242
2243         mtx_lock(&sc->mtx);
2244
2245         ixlv_update_link_status(sc);
2246
2247         ifmr->ifm_status = IFM_AVALID;
2248         ifmr->ifm_active = IFM_ETHER;
2249
2250         if (!sc->link_up) {
2251                 mtx_unlock(&sc->mtx);
2252                 INIT_DBG_IF(ifp, "end: link not up");
2253                 return;
2254         }
2255
2256         ifmr->ifm_status |= IFM_ACTIVE;
2257         /* Hardware is always full-duplex */
2258         ifmr->ifm_active |= IFM_FDX;
2259         mtx_unlock(&sc->mtx);
2260         INIT_DBG_IF(ifp, "end");
2261         return;
2262 }
2263
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  media/mediopt option with ifconfig.
 *
 **********************************************************************/
static int
ixlv_media_change(struct ifnet * ifp)
{
        struct ixl_vsi *vsi = ifp->if_softc;
        struct ifmedia *ifm = &vsi->media;

        INIT_DBG_IF(ifp, "begin");

        if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
                return (EINVAL);

        /* Nothing further to apply: only IFM_AUTO is registered for
        ** this interface, presumably because a VF cannot change its
        ** link speed — confirm against setup_interface */
        INIT_DBG_IF(ifp, "end");
        return (0);
}
2286
2287
2288 /*********************************************************************
2289  *  Multicast Initialization
2290  *
2291  *  This routine is called by init to reset a fresh state.
2292  *
2293  **********************************************************************/
2294
2295 static void
2296 ixlv_init_multi(struct ixl_vsi *vsi)
2297 {
2298         struct ixlv_mac_filter *f;
2299         struct ixlv_sc  *sc = vsi->back;
2300         int                     mcnt = 0;
2301
2302         IOCTL_DBG_IF(vsi->ifp, "begin");
2303
2304         /* First clear any multicast filters */
2305         SLIST_FOREACH(f, sc->mac_filters, next) {
2306                 if ((f->flags & IXL_FILTER_USED)
2307                     && (f->flags & IXL_FILTER_MC)) {
2308                         f->flags |= IXL_FILTER_DEL;
2309                         mcnt++;
2310                 }
2311         }
2312         if (mcnt > 0)
2313                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2314                     IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2315                     sc);
2316
2317         IOCTL_DBG_IF(vsi->ifp, "end");
2318 }
2319
/*
** Sync the interface's multicast list into the soft MAC filter
** table; falls back to multicast promiscuous when the list is
** too large for the hardware.
*/
static void
ixlv_add_multi(struct ixl_vsi *vsi)
{
        struct ifmultiaddr      *ifma;
        struct ifnet            *ifp = vsi->ifp;
        struct ixlv_sc  *sc = vsi->back;
        int                     mcnt = 0;

        IOCTL_DBG_IF(ifp, "begin");

        if_maddr_rlock(ifp);
        /*
        ** Get a count, to decide if we
        ** simply use multicast promiscuous.
        */
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                mcnt++;
        }
        if_maddr_runlock(ifp);

        /* TODO: Remove -- cannot set promiscuous mode in a VF */
        if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
                /* delete all multicast filters */
                ixlv_init_multi(vsi);
                sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
                ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
                    IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
                    sc);
                IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
                return;
        }

        /* NOTE(review): the maddr lock is dropped and re-taken between
        ** the count and the walk below, so the list could change in
        ** between — existing behavior, kept as-is */
        mcnt = 0;
        if_maddr_rlock(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                /* ixlv_add_mac_filter returning 0 means a new filter
                ** was actually added */
                if (!ixlv_add_mac_filter(sc,
                    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
                    IXL_FILTER_MC))
                        mcnt++;
        }
        if_maddr_runlock(ifp);
        /*
        ** Notify AQ task that sw filters need to be
        ** added to hw list
        */
        if (mcnt > 0)
                ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
                    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
                    sc);

        IOCTL_DBG_IF(ifp, "end");
}
2376
/*
** Mark for deletion any multicast soft filter whose address no
** longer appears on the interface's multicast list, then queue
** the removal command if any were marked.
*/
static void
ixlv_del_multi(struct ixl_vsi *vsi)
{
        struct ixlv_mac_filter *f;
        struct ifmultiaddr      *ifma;
        struct ifnet            *ifp = vsi->ifp;
        struct ixlv_sc  *sc = vsi->back;
        int                     mcnt = 0;
        bool            match = FALSE;

        IOCTL_DBG_IF(ifp, "begin");

        /* Search for removed multicast addresses */
        if_maddr_rlock(ifp);
        SLIST_FOREACH(f, sc->mac_filters, next) {
                if ((f->flags & IXL_FILTER_USED)
                    && (f->flags & IXL_FILTER_MC)) {
                        /* check if mac address in filter is in sc's list */
                        match = FALSE;
                        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                                if (ifma->ifma_addr->sa_family != AF_LINK)
                                        continue;
                                u8 *mc_addr =
                                    (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
                                if (cmp_etheraddr(f->macaddr, mc_addr)) {
                                        match = TRUE;
                                        break;
                                }
                        }
                        /* if this filter is not in the sc's list, remove it */
                        if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
                                f->flags |= IXL_FILTER_DEL;
                                mcnt++;
                                IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
                                    MAC_FORMAT_ARGS(f->macaddr));
                        }
                        else if (match == FALSE)
                                /* already flagged by an earlier pass */
                                IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
                                    MAC_FORMAT_ARGS(f->macaddr));
                }
        }
        if_maddr_runlock(ifp);

        /* Hand the actual hardware removal off to the vc manager */
        if (mcnt > 0)
                ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
                    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
                    sc);

        IOCTL_DBG_IF(ifp, "end");
}
2427
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status,updates statistics,
 *  and runs the watchdog check.  It is rescheduled every 'hz'
 *  ticks unless a reset is pending or a queue is declared hung,
 *  in which case the interface is stopped and re-initialized.
 *
 **********************************************************************/

static void
ixlv_local_timer(void *arg)
{
        struct ixlv_sc  *sc = arg;
        struct i40e_hw          *hw = &sc->hw;
        struct ixl_vsi          *vsi = &sc->vsi;
        struct ixl_queue        *que = vsi->queues;
        device_t                dev = sc->dev;
        struct tx_ring          *txr;
        int                     hung = 0;
        u32                     mask, val;
        s32                     timer, new_timer;

        IXLV_CORE_LOCK_ASSERT(sc);

        /* If Reset is in progress just bail */
        if (sc->init_state == IXLV_RESET_PENDING)
                return;

        /* Check for when PF triggers a VF reset */
        val = rd32(hw, I40E_VFGEN_RSTAT) &
            I40E_VFGEN_RSTAT_VFR_STATE_MASK;

        if (val != I40E_VFR_VFACTIVE
            && val != I40E_VFR_COMPLETED) {
                /* Note: no callout_reset here — the reset handling
                ** path is expected to restart the timer */
                DDPRINTF(dev, "reset in progress! (%d)", val);
                return;
        }

        ixlv_request_stats(sc);

        /* clean and process any events */
        taskqueue_enqueue(sc->tq, &sc->aq_irq);

        /*
        ** Check status on the queues for a hang
        */
        mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
            I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
            I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);

        for (int i = 0; i < vsi->num_queues; i++, que++) {
                txr = &que->txr;
                /* The TX path updates this atomically; see the cmpset
                ** note below for why plain stores are not used */
                timer = atomic_load_acq_32(&txr->watchdog_timer);
                if (timer > 0) {
                        new_timer = timer - hz;
                        if (new_timer <= 0) {
                                atomic_store_rel_32(&txr->watchdog_timer, -1);
                                device_printf(dev, "WARNING: queue %d "
                                    "appears to be hung!\n", que->me);
                                ++hung;
                        } else {
                                /*
                                 * If this fails, that means something in the TX path has updated
                                 * the watchdog, so it means the TX path is still working and
                                 * the watchdog doesn't need to countdown.
                                 */
                                atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer);
                                /* Any queues with outstanding work get a sw irq */
                                wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
                        }
                }
        }
        /* Reset when a queue shows hung */
        if (hung)
                goto hung;

        callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
        return;

hung:
        device_printf(dev, "WARNING: Resetting!\n");
        sc->init_state = IXLV_RESET_REQUIRED;
        sc->watchdog_events++;
        ixlv_stop(sc);
        ixlv_init_locked(sc);
}
2513
2514 /*
2515 ** Note: this routine updates the OS on the link state
2516 **      the real check of the hardware only happens with
2517 **      a link interrupt.
2518 */
2519 void
2520 ixlv_update_link_status(struct ixlv_sc *sc)
2521 {
2522         struct ixl_vsi          *vsi = &sc->vsi;
2523         struct ifnet            *ifp = vsi->ifp;
2524
2525         if (sc->link_up){ 
2526                 if (vsi->link_active == FALSE) {
2527                         if (bootverbose)
2528                                 if_printf(ifp,"Link is Up, %d Gbps\n",
2529                                     (sc->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
2530                         vsi->link_active = TRUE;
2531                         if_link_state_change(ifp, LINK_STATE_UP);
2532                 }
2533         } else { /* Link down */
2534                 if (vsi->link_active == TRUE) {
2535                         if (bootverbose)
2536                                 if_printf(ifp,"Link is Down\n");
2537                         if_link_state_change(ifp, LINK_STATE_DOWN);
2538                         vsi->link_active = FALSE;
2539                 }
2540         }
2541
2542         return;
2543 }
2544
/*********************************************************************
 *
 *  This routine stops all traffic on the VF: it flushes pending
 *  virtual channel commands, disables the VF queues, drains the
 *  admin queue briefly, and stops the local timer.
 *
 **********************************************************************/
2551
2552 static void
2553 ixlv_stop(struct ixlv_sc *sc)
2554 {
2555         struct ifnet *ifp;
2556         int start;
2557
2558         ifp = sc->vsi.ifp;
2559         INIT_DBG_IF(ifp, "begin");
2560
2561         IXLV_CORE_LOCK_ASSERT(sc);
2562
2563         ixl_vc_flush(&sc->vc_mgr);
2564         ixlv_disable_queues(sc);
2565
2566         start = ticks;
2567         while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
2568             ((ticks - start) < hz/10))
2569                 ixlv_do_adminq_locked(sc);
2570
2571         /* Stop the local timer */
2572         callout_stop(&sc->timer);
2573
2574         INIT_DBG_IF(ifp, "end");
2575 }
2576
2577
2578 /*********************************************************************
2579  *
2580  *  Free all station queue structs.
2581  *
2582  **********************************************************************/
2583 static void
2584 ixlv_free_queues(struct ixl_vsi *vsi)
2585 {
2586         struct ixlv_sc  *sc = (struct ixlv_sc *)vsi->back;
2587         struct ixl_queue        *que = vsi->queues;
2588
2589         for (int i = 0; i < vsi->num_queues; i++, que++) {
2590                 struct tx_ring *txr = &que->txr;
2591                 struct rx_ring *rxr = &que->rxr;
2592         
2593                 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2594                         continue;
2595                 IXL_TX_LOCK(txr);
2596                 ixl_free_que_tx(que);
2597                 if (txr->base)
2598                         i40e_free_dma_mem(&sc->hw, &txr->dma);
2599                 IXL_TX_UNLOCK(txr);
2600                 IXL_TX_LOCK_DESTROY(txr);
2601
2602                 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2603                         continue;
2604                 IXL_RX_LOCK(rxr);
2605                 ixl_free_que_rx(que);
2606                 if (rxr->base)
2607                         i40e_free_dma_mem(&sc->hw, &rxr->dma);
2608                 IXL_RX_UNLOCK(rxr);
2609                 IXL_RX_LOCK_DESTROY(rxr);
2610                 
2611         }
2612         free(vsi->queues, M_DEVBUF);
2613 }
2614
/*
 * Configure RSS directly through VF registers: program the hash key,
 * the enabled-hashtypes (HENA) bits, and the queue lookup table.
 * Used when the PF grants I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG.
 */
static void
ixlv_config_rss_reg(struct ixlv_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	struct ixl_vsi	*vsi = &sc->vsi;
	u32		lut = 0;
	u64		set_hena = 0, hena;
	int		i, j, que_id;
	u32		rss_seed[IXL_RSS_KEY_SIZE_REG];
#ifdef RSS
	u32		rss_hash_config;
#endif
	
	/* Don't set up RSS if using a single queue */
	if (vsi->num_queues == 1) {
		/* Clearing both HENA halves disables all RSS hashing. */
		wr32(hw, I40E_VFQF_HENA(0), 0);
		wr32(hw, I40E_VFQF_HENA(1), 0);
		ixl_flush(hw);
		return;
	}

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_seed);
#else
	ixl_get_default_rss_key(rss_seed);
#endif

	/* Fill out hash function seed */
	for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
		wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);

	/* Enable PCTYPES for RSS: */
#ifdef RSS
	/* Translate the kernel RSS hash configuration into HENA bits. */
	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	set_hena = IXL_DEFAULT_RSS_HENA_XL710;
#endif
	/* HENA is a 64-bit value split across two 32-bit registers;
	 * merge the new bits with whatever is already enabled. */
	hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
	    ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
	hena |= set_hena;
	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
		if (j == vsi->num_queues)
			j = 0;
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues.)
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % vsi->num_queues;
#else
		que_id = j;
#endif
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3) {
			wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
			DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
		}
	}
	ixl_flush(hw);
}
2698
/*
 * Configure RSS by asking the PF to do it: queue three virtual channel
 * commands (hash key, enabled hash types, lookup table), each completed
 * asynchronously via ixl_init_cmd_complete.  Used when the PF grants
 * I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF instead of direct register access.
 */
static void
ixlv_config_rss_pf(struct ixlv_sc *sc)
{
	/* RSS hash key */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_key_cmd,
	    IXLV_FLAG_AQ_CONFIG_RSS_KEY, ixl_init_cmd_complete, sc);

	/* Enabled hash types (HENA) */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->set_rss_hena_cmd,
	    IXLV_FLAG_AQ_SET_RSS_HENA, ixl_init_cmd_complete, sc);

	/* RSS lookup table */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_lut_cmd,
	    IXLV_FLAG_AQ_CONFIG_RSS_LUT, ixl_init_cmd_complete, sc);
}
2711
2712 /*
2713 ** ixlv_config_rss - setup RSS 
2714 **
2715 ** RSS keys and table are cleared on VF reset.
2716 */
2717 static void
2718 ixlv_config_rss(struct ixlv_sc *sc)
2719 {
2720         if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG) {
2721                 DDPRINTF(sc->dev, "Setting up RSS using VF registers...");
2722                 ixlv_config_rss_reg(sc);
2723         } else if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2724                 DDPRINTF(sc->dev, "Setting up RSS using messages to PF...");
2725                 ixlv_config_rss_pf(sc);
2726         } else
2727                 device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
2728 }
2729
2730 /*
2731 ** This routine refreshes vlan filters, called by init
2732 ** it scans the filter table and then updates the AQ
2733 */
2734 static void
2735 ixlv_setup_vlan_filters(struct ixlv_sc *sc)
2736 {
2737         struct ixl_vsi                  *vsi = &sc->vsi;
2738         struct ixlv_vlan_filter *f;
2739         int                             cnt = 0;
2740
2741         if (vsi->num_vlans == 0)
2742                 return;
2743         /*
2744         ** Scan the filter table for vlan entries,
2745         ** and if found call for the AQ update.
2746         */
2747         SLIST_FOREACH(f, sc->vlan_filters, next)
2748                 if (f->flags & IXL_FILTER_ADD)
2749                         cnt++;
2750         if (cnt > 0)
2751                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
2752                     IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
2753 }
2754
2755
2756 /*
2757 ** This routine adds new MAC filters to the sc's list;
2758 ** these are later added in hardware by sending a virtual
2759 ** channel message.
2760 */
2761 static int
2762 ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
2763 {
2764         struct ixlv_mac_filter  *f;
2765
2766         /* Does one already exist? */
2767         f = ixlv_find_mac_filter(sc, macaddr);
2768         if (f != NULL) {
2769                 IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
2770                     MAC_FORMAT_ARGS(macaddr));
2771                 return (EEXIST);
2772         }
2773
2774         /* If not, get a new empty filter */
2775         f = ixlv_get_mac_filter(sc);
2776         if (f == NULL) {
2777                 if_printf(sc->vsi.ifp, "%s: no filters available!!\n",
2778                     __func__);
2779                 return (ENOMEM);
2780         }
2781
2782         IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
2783             MAC_FORMAT_ARGS(macaddr));
2784
2785         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2786         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
2787         f->flags |= flags;
2788         return (0);
2789 }
2790
2791 /*
2792 ** Marks a MAC filter for deletion.
2793 */
2794 static int
2795 ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
2796 {
2797         struct ixlv_mac_filter  *f;
2798
2799         f = ixlv_find_mac_filter(sc, macaddr);
2800         if (f == NULL)
2801                 return (ENOENT);
2802
2803         f->flags |= IXL_FILTER_DEL;
2804         return (0);
2805 }
2806
2807 /*
2808 ** Tasklet handler for MSIX Adminq interrupts
2809 **  - done outside interrupt context since it might sleep
2810 */
2811 static void
2812 ixlv_do_adminq(void *context, int pending)
2813 {
2814         struct ixlv_sc          *sc = context;
2815
2816         mtx_lock(&sc->mtx);
2817         ixlv_do_adminq_locked(sc);
2818         mtx_unlock(&sc->mtx);
2819         return;
2820 }
2821
2822 static void
2823 ixlv_do_adminq_locked(struct ixlv_sc *sc)
2824 {
2825         struct i40e_hw                  *hw = &sc->hw;
2826         struct i40e_arq_event_info      event;
2827         struct i40e_virtchnl_msg        *v_msg;
2828         device_t                        dev = sc->dev;
2829         u16                             result = 0;
2830         u32                             reg, oldreg;
2831         i40e_status                     ret;
2832         bool                            aq_error = false;
2833
2834         IXLV_CORE_LOCK_ASSERT(sc);
2835
2836         event.buf_len = IXL_AQ_BUF_SZ;
2837         event.msg_buf = sc->aq_buffer;
2838         v_msg = (struct i40e_virtchnl_msg *)&event.desc;
2839
2840         do {
2841                 ret = i40e_clean_arq_element(hw, &event, &result);
2842                 if (ret)
2843                         break;
2844                 ixlv_vc_completion(sc, v_msg->v_opcode,
2845                     v_msg->v_retval, event.msg_buf, event.msg_len);
2846                 if (result != 0)
2847                         bzero(event.msg_buf, IXL_AQ_BUF_SZ);
2848         } while (result);
2849
2850         /* check for Admin queue errors */
2851         oldreg = reg = rd32(hw, hw->aq.arq.len);
2852         if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
2853                 device_printf(dev, "ARQ VF Error detected\n");
2854                 reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
2855                 aq_error = true;
2856         }
2857         if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
2858                 device_printf(dev, "ARQ Overflow Error detected\n");
2859                 reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
2860                 aq_error = true;
2861         }
2862         if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
2863                 device_printf(dev, "ARQ Critical Error detected\n");
2864                 reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
2865                 aq_error = true;
2866         }
2867         if (oldreg != reg)
2868                 wr32(hw, hw->aq.arq.len, reg);
2869
2870         oldreg = reg = rd32(hw, hw->aq.asq.len);
2871         if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
2872                 device_printf(dev, "ASQ VF Error detected\n");
2873                 reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
2874                 aq_error = true;
2875         }
2876         if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
2877                 device_printf(dev, "ASQ Overflow Error detected\n");
2878                 reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
2879                 aq_error = true;
2880         }
2881         if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
2882                 device_printf(dev, "ASQ Critical Error detected\n");
2883                 reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
2884                 aq_error = true;
2885         }
2886         if (oldreg != reg)
2887                 wr32(hw, hw->aq.asq.len, reg);
2888
2889         if (aq_error) {
2890                 /* Need to reset adapter */
2891                 device_printf(dev, "WARNING: Resetting!\n");
2892                 sc->init_state = IXLV_RESET_REQUIRED;
2893                 ixlv_stop(sc);
2894                 ixlv_init_locked(sc);
2895         }
2896         ixlv_enable_adminq_irq(hw);
2897 }
2898
2899 static void
2900 ixlv_add_sysctls(struct ixlv_sc *sc)
2901 {
2902         device_t dev = sc->dev;
2903         struct ixl_vsi *vsi = &sc->vsi;
2904         struct i40e_eth_stats *es = &vsi->eth_stats;
2905
2906         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2907         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2908         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2909
2910         struct sysctl_oid *vsi_node, *queue_node;
2911         struct sysctl_oid_list *vsi_list, *queue_list;
2912
2913 #define QUEUE_NAME_LEN 32
2914         char queue_namebuf[QUEUE_NAME_LEN];
2915
2916         struct ixl_queue *queues = vsi->queues;
2917         struct tx_ring *txr;
2918         struct rx_ring *rxr;
2919
2920         /* Driver statistics sysctls */
2921         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2922                         CTLFLAG_RD, &sc->watchdog_events,
2923                         "Watchdog timeouts");
2924         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2925                         CTLFLAG_RD, &sc->admin_irq,
2926                         "Admin Queue IRQ Handled");
2927
2928         /* VSI statistics sysctls */
2929         vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
2930                                    CTLFLAG_RD, NULL, "VSI-specific statistics");
2931         vsi_list = SYSCTL_CHILDREN(vsi_node);
2932
2933         struct ixl_sysctl_info ctls[] =
2934         {
2935                 {&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2936                 {&es->rx_unicast, "ucast_pkts_rcvd",
2937                         "Unicast Packets Received"},
2938                 {&es->rx_multicast, "mcast_pkts_rcvd",
2939                         "Multicast Packets Received"},
2940                 {&es->rx_broadcast, "bcast_pkts_rcvd",
2941                         "Broadcast Packets Received"},
2942                 {&es->rx_discards, "rx_discards", "Discarded RX packets"},
2943                 {&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"},
2944                 {&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2945                 {&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2946                 {&es->tx_multicast, "mcast_pkts_txd",
2947                         "Multicast Packets Transmitted"},
2948                 {&es->tx_broadcast, "bcast_pkts_txd",
2949                         "Broadcast Packets Transmitted"},
2950                 {&es->tx_errors, "tx_errors", "TX packet errors"},
2951                 // end
2952                 {0,0,0}
2953         };
2954         struct ixl_sysctl_info *entry = ctls;
2955         while (entry->stat != NULL)
2956         {
2957                 SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name,
2958                                 CTLFLAG_RD, entry->stat,
2959                                 entry->description);
2960                 entry++;
2961         }
2962
2963         /* Queue sysctls */
2964         for (int q = 0; q < vsi->num_queues; q++) {
2965                 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2966                 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
2967                                              CTLFLAG_RD, NULL, "Queue Name");
2968                 queue_list = SYSCTL_CHILDREN(queue_node);
2969
2970                 txr = &(queues[q].txr);
2971                 rxr = &(queues[q].rxr);
2972
2973                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2974                                 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2975                                 "m_defrag() failed");
2976                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
2977                                 CTLFLAG_RD, &(queues[q].dropped_pkts),
2978                                 "Driver dropped packets");
2979                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
2980                                 CTLFLAG_RD, &(queues[q].irqs),
2981                                 "irqs on this queue");
2982                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2983                                 CTLFLAG_RD, &(queues[q].tso),
2984                                 "TSO");
2985                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
2986                                 CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
2987                                 "Driver tx dma failure in xmit");
2988                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2989                                 CTLFLAG_RD, &(txr->no_desc),
2990                                 "Queue No Descriptor Available");
2991                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2992                                 CTLFLAG_RD, &(txr->total_packets),
2993                                 "Queue Packets Transmitted");
2994                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2995                                 CTLFLAG_RD, &(txr->tx_bytes),
2996                                 "Queue Bytes Transmitted");
2997                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2998                                 CTLFLAG_RD, &(rxr->rx_packets),
2999                                 "Queue Packets Received");
3000                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3001                                 CTLFLAG_RD, &(rxr->rx_bytes),
3002                                 "Queue Bytes Received");
3003                 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
3004                                 CTLFLAG_RD, &(rxr->itr), 0,
3005                                 "Queue Rx ITR Interval");
3006                 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
3007                                 CTLFLAG_RD, &(txr->itr), 0,
3008                                 "Queue Tx ITR Interval");
3009
3010 #ifdef IXL_DEBUG
3011                 /* Examine queue state */
3012                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head", 
3013                                 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
3014                                 sizeof(struct ixl_queue),
3015                                 ixlv_sysctl_qtx_tail_handler, "IU",
3016                                 "Queue Transmit Descriptor Tail");
3017                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_head", 
3018                                 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
3019                                 sizeof(struct ixl_queue),
3020                                 ixlv_sysctl_qrx_tail_handler, "IU",
3021                                 "Queue Receive Descriptor Tail");
3022                 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "watchdog_timer",
3023                                 CTLFLAG_RD, &(txr.watchdog_timer), 0,
3024                                 "Ticks before watchdog event is triggered");
3025 #endif
3026         }
3027 }
3028
/*
 * Allocate and initialize the singly-linked list heads used to track
 * this VF's MAC and VLAN filters.
 *
 * NOTE(review): both allocations use M_NOWAIT but neither return value
 * is checked; on allocation failure SLIST_INIT() would dereference
 * NULL.  Confirm whether the attach path permits M_WAITOK here, or add
 * explicit failure handling.
 */
static void
ixlv_init_filters(struct ixlv_sc *sc)
{
	sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INIT(sc->mac_filters);
	sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INIT(sc->vlan_filters);
	return;
}
3040
3041 static void
3042 ixlv_free_filters(struct ixlv_sc *sc)
3043 {
3044         struct ixlv_mac_filter *f;
3045         struct ixlv_vlan_filter *v;
3046
3047         while (!SLIST_EMPTY(sc->mac_filters)) {
3048                 f = SLIST_FIRST(sc->mac_filters);
3049                 SLIST_REMOVE_HEAD(sc->mac_filters, next);
3050                 free(f, M_DEVBUF);
3051         }
3052         while (!SLIST_EMPTY(sc->vlan_filters)) {
3053                 v = SLIST_FIRST(sc->vlan_filters);
3054                 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
3055                 free(v, M_DEVBUF);
3056         }
3057         return;
3058 }
3059
3060 #ifdef IXL_DEBUG
3061 /**
3062  * ixlv_sysctl_qtx_tail_handler
3063  * Retrieves I40E_QTX_TAIL1 value from hardware
3064  * for a sysctl.
3065  */
3066 static int 
3067 ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
3068 {
3069         struct ixl_queue *que;
3070         int error;
3071         u32 val;
3072
3073         que = ((struct ixl_queue *)oidp->oid_arg1);
3074         if (!que) return 0;
3075
3076         val = rd32(que->vsi->hw, que->txr.tail);
3077         error = sysctl_handle_int(oidp, &val, 0, req);
3078         if (error || !req->newptr)
3079                 return error;
3080         return (0);
3081 }
3082
3083 /**
3084  * ixlv_sysctl_qrx_tail_handler
3085  * Retrieves I40E_QRX_TAIL1 value from hardware
3086  * for a sysctl.
3087  */
3088 static int 
3089 ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
3090 {
3091         struct ixl_queue *que;
3092         int error;
3093         u32 val;
3094
3095         que = ((struct ixl_queue *)oidp->oid_arg1);
3096         if (!que) return 0;
3097
3098         val = rd32(que->vsi->hw, que->rxr.tail);
3099         error = sysctl_handle_int(oidp, &val, 0, req);
3100         if (error || !req->newptr)
3101                 return error;
3102         return (0);
3103 }
3104 #endif
3105