/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixl.h"
#include "ixlv.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixlv_driver_version[] = "1.4.12-k";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixlv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF, 0, 0, 0},
        /* required last entry */
        {0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixlv_strings[] = {
        "Intel(R) Ethernet Connection XL710/X722 VF Driver"
};


/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixlv_probe(device_t);
static int      ixlv_attach(device_t);
static int      ixlv_detach(device_t);
static int      ixlv_shutdown(device_t);
static void     ixlv_init_locked(struct ixlv_sc *);
static int      ixlv_allocate_pci_resources(struct ixlv_sc *);
static void     ixlv_free_pci_resources(struct ixlv_sc *);
static int      ixlv_assign_msix(struct ixlv_sc *);
static int      ixlv_init_msix(struct ixlv_sc *);
static int      ixlv_init_taskqueue(struct ixlv_sc *);
static int      ixlv_setup_queues(struct ixlv_sc *);
static void     ixlv_config_rss(struct ixlv_sc *);
static void     ixlv_stop(struct ixlv_sc *);
static void     ixlv_add_multi(struct ixl_vsi *);
static void     ixlv_del_multi(struct ixl_vsi *);
static void     ixlv_free_queues(struct ixl_vsi *);
static int      ixlv_setup_interface(device_t, struct ixlv_sc *);
static int      ixlv_teardown_adminq_msix(struct ixlv_sc *);

static int      ixlv_media_change(struct ifnet *);
static void     ixlv_media_status(struct ifnet *, struct ifmediareq *);

static void     ixlv_local_timer(void *);

static int      ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
static int      ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void     ixlv_init_filters(struct ixlv_sc *);
static void     ixlv_free_filters(struct ixlv_sc *);

static void     ixlv_msix_que(void *);
static void     ixlv_msix_adminq(void *);
static void     ixlv_do_adminq(void *, int);
static void     ixlv_do_adminq_locked(struct ixlv_sc *sc);
static void     ixlv_handle_que(void *, int);
static int      ixlv_reset(struct ixlv_sc *);
static int      ixlv_reset_complete(struct i40e_hw *);
static void     ixlv_set_queue_rx_itr(struct ixl_queue *);
static void     ixlv_set_queue_tx_itr(struct ixl_queue *);
static void     ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
                    enum i40e_status_code);
static void     ixlv_configure_itr(struct ixlv_sc *);

static void     ixlv_enable_adminq_irq(struct i40e_hw *);
static void     ixlv_disable_adminq_irq(struct i40e_hw *);
static void     ixlv_enable_queue_irq(struct i40e_hw *, int);
static void     ixlv_disable_queue_irq(struct i40e_hw *, int);

static void     ixlv_setup_vlan_filters(struct ixlv_sc *);
static void     ixlv_register_vlan(void *, struct ifnet *, u16);
static void     ixlv_unregister_vlan(void *, struct ifnet *, u16);

static void     ixlv_init_hw(struct ixlv_sc *);
static int      ixlv_setup_vc(struct ixlv_sc *);
static int      ixlv_vf_config(struct ixlv_sc *);

static void     ixlv_cap_txcsum_tso(struct ixl_vsi *,
                    struct ifnet *, int);

static void     ixlv_add_sysctls(struct ixlv_sc *);
#ifdef IXL_DEBUG
static int      ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int      ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixlv_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, ixlv_probe),
        DEVMETHOD(device_attach, ixlv_attach),
        DEVMETHOD(device_detach, ixlv_detach),
        DEVMETHOD(device_shutdown, ixlv_shutdown),
        {0, 0}
};

static driver_t ixlv_driver = {
        "ixlv", ixlv_methods, sizeof(struct ixlv_sc),
};

devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);

MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
                   "IXLV driver parameters");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixlv_ringsz = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixlv_ringsz, 0, "Descriptor Ring Size");

/* Set to zero to auto calculate */
int ixlv_max_queues = 0;
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixlv_max_queues, 0, "Number of Queues");

/*
** Number of entries in Tx queue buf_ring.
** Increasing this will reduce the number of
** errors when transmitting fragmented UDP
** packets.
*/
static int ixlv_txbrsz = DEFAULT_TXBRSZ;
TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
    &ixlv_txbrsz, 0, "TX Buf Ring Size");

/*
** Controls for Interrupt Throttling
**      - true/false for dynamic adjustment
**      - default values for static ITR
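**
**      ITR (Interrupt Throttle Rate) sets a minimum interval between
**      interrupts on a vector; e.g. IXL_ITR_8K caps a vector at
**      roughly 8000 interrupts per second.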
*/
int ixlv_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixlv_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixlv_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixlv_rx_itr, 0, "RX Interrupt Rate");

int ixlv_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixlv_tx_itr, 0, "TX Interrupt Rate");
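
/*
** The tunables above are read at boot; a hypothetical loader.conf(5)
** example (the values shown are illustrative only):
**
**   hw.ixlv.ringsz="2048"
**   hw.ixlv.max_queues="4"
*/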

/*********************************************************************
 *  Device identification routine
 *
 *  ixlv_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixlv_probe(device_t dev)
{
        ixl_vendor_info_t *ent;

        u16     pci_vendor_id, pci_device_id;
        u16     pci_subvendor_id, pci_subdevice_id;
        char    device_name[256];

#if 0
        INIT_DEBUGOUT("ixlv_probe: begin");
#endif

        pci_vendor_id = pci_get_vendor(dev);
        if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
                return (ENXIO);

        pci_device_id = pci_get_device(dev);
        pci_subvendor_id = pci_get_subvendor(dev);
        pci_subdevice_id = pci_get_subdevice(dev);

        ent = ixlv_vendor_info_array;
        while (ent->vendor_id != 0) {
                if ((pci_vendor_id == ent->vendor_id) &&
                    (pci_device_id == ent->device_id) &&
                    ((pci_subvendor_id == ent->subvendor_id) ||
                     (ent->subvendor_id == 0)) &&
                    ((pci_subdevice_id == ent->subdevice_id) ||
                     (ent->subdevice_id == 0))) {
                        sprintf(device_name, "%s, Version - %s",
                                ixlv_strings[ent->index],
                                ixlv_driver_version);
                        device_set_desc_copy(dev, device_name);
                        return (BUS_PROBE_DEFAULT);
                }
                ent++;
        }
        return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_attach(device_t dev)
{
        struct ixlv_sc  *sc;
        struct i40e_hw  *hw;
        struct ixl_vsi  *vsi;
        int             error = 0;

        INIT_DBG_DEV(dev, "begin");

        /* Allocate, clear, and link in our primary soft structure */
        sc = device_get_softc(dev);
        sc->dev = sc->osdep.dev = dev;
        hw = &sc->hw;
        vsi = &sc->vsi;
        vsi->dev = dev;

        /* Initialize hw struct */
        ixlv_init_hw(sc);

        /* Allocate filter lists */
        ixlv_init_filters(sc);

        /* Core Lock Init */
        mtx_init(&sc->mtx, device_get_nameunit(dev),
            "IXL SC Lock", MTX_DEF);

        /* Set up the timer callout */
        callout_init_mtx(&sc->timer, &sc->mtx, 0);

        /* Do PCI setup - map BAR0, etc */
        if (ixlv_allocate_pci_resources(sc)) {
                device_printf(dev, "%s: Allocation of PCI resources failed\n",
                    __func__);
                error = ENXIO;
                goto err_early;
        }

        INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");

        error = i40e_set_mac_type(hw);
        if (error) {
                device_printf(dev, "%s: set_mac_type failed: %d\n",
                    __func__, error);
                goto err_pci_res;
        }

        error = ixlv_reset_complete(hw);
        if (error) {
                device_printf(dev, "%s: Device is still being reset\n",
                    __func__);
                goto err_pci_res;
        }

        INIT_DBG_DEV(dev, "VF Device is ready for configuration");

        error = ixlv_setup_vc(sc);
        if (error) {
                device_printf(dev, "%s: Error setting up PF comms, %d\n",
                    __func__, error);
                goto err_pci_res;
        }

        INIT_DBG_DEV(dev, "PF API version verified");

        /* Need API version before sending reset message */
        error = ixlv_reset(sc);
        if (error) {
                device_printf(dev, "VF reset failed; reload the driver\n");
                goto err_aq;
        }

        INIT_DBG_DEV(dev, "VF reset complete");

        /* Ask for VF config from PF */
        error = ixlv_vf_config(sc);
        if (error) {
                device_printf(dev, "Error getting configuration from PF: %d\n",
                    error);
                goto err_aq;
        }

        device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n",
            sc->vf_res->num_vsis,
            sc->vf_res->num_queue_pairs,
            sc->vf_res->max_vectors,
            sc->vf_res->rss_key_size,
            sc->vf_res->rss_lut_size);
#ifdef IXL_DEBUG
        device_printf(dev, "Offload flags: 0x%b\n",
            sc->vf_res->vf_offload_flags, IXLV_PRINTF_VF_OFFLOAD_FLAGS);
#endif

        /* got VF config message back from PF, now we can parse it */
        for (int i = 0; i < sc->vf_res->num_vsis; i++) {
                if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
                        sc->vsi_res = &sc->vf_res->vsi_res[i];
        }
        if (!sc->vsi_res) {
                device_printf(dev, "%s: no LAN VSI found\n", __func__);
                error = EIO;
                goto err_res_buf;
        }

        INIT_DBG_DEV(dev, "Resource Acquisition complete");

        /* If no mac address was assigned just make a random one */
        if (!ixlv_check_ether_addr(hw->mac.addr)) {
                u8 addr[ETHER_ADDR_LEN];
                arc4rand(&addr, sizeof(addr), 0);
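                /*
                 * Clear the multicast bit and set the locally
                 * administered bit, per the IEEE 802 convention
                 * for software-generated MAC addresses.
                 */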
                addr[0] &= 0xFE;
                addr[0] |= 0x02;
                bcopy(addr, hw->mac.addr, sizeof(addr));
        }

        /* Now that the number of queues for this VF is known, set up interrupts */
        sc->msix = ixlv_init_msix(sc);
        /* We fail without MSIX support */
        if (sc->msix == 0) {
                error = ENXIO;
                goto err_res_buf;
        }

        vsi->id = sc->vsi_res->vsi_id;
        vsi->back = (void *)sc;
        sc->link_up = TRUE;

        /* This allocates the memory and early settings */
        if (ixlv_setup_queues(sc) != 0) {
                device_printf(dev, "%s: setup queues failed!\n",
                    __func__);
                error = EIO;
                goto out;
        }

        /* Setup the stack interface */
        if (ixlv_setup_interface(dev, sc) != 0) {
                device_printf(dev, "%s: setup interface failed!\n",
                    __func__);
                error = EIO;
                goto out;
        }

        INIT_DBG_DEV(dev, "Queue memory and interface setup");

        /* Do queue interrupt setup */
        if (ixlv_assign_msix(sc) != 0) {
                device_printf(dev, "%s: allocating queue interrupts failed!\n",
                    __func__);
                error = ENXIO;
                goto out;
        }

        /* Start AdminQ taskqueue */
        ixlv_init_taskqueue(sc);

        /* Initialize stats */
        bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
        ixlv_add_sysctls(sc);

        /* Register for VLAN events */
        vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
            ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
        vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
            ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

        /* We want AQ enabled early */
        ixlv_enable_adminq_irq(hw);

        /* Set things up to run init */
        sc->init_state = IXLV_INIT_READY;

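        /*
         * Initialize the virtchnl command manager; it serializes
         * commands sent to the PF over the admin queue and dispatches
         * each command's completion callback.
         */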
        ixl_vc_init_mgr(sc, &sc->vc_mgr);

        INIT_DBG_DEV(dev, "end");
        return (error);

out:
        ixlv_free_queues(vsi);
err_res_buf:
        free(sc->vf_res, M_DEVBUF);
err_aq:
        i40e_shutdown_adminq(hw);
err_pci_res:
        ixlv_free_pci_resources(sc);
err_early:
        mtx_destroy(&sc->mtx);
        ixlv_free_filters(sc);
        INIT_DBG_DEV(dev, "end: error %d", error);
        return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_detach(device_t dev)
{
        struct ixlv_sc  *sc = device_get_softc(dev);
        struct ixl_vsi  *vsi = &sc->vsi;
        struct i40e_hw  *hw = &sc->hw;
        enum i40e_status_code   status;

        INIT_DBG_DEV(dev, "begin");

        /* Make sure VLANS are not using driver */
        if (vsi->ifp->if_vlantrunk != NULL) {
                if_printf(vsi->ifp, "Vlan in use, detach first\n");
                return (EBUSY);
        }

        /* Stop driver */
        ether_ifdetach(vsi->ifp);
        if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
                mtx_lock(&sc->mtx);
                ixlv_stop(sc);
                mtx_unlock(&sc->mtx);
        }

        /* Unregister VLAN events */
        if (vsi->vlan_attach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
        if (vsi->vlan_detach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

        /* Drain VC mgr */
        callout_drain(&sc->vc_mgr.callout);

        ixlv_disable_adminq_irq(hw);
        ixlv_teardown_adminq_msix(sc);
        /* Drain admin queue taskqueue */
        taskqueue_free(sc->tq);
        status = i40e_shutdown_adminq(&sc->hw);
        if (status != I40E_SUCCESS) {
                device_printf(dev,
                    "i40e_shutdown_adminq() failed with status %s\n",
                    i40e_stat_str(hw, status));
        }

        if_free(vsi->ifp);
        free(sc->vf_res, M_DEVBUF);
        ixlv_free_pci_resources(sc);
        ixlv_free_queues(vsi);
        ixlv_free_filters(sc);

        bus_generic_detach(dev);
        mtx_destroy(&sc->mtx);
        INIT_DBG_DEV(dev, "end");
        return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixlv_shutdown(device_t dev)
{
        struct ixlv_sc  *sc = device_get_softc(dev);

        INIT_DBG_DEV(dev, "begin");

        mtx_lock(&sc->mtx);
        ixlv_stop(sc);
        mtx_unlock(&sc->mtx);

        INIT_DBG_DEV(dev, "end");
        return (0);
}

/*
 * Configure TXCSUM(IPV6) and TSO(4/6)
 *      - the hardware handles these together so we
 *        need to tweak them
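 *      - the logic below walks the three valid states (neither on,
 *        csum only, both on) so TSO is never left enabled without
 *        the matching tx checksum offload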
 */
static void
ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
        /* Enable/disable TXCSUM/TSO4 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable |= IFCAP_TXCSUM;
                        /* enable TXCSUM, restore TSO if previously enabled */
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                                ifp->if_capenable |= IFCAP_TSO4;
                        }
                } else if (mask & IFCAP_TSO4) {
                        ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                        if_printf(ifp,
                            "TSO4 requires txcsum, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM)
                        ifp->if_capenable &= ~IFCAP_TXCSUM;
                else if (mask & IFCAP_TSO4)
                        ifp->if_capenable |= IFCAP_TSO4;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && (ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO4;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
                        if_printf(ifp,
                            "TSO4 requires txcsum, disabling both...\n");
                } else if (mask & IFCAP_TSO4)
                        ifp->if_capenable &= ~IFCAP_TSO4;
        }

        /* Enable/disable TXCSUM_IPV6/TSO6 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                                ifp->if_capenable |= IFCAP_TSO6;
                        }
                } else if (mask & IFCAP_TSO6) {
                        ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                        if_printf(ifp,
                            "TSO6 requires txcsum6, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6)
                        ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
                else if (mask & IFCAP_TSO6)
                        ifp->if_capenable |= IFCAP_TSO6;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && (ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO6;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        if_printf(ifp,
                            "TSO6 requires txcsum6, disabling both...\n");
                } else if (mask & IFCAP_TSO6)
                        ifp->if_capenable &= ~IFCAP_TSO6;
        }
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixlv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        struct ixl_vsi          *vsi = ifp->if_softc;
        struct ixlv_sc  *sc = vsi->back;
        struct ifreq            *ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
        struct ifaddr           *ifa = (struct ifaddr *)data;
        bool                    avoid_reset = FALSE;
#endif
        int                     error = 0;

        switch (command) {

        case SIOCSIFADDR:
#ifdef INET
                if (ifa->ifa_addr->sa_family == AF_INET)
                        avoid_reset = TRUE;
#endif
#ifdef INET6
                if (ifa->ifa_addr->sa_family == AF_INET6)
                        avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
                /*
                ** Calling init results in link renegotiation,
                ** so we avoid doing it when possible.
                */
                if (avoid_reset) {
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                                ixlv_init(vsi);
#ifdef INET
                        if (!(ifp->if_flags & IFF_NOARP))
                                arp_ifinit(ifp, ifa);
#endif
                } else
                        error = ether_ioctl(ifp, command, data);
                break;
#endif
        case SIOCSIFMTU:
                IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
                mtx_lock(&sc->mtx);
                if (ifr->ifr_mtu > IXL_MAX_FRAME -
                    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
                        error = EINVAL;
                        IOCTL_DBG_IF(ifp, "mtu too large");
                } else {
                        IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", (u_long)ifp->if_mtu, ifr->ifr_mtu);
                        // ERJ: Interestingly enough, these types don't match
                        ifp->if_mtu = (u_long)ifr->ifr_mtu;
                        vsi->max_frame_size =
                            ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
                            + ETHER_VLAN_ENCAP_LEN;
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                ixlv_init_locked(sc);
                }
                mtx_unlock(&sc->mtx);
                break;
        case SIOCSIFFLAGS:
                IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
                mtx_lock(&sc->mtx);
                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                                ixlv_init_locked(sc);
                } else
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                ixlv_stop(sc);
                sc->if_flags = ifp->if_flags;
                mtx_unlock(&sc->mtx);
                break;
        case SIOCADDMULTI:
                IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        mtx_lock(&sc->mtx);
                        ixlv_disable_intr(vsi);
                        ixlv_add_multi(vsi);
                        ixlv_enable_intr(vsi);
                        mtx_unlock(&sc->mtx);
                }
                break;
        case SIOCDELMULTI:
                IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
                if (sc->init_state == IXLV_RUNNING) {
                        mtx_lock(&sc->mtx);
                        ixlv_disable_intr(vsi);
                        ixlv_del_multi(vsi);
                        ixlv_enable_intr(vsi);
                        mtx_unlock(&sc->mtx);
                }
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
                error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
                break;
        case SIOCSIFCAP:
        {
                int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");

                ixlv_cap_txcsum_tso(vsi, ifp, mask);

                if (mask & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                if (mask & IFCAP_RXCSUM_IPV6)
                        ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
                if (mask & IFCAP_LRO)
                        ifp->if_capenable ^= IFCAP_LRO;
                if (mask & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                if (mask & IFCAP_VLAN_HWFILTER)
                        ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        ixlv_init(vsi);
                }
                VLAN_CAPABILITIES(ifp);

                break;
        }

        default:
                IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}

/*
** Reinitializing the VF is unfortunately more involved than for a
** physical device: the PF must more or less completely recreate our
** memory, so much of what a traditional driver does only once at
** attach must be redone at each reinitialization. This function is
** the 'prelude' that lets us then call the normal locked init code.
*/
int
ixlv_reinit_locked(struct ixlv_sc *sc)
{
        struct i40e_hw          *hw = &sc->hw;
        struct ixl_vsi          *vsi = &sc->vsi;
        struct ifnet            *ifp = vsi->ifp;
        struct ixlv_mac_filter  *mf, *mf_temp;
        struct ixlv_vlan_filter *vf;
        int                     error = 0;

        INIT_DBG_IF(ifp, "begin");

        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                ixlv_stop(sc);

        error = ixlv_reset(sc);

        INIT_DBG_IF(ifp, "VF was reset");

        /* set the state in case we went through RESET */
        sc->init_state = IXLV_RUNNING;

        /*
        ** Resetting the VF drops all filters from hardware;
        ** we need to mark them to be re-added in init.
        */
        SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
                if (mf->flags & IXL_FILTER_DEL) {
                        SLIST_REMOVE(sc->mac_filters, mf,
                            ixlv_mac_filter, next);
                        free(mf, M_DEVBUF);
                } else
                        mf->flags |= IXL_FILTER_ADD;
        }
        if (vsi->num_vlans != 0)
                SLIST_FOREACH(vf, sc->vlan_filters, next)
                        vf->flags = IXL_FILTER_ADD;
        else { /* clean any stale filters */
                while (!SLIST_EMPTY(sc->vlan_filters)) {
                        vf = SLIST_FIRST(sc->vlan_filters);
                        SLIST_REMOVE_HEAD(sc->vlan_filters, next);
                        free(vf, M_DEVBUF);
                }
        }

        ixlv_enable_adminq_irq(hw);
        ixl_vc_flush(&sc->vc_mgr);

        INIT_DBG_IF(ifp, "end");
        return (error);
}

static void
ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
        enum i40e_status_code code)
{
        struct ixlv_sc *sc;

        sc = arg;

        /*
         * Ignore "Adapter Stopped": it is expected when an ifconfig
         * down arrives while a command is in progress, so it is not
         * worth printing an error for.
         */
        if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
                if_printf(sc->vsi.ifp,
                    "Error %s waiting for PF to complete operation %d\n",
                    i40e_stat_str(&sc->hw, code), cmd->request);
        }
}

static void
ixlv_init_locked(struct ixlv_sc *sc)
{
        struct i40e_hw          *hw = &sc->hw;
        struct ixl_vsi          *vsi = &sc->vsi;
        struct ixl_queue        *que = vsi->queues;
        struct ifnet            *ifp = vsi->ifp;
        int                      error = 0;

        INIT_DBG_IF(ifp, "begin");

        IXLV_CORE_LOCK_ASSERT(sc);

        /* Do a reinit first if an init has already been done */
        if ((sc->init_state == IXLV_RUNNING) ||
            (sc->init_state == IXLV_RESET_REQUIRED) ||
            (sc->init_state == IXLV_RESET_PENDING))
                error = ixlv_reinit_locked(sc);
        /* Don't bother with init if we failed reinit */
        if (error)
                goto init_done;

        /* Remove existing MAC filter if new MAC addr is set */
        if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
                error = ixlv_del_mac_filter(sc, hw->mac.addr);
                if (error == 0)
                        ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
                            IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
                            sc);
        }

        /* Check for an LAA mac address... */
        bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);

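        /* Rebuild if_hwassist from the currently enabled capabilities */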
        ifp->if_hwassist = 0;
        if (ifp->if_capenable & IFCAP_TSO)
                ifp->if_hwassist |= CSUM_TSO;
        if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
        if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
                ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;

        /* Add mac filter for this VF to PF */
        if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
                error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
                if (!error || error == EEXIST)
                        ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
                            IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
                            sc);
        }

        /* Set up VLANs if needed */
        ixlv_setup_vlan_filters(sc);

        /* Prepare the queues for operation */
        for (int i = 0; i < vsi->num_queues; i++, que++) {
                struct  rx_ring *rxr = &que->rxr;

                ixl_init_tx_ring(que);

                if (vsi->max_frame_size <= MCLBYTES)
                        rxr->mbuf_sz = MCLBYTES;
                else
                        rxr->mbuf_sz = MJUMPAGESIZE;
                ixl_init_rx_ring(que);
        }

        /* Set initial ITR values */
        ixlv_configure_itr(sc);

        /* Configure queues */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
            IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);

        /* Set up RSS */
        ixlv_config_rss(sc);

        /* Map vectors */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
            IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);

        /* Enable queues */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
            IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);

        /* Start the local timer */
        callout_reset(&sc->timer, hz, ixlv_local_timer, sc);

        sc->init_state = IXLV_RUNNING;

init_done:
        INIT_DBG_IF(ifp, "end");
        return;
}

/*
**  Init entry point for the stack
*/
void
ixlv_init(void *arg)
{
        struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
        struct ixlv_sc *sc = vsi->back;
        int retries = 0;

        /* Prevent init from running again while waiting for AQ calls
         * made in init_locked() to complete. */
        mtx_lock(&sc->mtx);
        if (sc->init_in_progress) {
                mtx_unlock(&sc->mtx);
                return;
        } else
                sc->init_in_progress = true;

        ixlv_init_locked(sc);
        mtx_unlock(&sc->mtx);

        /* Wait for init_locked to finish */
        while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
            && ++retries < IXLV_MAX_INIT_WAIT) {
                i40e_msec_pause(25);
        }
        if (retries >= IXLV_MAX_INIT_WAIT) {
                if_printf(vsi->ifp,
                    "Init failed to complete in allotted time!\n");
        }

        mtx_lock(&sc->mtx);
        sc->init_in_progress = false;
        mtx_unlock(&sc->mtx);
}

/*
 * ixlv_attach() helper function; gathers information about
 * the (virtual) hardware for use elsewhere in the driver.
 */
static void
ixlv_init_hw(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;

        /* Save off the information about this board */
        hw->vendor_id = pci_get_vendor(dev);
        hw->device_id = pci_get_device(dev);
        hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
        hw->subsystem_vendor_id =
            pci_read_config(dev, PCIR_SUBVEND_0, 2);
        hw->subsystem_device_id =
            pci_read_config(dev, PCIR_SUBDEV_0, 2);

        hw->bus.device = pci_get_slot(dev);
        hw->bus.func = pci_get_function(dev);
}

/*
 * ixlv_attach() helper function; initializes the admin queue
 * and attempts to establish contact with the PF by
 * retrying the initial "API version" message several times
 * or until the PF responds.
 */
static int
ixlv_setup_vc(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;
        int error = 0, ret_error = 0, asq_retries = 0;
        bool send_api_ver_retried = false;

        /* Need to set these AQ parameters before initializing AQ */
        hw->aq.num_arq_entries = IXL_AQ_LEN;
        hw->aq.num_asq_entries = IXL_AQ_LEN;
        hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
        hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

        for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
                /* Initialize admin queue */
                error = i40e_init_adminq(hw);
                if (error) {
                        device_printf(dev, "%s: init_adminq failed: %d\n",
                            __func__, error);
                        ret_error = 1;
                        continue;
                }

                INIT_DBG_DEV(dev, "Initialized Admin Queue; starting"
                    " send_api_ver attempt %d", i+1);

retry_send:
                /* Send VF's API version */
                error = ixlv_send_api_ver(sc);
                if (error) {
                        i40e_shutdown_adminq(hw);
                        ret_error = 2;
                        device_printf(dev, "%s: unable to send api"
                            " version to PF on attempt %d, error %d\n",
                            __func__, i+1, error);
                }

                asq_retries = 0;
                while (!i40e_asq_done(hw)) {
                        if (++asq_retries > IXLV_AQ_MAX_ERR) {
                                i40e_shutdown_adminq(hw);
                                device_printf(dev, "Admin Queue timeout "
                                    "(waiting for send_api_ver), %d more tries...\n",
                                    IXLV_AQ_MAX_ERR - (i + 1));
                                ret_error = 3;
                                break;
                        }
                        i40e_msec_pause(10);
                }
                if (asq_retries > IXLV_AQ_MAX_ERR)
                        continue;

                INIT_DBG_DEV(dev, "Sent API version message to PF");

                /* Verify that the PF replied with a compatible API version */
                error = ixlv_verify_api_ver(sc);
                if (error == ETIMEDOUT) {
                        if (!send_api_ver_retried) {
                                /* Resend message, one more time */
                                send_api_ver_retried = true;
                                device_printf(dev,
                                    "%s: Timeout while verifying API version on first"
                                    " try!\n", __func__);
                                goto retry_send;
                        } else {
                                device_printf(dev,
                                    "%s: Timeout while verifying API version on second"
                                    " try!\n", __func__);
                                ret_error = 4;
                                break;
                        }
                }
                if (error) {
                        device_printf(dev,
                            "%s: Unable to verify API version,"
                            " error %s\n", __func__, i40e_stat_str(hw, error));
                        ret_error = 5;
                }
                break;
        }

        if (ret_error >= 4)
                i40e_shutdown_adminq(hw);
        return (ret_error);
}

/*
 * ixlv_attach() helper function; asks the PF for this VF's
 * configuration, and saves the information if it receives it.
 */
static int
ixlv_vf_config(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;
        int bufsz, error = 0, ret_error = 0;
        int asq_retries, retried = 0;

retry_config:
        error = ixlv_send_vf_config_msg(sc);
        if (error) {
                device_printf(dev,
                    "%s: Unable to send VF config request, attempt %d,"
                    " error %d\n", __func__, retried + 1, error);
                ret_error = 2;
        }

        asq_retries = 0;
        while (!i40e_asq_done(hw)) {
                if (++asq_retries > IXLV_AQ_MAX_ERR) {
                        device_printf(dev, "%s: Admin Queue timeout "
                            "(waiting for send_vf_config_msg), attempt %d\n",
                            __func__, retried + 1);
                        ret_error = 3;
                        goto fail;
                }
                i40e_msec_pause(10);
        }

        INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
            retried + 1);

        if (!sc->vf_res) {
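                /*
                 * The PF's reply carries a variable-length array of VSI
                 * resources; size the buffer for the largest reply it
                 * may send.
                 */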
                bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
                    (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
                sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
                if (!sc->vf_res) {
                        device_printf(dev,
                            "%s: Unable to allocate memory for VF configuration"
                            " message from PF on attempt %d\n", __func__, retried + 1);
                        ret_error = 1;
                        goto fail;
                }
        }

        /* Check for VF config response */
        error = ixlv_get_vf_config(sc);
        if (error == ETIMEDOUT) {
                /* The 1st time we timeout, send the configuration message again */
                if (!retried) {
                        retried++;
                        goto retry_config;
                }
                device_printf(dev,
                    "%s: ixlv_get_vf_config() timed out waiting for a response\n",
                    __func__);
        }
        if (error) {
                device_printf(dev,
                    "%s: Unable to get VF configuration from PF after %d tries!\n",
                    __func__, retried + 1);
                ret_error = 4;
        }
        goto done;

fail:
        free(sc->vf_res, M_DEVBUF);
done:
        return (ret_error);
}

/*
 * Allocate MSI/X vectors, set up the AQ vector early
 */
static int
ixlv_init_msix(struct ixlv_sc *sc)
{
        device_t dev = sc->dev;
        int rid, want, vectors, queues, available;
        int auto_max_queues;

        rid = PCIR_BAR(IXL_MSIX_BAR);
        sc->msix_mem = bus_alloc_resource_any(dev,
            SYS_RES_MEMORY, &rid, RF_ACTIVE);
        if (!sc->msix_mem) {
                /* May not be enabled */
                device_printf(sc->dev,
                    "Unable to map MSIX table\n");
                goto fail;
        }

        available = pci_msix_count(dev);
        if (available == 0) { /* system has msix disabled */
                bus_release_resource(dev, SYS_RES_MEMORY,
                    rid, sc->msix_mem);
                sc->msix_mem = NULL;
                goto fail;
        }

        /* Clamp queues to number of CPUs and # of MSI-X vectors available */
        auto_max_queues = min(mp_ncpus, available - 1);
        /* Clamp queues to # assigned to VF by PF */
        auto_max_queues = min(auto_max_queues, sc->vf_res->num_queue_pairs);

        /* Override with tunable value if tunable is less than autoconfig count */
        if ((ixlv_max_queues != 0) && (ixlv_max_queues <= auto_max_queues))
                queues = ixlv_max_queues;
        /* Use autoconfig amount if that's lower */
        else if ((ixlv_max_queues != 0) && (ixlv_max_queues > auto_max_queues)) {
                device_printf(dev, "ixlv_max_queues (%d) is too large, using "
                    "autoconfig amount (%d)...\n",
                    ixlv_max_queues, auto_max_queues);
                queues = auto_max_queues;
        }
        /* Limit maximum auto-configured queues to 8 if no user value is set */
        else
                queues = min(auto_max_queues, 8);

#ifdef  RSS
        /* If we're doing RSS, clamp at the number of RSS buckets */
        if (queues > rss_getnumbuckets())
                queues = rss_getnumbuckets();
#endif

        /*
        ** Want one vector (RX/TX pair) per queue
        ** plus an additional for the admin queue.
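        ** For example, with 4 queues we request 5 vectors: vector 0
        ** services the admin queue and vectors 1-4 the queue pairs
        ** (assigned in ixlv_assign_msix() below).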
        */
        want = queues + 1;
        if (want <= available)  /* Have enough */
                vectors = want;
        else {
                device_printf(sc->dev,
                    "MSIX Configuration Problem, "
                    "%d vectors available but %d wanted!\n",
                    available, want);
                goto fail;
        }

#ifdef RSS
        /*
        * If we're doing RSS, the number of queues needs to
        * match the number of RSS buckets that are configured.
        *
        * + If there are more queues than RSS buckets, we'll end
        *   up with queues that get no traffic.
        *
        * + If there are more RSS buckets than queues, we'll end
        *   up having multiple RSS buckets map to the same queue,
        *   so there'll be some contention.
        */
        if (queues != rss_getnumbuckets()) {
                device_printf(dev,
                    "%s: queues (%d) != RSS buckets (%d)"
                    "; performance will be impacted.\n",
                     __func__, queues, rss_getnumbuckets());
        }
#endif

        if (pci_alloc_msix(dev, &vectors) == 0) {
                device_printf(sc->dev,
                    "Using MSIX interrupts with %d vectors\n", vectors);
                sc->msix = vectors;
                sc->vsi.num_queues = queues;
        }

        /* Next we need to set up the vector for the Admin Queue */
        rid = 1;        /* zero vector + 1 */
        sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &rid, RF_SHAREABLE | RF_ACTIVE);
        if (sc->res == NULL) {
                device_printf(dev, "Unable to allocate"
                    " bus resource: AQ interrupt\n");
                goto fail;
        }
        if (bus_setup_intr(dev, sc->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixlv_msix_adminq, sc, &sc->tag)) {
                sc->res = NULL;
                device_printf(dev, "Failed to register AQ handler\n");
                goto fail;
        }
        bus_describe_intr(dev, sc->res, sc->tag, "adminq");

        return (vectors);

fail:
        /* The VF driver MUST use MSIX */
        return (0);
}

static int
ixlv_allocate_pci_resources(struct ixlv_sc *sc)
{
        int             rid;
        device_t        dev = sc->dev;

        rid = PCIR_BAR(0);
        sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &rid, RF_ACTIVE);

        if (!(sc->pci_mem)) {
                device_printf(dev, "Unable to allocate bus resource: memory\n");
                return (ENXIO);
        }

        sc->osdep.mem_bus_space_tag =
                rman_get_bustag(sc->pci_mem);
        sc->osdep.mem_bus_space_handle =
                rman_get_bushandle(sc->pci_mem);
        sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
        sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
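        /*
         * hw_addr is only used as a cookie here; the shared code's
         * register access macros resolve reads and writes through the
         * osdep bus-space tag and handle saved above.
         */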
        sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;

        sc->hw.back = &sc->osdep;

        /*
        ** Explicitly set the guest PCI BUSMASTER capability, and
        ** rewrite the ENABLE bit in the MSIX control register at
        ** this point, so that the host initializes us successfully.
        **
        ** This must be done before accessing any registers.
        */
        {
                u16 pci_cmd_word;
                int msix_ctrl;
                pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
                pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
                pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
                pci_find_cap(dev, PCIY_MSIX, &rid);
                rid += PCIR_MSIX_CTRL;
                msix_ctrl = pci_read_config(dev, rid, 2);
                msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
                pci_write_config(dev, rid, msix_ctrl, 2);
        }

        /* Disable adminq interrupts (just in case) */
        ixlv_disable_adminq_irq(&sc->hw);

        return (0);
}

static void
ixlv_free_pci_resources(struct ixlv_sc *sc)
{
        struct ixl_vsi         *vsi = &sc->vsi;
        struct ixl_queue       *que = vsi->queues;
        device_t                dev = sc->dev;

        /* We may get here before stations are set up */
        if (que == NULL)
                goto early;

        /*
        **  Release all msix queue resources:
        */
        for (int i = 0; i < vsi->num_queues; i++, que++) {
                int rid = que->msix + 1;
                if (que->tag != NULL) {
                        bus_teardown_intr(dev, que->res, que->tag);
                        que->tag = NULL;
                }
                if (que->res != NULL) {
                        bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
                        que->res = NULL;
                }
        }

early:
        pci_release_msi(dev);

        if (sc->msix_mem != NULL)
                bus_release_resource(dev, SYS_RES_MEMORY,
                    PCIR_BAR(IXL_MSIX_BAR), sc->msix_mem);

        if (sc->pci_mem != NULL)
                bus_release_resource(dev, SYS_RES_MEMORY,
                    PCIR_BAR(0), sc->pci_mem);
}

/*
 * Create taskqueue and tasklet for Admin Queue interrupts.
 */
static int
ixlv_init_taskqueue(struct ixlv_sc *sc)
{
        int error = 0;

        TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);

        sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
            taskqueue_thread_enqueue, &sc->tq);
        taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
            device_get_nameunit(sc->dev));

        return (error);
}

/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers for the VSI queues
 *
 **********************************************************************/
static int
ixlv_assign_msix(struct ixlv_sc *sc)
{
        device_t        dev = sc->dev;
        struct          ixl_vsi *vsi = &sc->vsi;
        struct          ixl_queue *que = vsi->queues;
        struct          tx_ring  *txr;
        int             error, rid, vector = 1;
#ifdef  RSS
        cpuset_t        cpu_mask;
#endif

        for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
                int cpu_id = i;
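                /* IRQ rids are 1-based and rid 1 was taken by the AQ
                 * vector, so queue vector N maps to rid N + 1. */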
1426                 rid = vector + 1;
1427                 txr = &que->txr;
1428                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1429                     RF_SHAREABLE | RF_ACTIVE);
1430                 if (que->res == NULL) {
1431                         device_printf(dev, "Unable to allocate"
1432                             " bus resource: que interrupt [%d]\n", vector);
1433                         return (ENXIO);
1434                 }
1435                 /* Set the handler function */
1436                 error = bus_setup_intr(dev, que->res,
1437                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
1438                     ixlv_msix_que, que, &que->tag);
1439                 if (error) {
1440                         device_printf(dev,
                                 "Failed to register que handler: %d\n", error);
                             /* Don't leak the IRQ resource on failure */
                             bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1441                         que->res = NULL;
1442                         return (error);
1443                 }
1444                 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1445                 /* Bind the vector to a CPU */
1446 #ifdef RSS
1447                 cpu_id = rss_getcpu(i % rss_getnumbuckets());
1448 #endif
1449                 bus_bind_intr(dev, que->res, cpu_id);
1450                 que->msix = vector;
1451                 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1452                 TASK_INIT(&que->task, 0, ixlv_handle_que, que);
1453                 que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
1454                     taskqueue_thread_enqueue, &que->tq);
                     if (que->tq == NULL) {
                             device_printf(dev, "Unable to create que taskqueue\n");
                             return (ENOMEM);
                     }
1455 #ifdef RSS
1456                 CPU_SETOF(cpu_id, &cpu_mask);
1457                 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
1458                     &cpu_mask, "%s (bucket %d)",
1459                     device_get_nameunit(dev), cpu_id);
1460 #else
1461                 taskqueue_start_threads(&que->tq, 1, PI_NET,
1462                     "%s que", device_get_nameunit(dev));
1463 #endif
1464
1465         }
1466
1467         return (0);
1468 }
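/*
** Note on the rid math above: the adminq handler owns MSI-X vector 0
** (SYS_RES_IRQ rid 1, the rid released in ixlv_teardown_adminq_msix),
** so queue i is assigned vector i + 1 and bus rid i + 2.
*/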
1469
1470 /*
1471 ** Requests a VF reset from the PF.
1472 **
1473 ** Requires the VF's Admin Queue to be initialized.
1474 */
1475 static int
1476 ixlv_reset(struct ixlv_sc *sc)
1477 {
1478         struct i40e_hw  *hw = &sc->hw;
1479         device_t        dev = sc->dev;
1480         int             error = 0;
1481
1482         /* Ask the PF to reset us if we are initiating */
1483         if (sc->init_state != IXLV_RESET_PENDING)
1484                 ixlv_request_reset(sc);
1485
1486         i40e_msec_pause(100);
1487         error = ixlv_reset_complete(hw);
1488         if (error) {
1489                 device_printf(dev, "%s: VF reset failed\n",
1490                     __func__);
1491                 return (error);
1492         }
1493
1494         error = i40e_shutdown_adminq(hw);
1495         if (error) {
1496                 device_printf(dev, "%s: shutdown_adminq failed: %d\n",
1497                     __func__, error);
1498                 return (error);
1499         }
1500
1501         error = i40e_init_adminq(hw);
1502         if (error) {
1503                 device_printf(dev, "%s: init_adminq failed: %d\n",
1504                     __func__, error);
1505                 return(error);
1506         }
1507
1508         return (0);
1509 }
1510
1511 static int
1512 ixlv_reset_complete(struct i40e_hw *hw)
1513 {
1514         u32 reg;
1515
1516         /* Wait up to ~10 seconds */
1517         for (int i = 0; i < 100; i++) {
1518                 reg = rd32(hw, I40E_VFGEN_RSTAT) &
1519                     I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1520
1521                 if ((reg == I40E_VFR_VFACTIVE) ||
1522                     (reg == I40E_VFR_COMPLETED))
1523                         return (0);
1524                 i40e_msec_pause(100);
1525         }
1526
1527         return (EBUSY);
1528 }
1529
1530
1531 /*********************************************************************
1532  *
1533  *  Setup networking device structure and register an interface.
1534  *
1535  **********************************************************************/
1536 static int
1537 ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
1538 {
1539         struct ifnet            *ifp;
1540         struct ixl_vsi          *vsi = &sc->vsi;
1541         struct ixl_queue        *que = vsi->queues;
1542
1543         INIT_DBG_DEV(dev, "begin");
1544
1545         ifp = vsi->ifp = if_alloc(IFT_ETHER);
1546         if (ifp == NULL) {
1547                 device_printf(dev, "%s: could not allocate ifnet"
1548                     " structure!\n", __func__);
1549                 return (-1);
1550         }
1551
1552         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1553
1554         ifp->if_mtu = ETHERMTU;
1555         ifp->if_baudrate = IF_Gbps(40);
1556         ifp->if_init = ixlv_init;
1557         ifp->if_softc = vsi;
1558         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1559         ifp->if_ioctl = ixlv_ioctl;
1560
1561 #if __FreeBSD_version >= 1100000
1562         if_setgetcounterfn(ifp, ixl_get_counter);
1563 #endif
1564
1565         ifp->if_transmit = ixl_mq_start;
1566
1567         ifp->if_qflush = ixl_qflush;
1568         ifp->if_snd.ifq_maxlen = que->num_desc - 2;
1569
1570         ether_ifattach(ifp, sc->hw.mac.addr);
1571
1572         vsi->max_frame_size =
1573             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1574             + ETHER_VLAN_ENCAP_LEN;
1575
1576         /*
1577          * Tell the upper layer(s) we support long frames.
1578          */
1579         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1580
1581         ifp->if_capabilities |= IFCAP_HWCSUM;
1582         ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1583         ifp->if_capabilities |= IFCAP_TSO;
1584         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1585
1586         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1587                              |  IFCAP_VLAN_HWTSO
1588                              |  IFCAP_VLAN_MTU
1589                              |  IFCAP_VLAN_HWCSUM
1590                              |  IFCAP_LRO;
1591         ifp->if_capenable = ifp->if_capabilities;
1592
1593         /*
1594         ** Don't turn this on by default. If vlans are
1595         ** created on another pseudo device (e.g. lagg),
1596         ** vlan events are not passed through, breaking
1597         ** operation; with HW FILTER off it works. If
1598         ** using vlans directly on the ixl driver you can
1599         ** enable this and get full hardware tag filtering.
1600         */
1601         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
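        /*
         * E.g., to opt in when vlans are configured directly on this
         * interface (hypothetical unit 0):
         *      ifconfig ixlv0 vlanhwfilter
         */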
1602
1603         /*
1604          * Specify the media types supported by this adapter and register
1605          * callbacks to update media and link information
1606          */
1607         ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
1608                      ixlv_media_status);
1609
1610         // JFV Add media types later?
1611
1612         ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1613         ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1614
1615         INIT_DBG_DEV(dev, "end");
1616         return (0);
1617 }
1618
1619 /*
1620 ** Allocate and setup the interface queues
1621 */
1622 static int
1623 ixlv_setup_queues(struct ixlv_sc *sc)
1624 {
1625         device_t                dev = sc->dev;
1626         struct ixl_vsi          *vsi;
1627         struct ixl_queue        *que;
1628         struct tx_ring          *txr;
1629         struct rx_ring          *rxr;
1630         int                     rsize, tsize;
1631         int                     error = I40E_SUCCESS;
1632
1633         vsi = &sc->vsi;
1634         vsi->back = (void *)sc;
1635         vsi->hw = &sc->hw;
1636         vsi->num_vlans = 0;
1637
1638         /* Get memory for the station queues */
1639         if (!(vsi->queues =
1640                 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
1641                 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1642                         device_printf(dev, "Unable to allocate queue memory\n");
1643                         error = ENOMEM;
1644                         goto early;
1645         }
1646
1647         for (int i = 0; i < vsi->num_queues; i++) {
1648                 que = &vsi->queues[i];
1649                 que->num_desc = ixlv_ringsz;
1650                 que->me = i;
1651                 que->vsi = vsi;
1652
1653                 txr = &que->txr;
1654                 txr->que = que;
1655                 txr->tail = I40E_QTX_TAIL1(que->me);
1656                 /* Initialize the TX lock */
1657                 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
1658                     device_get_nameunit(dev), que->me);
1659                 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
1660                 /*
1661                 ** Create the TX descriptor ring; the extra u32 is
1662                 ** added as the location for the HEAD writeback.
1663                 */
1664                 tsize = roundup2((que->num_desc *
1665                     sizeof(struct i40e_tx_desc)) +
1666                     sizeof(u32), DBA_ALIGN);
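                /*
                 * E.g., assuming the default 1024-entry ring: 1024
                 * 16-byte TX descriptors plus the 4-byte writeback
                 * word is 16388 bytes before the DBA_ALIGN roundup.
                 */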
1667                 if (i40e_allocate_dma_mem(&sc->hw,
1668                     &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
1669                         device_printf(dev,
1670                             "Unable to allocate TX Descriptor memory\n");
1671                         error = ENOMEM;
1672                         goto fail;
1673                 }
1674                 txr->base = (struct i40e_tx_desc *)txr->dma.va;
1675                 bzero((void *)txr->base, tsize);
1676                 /* Now allocate transmit soft structs for the ring */
1677                 if (ixl_allocate_tx_data(que)) {
1678                         device_printf(dev,
1679                             "Critical Failure setting up TX structures\n");
1680                         error = ENOMEM;
1681                         goto fail;
1682                 }
1683                 /* Allocate a buf ring */
1684                 txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
1685                     M_WAITOK, &txr->mtx);
1686                 if (txr->br == NULL) {
1687                         device_printf(dev,
1688                             "Critical Failure setting up TX buf ring\n");
1689                         error = ENOMEM;
1690                         goto fail;
1691                 }
1692
1693                 /*
1694                  * Next the RX queues...
1695                  */ 
1696                 rsize = roundup2(que->num_desc *
1697                     sizeof(union i40e_rx_desc), DBA_ALIGN);
1698                 rxr = &que->rxr;
1699                 rxr->que = que;
1700                 rxr->tail = I40E_QRX_TAIL1(que->me);
1701
1702                 /* Initialize the RX side lock */
1703                 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
1704                     device_get_nameunit(dev), que->me);
1705                 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
1706
1707                 if (i40e_allocate_dma_mem(&sc->hw,
1708                     &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
1709                         device_printf(dev,
1710                             "Unable to allocate RX Descriptor memory\n");
1711                         error = ENOMEM;
1712                         goto fail;
1713                 }
1714                 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
1715                 bzero((void *)rxr->base, rsize);
1716
1717                 /* Allocate receive soft structs for the ring */
1718                 if (ixl_allocate_rx_data(que)) {
1719                         device_printf(dev,
1720                             "Critical Failure setting up receive structs\n");
1721                         error = ENOMEM;
1722                         goto fail;
1723                 }
1724         }
1725
1726         return (0);
1727
1728 fail:
1729         for (int i = 0; i < vsi->num_queues; i++) {
1730                 que = &vsi->queues[i];
1731                 rxr = &que->rxr;
1732                 txr = &que->txr;
1733                 if (rxr->base)
1734                         i40e_free_dma_mem(&sc->hw, &rxr->dma);
1735                 if (txr->base)
1736                         i40e_free_dma_mem(&sc->hw, &txr->dma);
                     if (txr->br != NULL)
                             buf_ring_free(txr->br, M_DEVBUF);
                     /* Locks were only initialized up to the failing queue */
                     if (mtx_initialized(&rxr->mtx))
                             mtx_destroy(&rxr->mtx);
                     if (mtx_initialized(&txr->mtx))
                             mtx_destroy(&txr->mtx);
1737         }
1738         free(vsi->queues, M_DEVBUF);
1739
1740 early:
1741         return (error);
1742 }
1743
1744 /*
1745 ** This routine is run via a vlan config EVENT;
1746 ** it enables us to use the HW Filter table since
1747 ** we can get the vlan id. This just creates the
1748 ** entry in the soft version of the VFTA; init will
1749 ** repopulate the real table.
1750 */
1751 static void
1752 ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1753 {
1754         struct ixl_vsi          *vsi = arg;
1755         struct ixlv_sc          *sc = vsi->back;
1756         struct ixlv_vlan_filter *v;
1757
1758
1759         if (ifp->if_softc != arg)   /* Not our event */
1760                 return;
1761
1762         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1763                 return;
1764
1765         mtx_lock(&sc->mtx);
1766         /* Sanity check - make sure it doesn't already exist */
1767         SLIST_FOREACH(v, sc->vlan_filters, next) {
1768                 if (v->vlan == vtag) {
                             mtx_unlock(&sc->mtx);
                             return;
                     }
1769         }
1770
1771         v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
             if (v == NULL) {
                     mtx_unlock(&sc->mtx);
                     return;
             }
1772         ++vsi->num_vlans;
1773         v->vlan = vtag;
1774         v->flags = IXL_FILTER_ADD;
1775         SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
1776         ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
1777             IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
1778         mtx_unlock(&sc->mtx);
1779         return;
1781 }
1782
1783 /*
1784 ** This routine is run via a vlan
1785 ** unconfig EVENT; it removes our entry
1786 ** from the soft vfta.
1787 */
1788 static void
1789 ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1790 {
1791         struct ixl_vsi          *vsi = arg;
1792         struct ixlv_sc          *sc = vsi->back;
1793         struct ixlv_vlan_filter *v;
1794         int                     i = 0;
1795         
1796         if (ifp->if_softc != arg)
1797                 return;
1798
1799         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1800                 return;
1801
1802         mtx_lock(&sc->mtx);
1803         SLIST_FOREACH(v, sc->vlan_filters, next) {
1804                 if (v->vlan == vtag) {
1805                         v->flags = IXL_FILTER_DEL;
1806                         ++i;
1807                         --vsi->num_vlans;
1808                 }
1809         }
1810         if (i)
1811                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
1812                     IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
1813         mtx_unlock(&sc->mtx);
1814         return;
1815 }
1816
1817 /*
1818 ** Get a new filter and add it to the mac filter list.
1819 */
1820 static struct ixlv_mac_filter *
1821 ixlv_get_mac_filter(struct ixlv_sc *sc)
1822 {
1823         struct ixlv_mac_filter  *f;
1824
1825         f = malloc(sizeof(struct ixlv_mac_filter),
1826             M_DEVBUF, M_NOWAIT | M_ZERO);
1827         if (f)
1828                 SLIST_INSERT_HEAD(sc->mac_filters, f, next);
1829
1830         return (f);
1831 }
1832
1833 /*
1834 ** Find the filter with matching MAC address
1835 */
1836 static struct ixlv_mac_filter *
1837 ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
1838 {
1839         struct ixlv_mac_filter  *f;
1840         bool                            match = FALSE;
1841
1842         SLIST_FOREACH(f, sc->mac_filters, next) {
1843                 if (cmp_etheraddr(f->macaddr, macaddr)) {
1844                         match = TRUE;
1845                         break;
1846                 }
1847         }       
1848
1849         if (!match)
1850                 f = NULL;
1851         return (f);
1852 }
1853
1854 static int
1855 ixlv_teardown_adminq_msix(struct ixlv_sc *sc)
1856 {
1857         device_t                dev = sc->dev;
1858         int                     error = 0;
1859
1860         if (sc->tag != NULL) {
1861                 error = bus_teardown_intr(dev, sc->res, sc->tag);
1862                 if (error) {
1863                         device_printf(dev, "bus_teardown_intr() for"
1864                             " interrupt 0 failed\n");
1865                         // return (ENXIO);
1866                 }
1867                 sc->tag = NULL;
1868         }
1869         if (sc->res != NULL) {
1870                 error = bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
1871                 if (error) {
1872                         device_printf(dev, "bus_release_resource() for"
1873                             " interrupt 0 failed\n");
1874                         // return (ENXIO);
1875                 }
1876                 sc->res = NULL;
1877         }
1878
1879         return (0);
1880 }
1882
1883 /*
1884 ** Admin Queue interrupt handler
1885 */
1886 static void
1887 ixlv_msix_adminq(void *arg)
1888 {
1889         struct ixlv_sc  *sc = arg;
1890         struct i40e_hw  *hw = &sc->hw;
1891         u32             reg, mask;
1892
             /* Reading ICR01 acknowledges and clears the pending causes */
1893         reg = rd32(hw, I40E_VFINT_ICR01);
1894         mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
1895
             /* Clear the PBA bit for the adminq cause before rescheduling */
1896         reg = rd32(hw, I40E_VFINT_DYN_CTL01);
1897         reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
1898         wr32(hw, I40E_VFINT_DYN_CTL01, reg);
1899
1900         /* schedule task */
1901         taskqueue_enqueue(sc->tq, &sc->aq_irq);
1902         return;
1903 }
1904
1905 void
1906 ixlv_enable_intr(struct ixl_vsi *vsi)
1907 {
1908         struct i40e_hw          *hw = vsi->hw;
1909         struct ixl_queue        *que = vsi->queues;
1910
1911         ixlv_enable_adminq_irq(hw);
1912         for (int i = 0; i < vsi->num_queues; i++, que++)
1913                 ixlv_enable_queue_irq(hw, que->me);
1914 }
1915
1916 void
1917 ixlv_disable_intr(struct ixl_vsi *vsi)
1918 {
1919         struct i40e_hw          *hw = vsi->hw;
1920         struct ixl_queue       *que = vsi->queues;
1921
1922         ixlv_disable_adminq_irq(hw);
1923         for (int i = 0; i < vsi->num_queues; i++, que++)
1924                 ixlv_disable_queue_irq(hw, que->me);
1925 }
1926
1927
1928 static void
1929 ixlv_disable_adminq_irq(struct i40e_hw *hw)
1930 {
1931         wr32(hw, I40E_VFINT_DYN_CTL01, 0);
1932         wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
1933         /* flush */
1934         rd32(hw, I40E_VFGEN_RSTAT);
1935         return;
1936 }
1937
1938 static void
1939 ixlv_enable_adminq_irq(struct i40e_hw *hw)
1940 {
1941         wr32(hw, I40E_VFINT_DYN_CTL01,
1942             I40E_VFINT_DYN_CTL01_INTENA_MASK |
1943             I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1944         wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
1945         /* flush */
1946         rd32(hw, I40E_VFGEN_RSTAT);
1947         return;
1948 }
1949
1950 static void
1951 ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
1952 {
1953         u32             reg;
1954
1955         reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1956             I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
1957             I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
1958         wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
1959 }
1960
1961 static void
1962 ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
1963 {
1964         wr32(hw, I40E_VFINT_DYN_CTLN1(id),
1965             I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
1966         rd32(hw, I40E_VFGEN_RSTAT);
1967         return;
1968 }
1969
1970 /*
1971  * Get initial ITR values from tunable values.
1972  */
1973 static void
1974 ixlv_configure_itr(struct ixlv_sc *sc)
1975 {
1976         struct i40e_hw          *hw = &sc->hw;
1977         struct ixl_vsi          *vsi = &sc->vsi;
1978         struct ixl_queue        *que = vsi->queues;
1979
1980         vsi->rx_itr_setting = ixlv_rx_itr;
1981         vsi->tx_itr_setting = ixlv_tx_itr;
1982
1983         for (int i = 0; i < vsi->num_queues; i++, que++) {
1984                 struct tx_ring  *txr = &que->txr;
1985                 struct rx_ring  *rxr = &que->rxr;
1986
1987                 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i),
1988                     vsi->rx_itr_setting);
1989                 rxr->itr = vsi->rx_itr_setting;
1990                 rxr->latency = IXL_AVE_LATENCY;
1991
1992                 wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i),
1993                     vsi->tx_itr_setting);
1994                 txr->itr = vsi->tx_itr_setting;
1995                 txr->latency = IXL_AVE_LATENCY;
1996         }
1997 }
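/*
** Note: the hardware interprets an ITR value as the minimum gap
** between interrupts in 2-usec units, so e.g. IXL_ITR_8K corresponds
** to a ~125 us interval, i.e. at most roughly 8K interrupts per
** second per queue.
*/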
1998
1999 /*
2000 ** Provide an update to the queue RX
2001 ** interrupt moderation value.
2002 */
2003 static void
2004 ixlv_set_queue_rx_itr(struct ixl_queue *que)
2005 {
2006         struct ixl_vsi  *vsi = que->vsi;
2007         struct i40e_hw  *hw = vsi->hw;
2008         struct rx_ring  *rxr = &que->rxr;
2009         u16             rx_itr;
2010         u16             rx_latency = 0;
2011         int             rx_bytes;
2012
2013
2014         /* Idle, do nothing */
2015         if (rxr->bytes == 0)
2016                 return;
2017
2018         if (ixlv_dynamic_rx_itr) {
2019                 rx_bytes = rxr->bytes/rxr->itr;
2020                 rx_itr = rxr->itr;
2021
2022                 /* Adjust latency range */
2023                 switch (rxr->latency) {
2024                 case IXL_LOW_LATENCY:
2025                         if (rx_bytes > 10) {
2026                                 rx_latency = IXL_AVE_LATENCY;
2027                                 rx_itr = IXL_ITR_20K;
2028                         }
2029                         break;
2030                 case IXL_AVE_LATENCY:
2031                         if (rx_bytes > 20) {
2032                                 rx_latency = IXL_BULK_LATENCY;
2033                                 rx_itr = IXL_ITR_8K;
2034                         } else if (rx_bytes <= 10) {
2035                                 rx_latency = IXL_LOW_LATENCY;
2036                                 rx_itr = IXL_ITR_100K;
2037                         }
2038                         break;
2039                 case IXL_BULK_LATENCY:
2040                         if (rx_bytes <= 20) {
2041                                 rx_latency = IXL_AVE_LATENCY;
2042                                 rx_itr = IXL_ITR_20K;
2043                         }
2044                         break;
2045                  }
2046
2047                 rxr->latency = rx_latency;
2048
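                /*
                 * The update below is a harmonic mean weighted 9:1
                 * toward the current value: e.g. itr = 200 with a
                 * target of 100 gives (10*100*200)/(9*100 + 200),
                 * about 181, stepping gradually toward the target.
                 */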
2049                 if (rx_itr != rxr->itr) {
2050                         /* do an exponential smoothing */
2051                         rx_itr = (10 * rx_itr * rxr->itr) /
2052                             ((9 * rx_itr) + rxr->itr);
2053                         rxr->itr = min(rx_itr, IXL_MAX_ITR);
2054                         wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
2055                             que->me), rxr->itr);
2056                 }
2057         } else { /* We may have toggled to non-dynamic */
2058                 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2059                         vsi->rx_itr_setting = ixlv_rx_itr;
2060                 /* Update the hardware if needed */
2061                 if (rxr->itr != vsi->rx_itr_setting) {
2062                         rxr->itr = vsi->rx_itr_setting;
2063                         wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
2064                             que->me), rxr->itr);
2065                 }
2066         }
2067         rxr->bytes = 0;
2068         rxr->packets = 0;
2069         return;
2070 }
2071
2072
2073 /*
2074 ** Provide an update to the queue TX
2075 ** interrupt moderation value.
2076 */
2077 static void
2078 ixlv_set_queue_tx_itr(struct ixl_queue *que)
2079 {
2080         struct ixl_vsi  *vsi = que->vsi;
2081         struct i40e_hw  *hw = vsi->hw;
2082         struct tx_ring  *txr = &que->txr;
2083         u16             tx_itr;
2084         u16             tx_latency = 0;
2085         int             tx_bytes;
2086
2087
2088         /* Idle, do nothing */
2089         if (txr->bytes == 0)
2090                 return;
2091
2092         if (ixlv_dynamic_tx_itr) {
2093                 tx_bytes = txr->bytes/txr->itr;
2094                 tx_itr = txr->itr;
2095
2096                 switch (txr->latency) {
2097                 case IXL_LOW_LATENCY:
2098                         if (tx_bytes > 10) {
2099                                 tx_latency = IXL_AVE_LATENCY;
2100                                 tx_itr = IXL_ITR_20K;
2101                         }
2102                         break;
2103                 case IXL_AVE_LATENCY:
2104                         if (tx_bytes > 20) {
2105                                 tx_latency = IXL_BULK_LATENCY;
2106                                 tx_itr = IXL_ITR_8K;
2107                         } else if (tx_bytes <= 10) {
2108                                 tx_latency = IXL_LOW_LATENCY;
2109                                 tx_itr = IXL_ITR_100K;
2110                         }
2111                         break;
2112                 case IXL_BULK_LATENCY:
2113                         if (tx_bytes <= 20) {
2114                                 tx_latency = IXL_AVE_LATENCY;
2115                                 tx_itr = IXL_ITR_20K;
2116                         }
2117                         break;
2118                 }
2119
2120                 txr->latency = tx_latency;
2121
2122                 if (tx_itr != txr->itr) {
2123                         /* do an exponential smoothing */
2124                         tx_itr = (10 * tx_itr * txr->itr) /
2125                             ((9 * tx_itr) + txr->itr);
2126                         txr->itr = min(tx_itr, IXL_MAX_ITR);
2127                         wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2128                             que->me), txr->itr);
2129                 }
2130
2131         } else { /* We may have toggled to non-dynamic */
2132                 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2133                         vsi->tx_itr_setting = ixlv_tx_itr;
2134                 /* Update the hardware if needed */
2135                 if (txr->itr != vsi->tx_itr_setting) {
2136                         txr->itr = vsi->tx_itr_setting;
2137                         wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2138                             que->me), txr->itr);
2139                 }
2140         }
2141         txr->bytes = 0;
2142         txr->packets = 0;
2143         return;
2144 }
2145
2146
2147 /*
2148 **
2149 ** MSIX Interrupt Handlers and Tasklets
2150 **
2151 */
2152 static void
2153 ixlv_handle_que(void *context, int pending)
2154 {
2155         struct ixl_queue *que = context;
2156         struct ixl_vsi *vsi = que->vsi;
2157         struct i40e_hw  *hw = vsi->hw;
2158         struct tx_ring  *txr = &que->txr;
2159         struct ifnet    *ifp = vsi->ifp;
2160         bool            more;
2161
2162         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2163                 more = ixl_rxeof(que, IXL_RX_LIMIT);
2164                 mtx_lock(&txr->mtx);
2165                 ixl_txeof(que);
2166                 if (!drbr_empty(ifp, txr->br))
2167                         ixl_mq_start_locked(ifp, txr);
2168                 mtx_unlock(&txr->mtx);
2169                 if (more) {
2170                         taskqueue_enqueue(que->tq, &que->task);
2171                         return;
2172                 }
2173         }
2174
2175         /* Re-enable this interrupt */
2176         ixlv_enable_queue_irq(hw, que->me);
2177         return;
2178 }
2179
2180
2181 /*********************************************************************
2182  *
2183  *  MSIX Queue Interrupt Service routine
2184  *
2185  **********************************************************************/
2186 static void
2187 ixlv_msix_que(void *arg)
2188 {
2189         struct ixl_queue        *que = arg;
2190         struct ixl_vsi  *vsi = que->vsi;
2191         struct i40e_hw  *hw = vsi->hw;
2192         struct tx_ring  *txr = &que->txr;
2193         bool            more_tx, more_rx;
2194
2195         /* Spurious interrupts are ignored */
2196         if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
2197                 return;
2198
2199         ++que->irqs;
2200
2201         more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
2202
2203         mtx_lock(&txr->mtx);
2204         more_tx = ixl_txeof(que);
2205         /*
2206         ** Make certain that if the stack 
2207         ** has anything queued the task gets
2208         ** scheduled to handle it.
2209         */
2210         if (!drbr_empty(vsi->ifp, txr->br))
2211                 more_tx = 1;
2212         mtx_unlock(&txr->mtx);
2213
2214         ixlv_set_queue_rx_itr(que);
2215         ixlv_set_queue_tx_itr(que);
2216
2217         if (more_tx || more_rx)
2218                 taskqueue_enqueue(que->tq, &que->task);
2219         else
2220                 ixlv_enable_queue_irq(hw, que->me);
2221
2222         return;
2223 }
2224
2225
2226 /*********************************************************************
2227  *
2228  *  Media Ioctl callback
2229  *
2230  *  This routine is called whenever the user queries the status of
2231  *  the interface using ifconfig.
2232  *
2233  **********************************************************************/
2234 static void
2235 ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
2236 {
2237         struct ixl_vsi          *vsi = ifp->if_softc;
2238         struct ixlv_sc  *sc = vsi->back;
2239
2240         INIT_DBG_IF(ifp, "begin");
2241
2242         mtx_lock(&sc->mtx);
2243
2244         ixlv_update_link_status(sc);
2245
2246         ifmr->ifm_status = IFM_AVALID;
2247         ifmr->ifm_active = IFM_ETHER;
2248
2249         if (!sc->link_up) {
2250                 mtx_unlock(&sc->mtx);
2251                 INIT_DBG_IF(ifp, "end: link not up");
2252                 return;
2253         }
2254
2255         ifmr->ifm_status |= IFM_ACTIVE;
2256         /* Hardware is always full-duplex */
2257         ifmr->ifm_active |= IFM_FDX;
2258         mtx_unlock(&sc->mtx);
2259         INIT_DBG_IF(ifp, "end");
2260         return;
2261 }
2262
2263 /*********************************************************************
2264  *
2265  *  Media Ioctl callback
2266  *
2267  *  This routine is called when the user changes speed/duplex using
2268  *  the media/mediaopt options with ifconfig.
2269  *
2270  **********************************************************************/
2271 static int
2272 ixlv_media_change(struct ifnet * ifp)
2273 {
2274         struct ixl_vsi *vsi = ifp->if_softc;
2275         struct ixlv_sc *sc = vsi->back;
             /* The media structure lives on the sc (see ixlv_setup_interface) */
             struct ifmedia *ifm = &sc->media;
2276
2277         INIT_DBG_IF(ifp, "begin");
2278
2279         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2280                 return (EINVAL);
2281
2282         INIT_DBG_IF(ifp, "end");
2283         return (0);
2284 }
2285
2286
2287 /*********************************************************************
2288  *  Multicast Initialization
2289  *
2290  *  This routine is called by init to reset multicast filtering to a fresh state.
2291  *
2292  **********************************************************************/
2293
2294 static void
2295 ixlv_init_multi(struct ixl_vsi *vsi)
2296 {
2297         struct ixlv_mac_filter *f;
2298         struct ixlv_sc  *sc = vsi->back;
2299         int                     mcnt = 0;
2300
2301         IOCTL_DBG_IF(vsi->ifp, "begin");
2302
2303         /* First clear any multicast filters */
2304         SLIST_FOREACH(f, sc->mac_filters, next) {
2305                 if ((f->flags & IXL_FILTER_USED)
2306                     && (f->flags & IXL_FILTER_MC)) {
2307                         f->flags |= IXL_FILTER_DEL;
2308                         mcnt++;
2309                 }
2310         }
2311         if (mcnt > 0)
2312                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2313                     IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2314                     sc);
2315
2316         IOCTL_DBG_IF(vsi->ifp, "end");
2317 }
2318
2319 static void
2320 ixlv_add_multi(struct ixl_vsi *vsi)
2321 {
2322         struct ifmultiaddr      *ifma;
2323         struct ifnet            *ifp = vsi->ifp;
2324         struct ixlv_sc  *sc = vsi->back;
2325         int                     mcnt = 0;
2326
2327         IOCTL_DBG_IF(ifp, "begin");
2328
2329         if_maddr_rlock(ifp);
2330         /*
2331         ** Get a count, to decide if we
2332         ** simply use multicast promiscuous.
2333         */
2334         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2335                 if (ifma->ifma_addr->sa_family != AF_LINK)
2336                         continue;
2337                 mcnt++;
2338         }
2339         if_maddr_runlock(ifp);
2340
2341         /* TODO: Remove -- cannot set promiscuous mode in a VF */
2342         if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
2343                 /* delete all multicast filters */
2344                 ixlv_init_multi(vsi);
2345                 sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
2346                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2347                     IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
2348                     sc);
2349                 IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
2350                 return;
2351         }
2352
2353         mcnt = 0;
2354         if_maddr_rlock(ifp);
2355         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2356                 if (ifma->ifma_addr->sa_family != AF_LINK)
2357                         continue;
2358                 if (!ixlv_add_mac_filter(sc,
2359                     (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2360                     IXL_FILTER_MC))
2361                         mcnt++;
2362         }
2363         if_maddr_runlock(ifp);
2364         /*
2365         ** Notify AQ task that sw filters need to be
2366         ** added to hw list
2367         */
2368         if (mcnt > 0)
2369                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2370                     IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
2371                     sc);
2372
2373         IOCTL_DBG_IF(ifp, "end");
2374 }
2375
2376 static void
2377 ixlv_del_multi(struct ixl_vsi *vsi)
2378 {
2379         struct ixlv_mac_filter *f;
2380         struct ifmultiaddr      *ifma;
2381         struct ifnet            *ifp = vsi->ifp;
2382         struct ixlv_sc  *sc = vsi->back;
2383         int                     mcnt = 0;
2384         bool            match = FALSE;
2385
2386         IOCTL_DBG_IF(ifp, "begin");
2387
2388         /* Search for removed multicast addresses */
2389         if_maddr_rlock(ifp);
2390         SLIST_FOREACH(f, sc->mac_filters, next) {
2391                 if ((f->flags & IXL_FILTER_USED)
2392                     && (f->flags & IXL_FILTER_MC)) {
2393                         /* check if mac address in filter is in sc's list */
2394                         match = FALSE;
2395                         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2396                                 if (ifma->ifma_addr->sa_family != AF_LINK)
2397                                         continue;
2398                                 u8 *mc_addr =
2399                                     (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2400                                 if (cmp_etheraddr(f->macaddr, mc_addr)) {
2401                                         match = TRUE;
2402                                         break;
2403                                 }
2404                         }
2405                         /* if this filter is not in the sc's list, remove it */
2406                         if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
2407                                 f->flags |= IXL_FILTER_DEL;
2408                                 mcnt++;
2409                                 IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
2410                                     MAC_FORMAT_ARGS(f->macaddr));
2411                         }
2412                         else if (match == FALSE)
2413                                 IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
2414                                     MAC_FORMAT_ARGS(f->macaddr));
2415                 }
2416         }
2417         if_maddr_runlock(ifp);
2418
2419         if (mcnt > 0)
2420                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2421                     IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2422                     sc);
2423
2424         IOCTL_DBG_IF(ifp, "end");
2425 }
2426
2427 /*********************************************************************
2428  *  Timer routine
2429  *
2430  *  This routine checks for link status, updates statistics,
2431  *  and runs the watchdog check.
2432  *
2433  **********************************************************************/
2434
2435 static void
2436 ixlv_local_timer(void *arg)
2437 {
2438         struct ixlv_sc  *sc = arg;
2439         struct i40e_hw          *hw = &sc->hw;
2440         struct ixl_vsi          *vsi = &sc->vsi;
2441         struct ixl_queue        *que = vsi->queues;
2442         device_t                dev = sc->dev;
2443         struct tx_ring          *txr;
2444         int                     hung = 0;
2445         u32                     mask, val;
2446         s32                     timer, new_timer;
2447
2448         IXLV_CORE_LOCK_ASSERT(sc);
2449
2450         /* If Reset is in progress just bail */
2451         if (sc->init_state == IXLV_RESET_PENDING)
2452                 return;
2453
2454         /* Check for when PF triggers a VF reset */
2455         val = rd32(hw, I40E_VFGEN_RSTAT) &
2456             I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2457
2458         if (val != I40E_VFR_VFACTIVE
2459             && val != I40E_VFR_COMPLETED) {
2460                 DDPRINTF(dev, "reset in progress! (%d)", val);
2461                 return;
2462         }
2463
2464         ixlv_request_stats(sc);
2465
2466         /* clean and process any events */
2467         taskqueue_enqueue(sc->tq, &sc->aq_irq);
2468
2469         /*
2470         ** Check status on the queues for a hang
2471         */
2472         mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
2473             I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
2474             I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
2475
2476         for (int i = 0; i < vsi->num_queues; i++, que++) {
2477                 txr = &que->txr;
2478                 timer = atomic_load_acq_32(&txr->watchdog_timer);
2479                 if (timer > 0) {
2480                         new_timer = timer - hz;
2481                         if (new_timer <= 0) {
2482                                 atomic_store_rel_32(&txr->watchdog_timer, -1);
2483                                 device_printf(dev, "WARNING: queue %d "
2484                                     "appears to be hung!\n", que->me);
2485                                 ++hung;
2486                         } else {
2487                                 /*
2488                                  * If this fails, that means something in the TX path has updated
2489                                  * the watchdog, so it means the TX path is still working and
2490                                  * the watchdog doesn't need to countdown.
2491                                  */
2492                                 atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer);
2493                                 /* Any queues with outstanding work get a sw irq */
2494                                 wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
2495                         }
2496                 }
2497         }
2498         /* Reset when a queue shows hung */
2499         if (hung)
2500                 goto hung;
2501
2502         callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
2503         return;
2504
2505 hung:
2506         device_printf(dev, "WARNING: Resetting!\n");
2507         sc->init_state = IXLV_RESET_REQUIRED;
2508         sc->watchdog_events++;
2509         ixlv_stop(sc);
2510         ixlv_init_locked(sc);
2511 }
2512
2513 /*
2514 ** Note: this routine updates the OS on the link state;
2515 **      the real check of the hardware only happens with
2516 **      a link interrupt.
2517 */
2518 void
2519 ixlv_update_link_status(struct ixlv_sc *sc)
2520 {
2521         struct ixl_vsi          *vsi = &sc->vsi;
2522         struct ifnet            *ifp = vsi->ifp;
2523
2524         if (sc->link_up) {
2525                 if (vsi->link_active == FALSE) {
2526                         if (bootverbose)
2527                                 if_printf(ifp, "Link is Up, %d Gbps\n",
2528                                     (sc->link_speed == I40E_LINK_SPEED_40GB) ? 40 : 10);
2529                         vsi->link_active = TRUE;
2530                         if_link_state_change(ifp, LINK_STATE_UP);
2531                 }
2532         } else { /* Link down */
2533                 if (vsi->link_active == TRUE) {
2534                         if (bootverbose)
2535                                 if_printf(ifp, "Link is Down\n");
2536                         if_link_state_change(ifp, LINK_STATE_DOWN);
2537                         vsi->link_active = FALSE;
2538                 }
2539         }
2540
2541         return;
2542 }
2543
2544 /*********************************************************************
2545  *
2546  *  This routine disables all traffic on the adapter by disabling
2547  *  the VF queues and draining any pending admin queue operations.
2548  *
2549  **********************************************************************/
2550
2551 static void
2552 ixlv_stop(struct ixlv_sc *sc)
2553 {
2554         struct ifnet *ifp;
2555         int start;
2556
2557         ifp = sc->vsi.ifp;
2558         INIT_DBG_IF(ifp, "begin");
2559
2560         IXLV_CORE_LOCK_ASSERT(sc);
2561
2562         ixl_vc_flush(&sc->vc_mgr);
2563         ixlv_disable_queues(sc);
2564
2565         start = ticks;
2566         while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
2567             ((ticks - start) < hz/10))
2568                 ixlv_do_adminq_locked(sc);
2569
2570         /* Stop the local timer */
2571         callout_stop(&sc->timer);
2572
2573         INIT_DBG_IF(ifp, "end");
2574 }
2575
2576
2577 /*********************************************************************
2578  *
2579  *  Free all station queue structs.
2580  *
2581  **********************************************************************/
2582 static void
2583 ixlv_free_queues(struct ixl_vsi *vsi)
2584 {
2585         struct ixlv_sc  *sc = (struct ixlv_sc *)vsi->back;
2586         struct ixl_queue        *que = vsi->queues;
2587
2588         for (int i = 0; i < vsi->num_queues; i++, que++) {
2589                 struct tx_ring *txr = &que->txr;
2590                 struct rx_ring *rxr = &que->rxr;
2591         
2592                 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2593                         continue;
2594                 IXL_TX_LOCK(txr);
2595                 ixl_free_que_tx(que);
2596                 if (txr->base)
2597                         i40e_free_dma_mem(&sc->hw, &txr->dma);
2598                 IXL_TX_UNLOCK(txr);
2599                 IXL_TX_LOCK_DESTROY(txr);
2600
2601                 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2602                         continue;
2603                 IXL_RX_LOCK(rxr);
2604                 ixl_free_que_rx(que);
2605                 if (rxr->base)
2606                         i40e_free_dma_mem(&sc->hw, &rxr->dma);
2607                 IXL_RX_UNLOCK(rxr);
2608                 IXL_RX_LOCK_DESTROY(rxr);
2609                 
2610         }
2611         free(vsi->queues, M_DEVBUF);
2612 }
2613
2614 static void
2615 ixlv_config_rss_reg(struct ixlv_sc *sc)
2616 {
2617         struct i40e_hw  *hw = &sc->hw;
2618         struct ixl_vsi  *vsi = &sc->vsi;
2619         u32             lut = 0;
2620         u64             set_hena = 0, hena;
2621         int             i, j, que_id;
2622         u32             rss_seed[IXL_RSS_KEY_SIZE_REG];
2623 #ifdef RSS
2624         u32             rss_hash_config;
2625 #endif
2626         
2627         /* Don't set up RSS if using a single queue */
2628         if (vsi->num_queues == 1) {
2629                 wr32(hw, I40E_VFQF_HENA(0), 0);
2630                 wr32(hw, I40E_VFQF_HENA(1), 0);
2631                 ixl_flush(hw);
2632                 return;
2633         }
2634
2635 #ifdef RSS
2636         /* Fetch the configured RSS key */
2637         rss_getkey((uint8_t *) &rss_seed);
2638 #else
2639         ixl_get_default_rss_key(rss_seed);
2640 #endif
2641
2642         /* Fill out hash function seed */
2643         for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
2644                 wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
2645
2646         /* Enable PCTYPES for RSS: */
2647 #ifdef RSS
2648         rss_hash_config = rss_gethashconfig();
2649         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2650                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2651         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2652                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2653         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2654                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2655         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2656                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2657         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2658                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2659         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2660                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2661         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2662                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
2663 #else
2664         set_hena = IXL_DEFAULT_RSS_HENA_XL710;
2665 #endif
2666         hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
2667             ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
2668         hena |= set_hena;
2669         wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
2670         wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
2671
2672         /* Populate the LUT with max no. of queues in round robin fashion */
2673         for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
2674                 if (j == vsi->num_queues)
2675                         j = 0;
2676 #ifdef RSS
2677                 /*
2678                  * Fetch the RSS bucket id for the given indirection entry.
2679                  * Cap it at the number of configured buckets (which is
2680                  * num_queues.)
2681                  */
2682                 que_id = rss_get_indirection_to_bucket(i);
2683                 que_id = que_id % vsi->num_queues;
2684 #else
2685                 que_id = j;
2686 #endif
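                /*
                 * Worked example: with 4 queues and no RSS option the
                 * first four que_ids are 0,1,2,3, so the first write
                 * below is HLUT(0) = 0x00010203.
                 */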
2687                 /* lut = 4-byte sliding window of 4 lut entries */
2688                 lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
2689                 /* On i = 3, we have 4 entries in lut; write to the register */
2690                 if ((i & 3) == 3) {
2691                         wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
2692                         DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
2693                 }
2694         }
2695         ixl_flush(hw);
2696 }
2697
2698 static void
2699 ixlv_config_rss_pf(struct ixlv_sc *sc)
2700 {
2701         ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_key_cmd,
2702             IXLV_FLAG_AQ_CONFIG_RSS_KEY, ixl_init_cmd_complete, sc);
2703
2704         ixl_vc_enqueue(&sc->vc_mgr, &sc->set_rss_hena_cmd,
2705             IXLV_FLAG_AQ_SET_RSS_HENA, ixl_init_cmd_complete, sc);
2706
2707         ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_lut_cmd,
2708             IXLV_FLAG_AQ_CONFIG_RSS_LUT, ixl_init_cmd_complete, sc);
2709 }
2710
2711 /*
2712 ** ixlv_config_rss - setup RSS 
2713 **
2714 ** RSS keys and table are cleared on VF reset.
2715 */
2716 static void
2717 ixlv_config_rss(struct ixlv_sc *sc)
2718 {
2719         if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG) {
2720                 DDPRINTF(sc->dev, "Setting up RSS using VF registers...");
2721                 ixlv_config_rss_reg(sc);
2722         } else if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2723                 DDPRINTF(sc->dev, "Setting up RSS using messages to PF...");
2724                 ixlv_config_rss_pf(sc);
2725         } else
2726                 device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
2727 }
2728
2729 /*
2730 ** This routine refreshes vlan filters; called by init,
2731 ** it scans the filter table and then updates the AQ.
2732 */
2733 static void
2734 ixlv_setup_vlan_filters(struct ixlv_sc *sc)
2735 {
2736         struct ixl_vsi                  *vsi = &sc->vsi;
2737         struct ixlv_vlan_filter *f;
2738         int                             cnt = 0;
2739
2740         if (vsi->num_vlans == 0)
2741                 return;
2742         /*
2743         ** Scan the filter table for vlan entries,
2744         ** and if found call for the AQ update.
2745         */
2746         SLIST_FOREACH(f, sc->vlan_filters, next)
2747                 if (f->flags & IXL_FILTER_ADD)
2748                         cnt++;
2749         if (cnt > 0)
2750                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
2751                     IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
2752 }
2753
2754
2755 /*
2756 ** This routine adds new MAC filters to the sc's list;
2757 ** these are later added in hardware by sending a virtual
2758 ** channel message.
2759 */
2760 static int
2761 ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
2762 {
2763         struct ixlv_mac_filter  *f;
2764
2765         /* Does one already exist? */
2766         f = ixlv_find_mac_filter(sc, macaddr);
2767         if (f != NULL) {
2768                 IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
2769                     MAC_FORMAT_ARGS(macaddr));
2770                 return (EEXIST);
2771         }
2772
2773         /* If not, get a new empty filter */
2774         f = ixlv_get_mac_filter(sc);
2775         if (f == NULL) {
2776                 if_printf(sc->vsi.ifp, "%s: no filters available!!\n",
2777                     __func__);
2778                 return (ENOMEM);
2779         }
2780
2781         IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
2782             MAC_FORMAT_ARGS(macaddr));
2783
2784         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2785         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
2786         f->flags |= flags;
2787         return (0);
2788 }
2789
2790 /*
2791 ** Marks a MAC filter for deletion.
2792 */
2793 static int
2794 ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
2795 {
2796         struct ixlv_mac_filter  *f;
2797
2798         f = ixlv_find_mac_filter(sc, macaddr);
2799         if (f == NULL)
2800                 return (ENOENT);
2801
2802         f->flags |= IXL_FILTER_DEL;
2803         return (0);
2804 }
2805
2806 /*
2807 ** Tasklet handler for MSIX Adminq interrupts
2808 **  - done outside interrupt context since it might sleep
2809 */
2810 static void
2811 ixlv_do_adminq(void *context, int pending)
2812 {
2813         struct ixlv_sc          *sc = context;
2814
2815         mtx_lock(&sc->mtx);
2816         ixlv_do_adminq_locked(sc);
2817         mtx_unlock(&sc->mtx);
2818         return;
2819 }
2820
2821 static void
2822 ixlv_do_adminq_locked(struct ixlv_sc *sc)
2823 {
2824         struct i40e_hw                  *hw = &sc->hw;
2825         struct i40e_arq_event_info      event;
2826         struct i40e_virtchnl_msg        *v_msg;
2827         device_t                        dev = sc->dev;
2828         u16                             result = 0;
2829         u32                             reg, oldreg;
2830         i40e_status                     ret;
2831         bool                            aq_error = false;
2832
2833         IXLV_CORE_LOCK_ASSERT(sc);
2834
2835         event.buf_len = IXL_AQ_BUF_SZ;
2836         event.msg_buf = sc->aq_buffer;
2837         v_msg = (struct i40e_virtchnl_msg *)&event.desc;
2838
2839         do {
2840                 ret = i40e_clean_arq_element(hw, &event, &result);
2841                 if (ret)
2842                         break;
2843                 ixlv_vc_completion(sc, v_msg->v_opcode,
2844                     v_msg->v_retval, event.msg_buf, event.msg_len);
2845                 if (result != 0)
2846                         bzero(event.msg_buf, IXL_AQ_BUF_SZ);
2847         } while (result);
2848
2849         /* check for Admin queue errors */
2850         oldreg = reg = rd32(hw, hw->aq.arq.len);
2851         if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
2852                 device_printf(dev, "ARQ VF Error detected\n");
2853                 reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
2854                 aq_error = true;
2855         }
2856         if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
2857                 device_printf(dev, "ARQ Overflow Error detected\n");
2858                 reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
2859                 aq_error = true;
2860         }
2861         if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
2862                 device_printf(dev, "ARQ Critical Error detected\n");
2863                 reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
2864                 aq_error = true;
2865         }
2866         if (oldreg != reg)
2867                 wr32(hw, hw->aq.arq.len, reg);
2868
2869         oldreg = reg = rd32(hw, hw->aq.asq.len);
2870         if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
2871                 device_printf(dev, "ASQ VF Error detected\n");
2872                 reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
2873                 aq_error = true;
2874         }
2875         if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
2876                 device_printf(dev, "ASQ Overflow Error detected\n");
2877                 reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
2878                 aq_error = true;
2879         }
2880         if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
2881                 device_printf(dev, "ASQ Critical Error detected\n");
2882                 reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
2883                 aq_error = true;
2884         }
2885         if (oldreg != reg)
2886                 wr32(hw, hw->aq.asq.len, reg);
2887
2888         if (aq_error) {
2889                 /* Need to reset adapter */
2890                 device_printf(dev, "WARNING: Resetting!\n");
2891                 sc->init_state = IXLV_RESET_REQUIRED;
2892                 ixlv_stop(sc);
2893                 ixlv_init_locked(sc);
2894         }
2895         ixlv_enable_adminq_irq(hw);
2896 }
2897
2898 static void
2899 ixlv_add_sysctls(struct ixlv_sc *sc)
2900 {
2901         device_t dev = sc->dev;
2902         struct ixl_vsi *vsi = &sc->vsi;
2903         struct i40e_eth_stats *es = &vsi->eth_stats;
2904
2905         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2906         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2907         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2908
2909         struct sysctl_oid *vsi_node, *queue_node;
2910         struct sysctl_oid_list *vsi_list, *queue_list;
2911
2912 #define QUEUE_NAME_LEN 32
2913         char queue_namebuf[QUEUE_NAME_LEN];
2914
2915         struct ixl_queue *queues = vsi->queues;
2916         struct tx_ring *txr;
2917         struct rx_ring *rxr;
2918
2919         /* Driver statistics sysctls */
2920         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2921                         CTLFLAG_RD, &sc->watchdog_events,
2922                         "Watchdog timeouts");
2923         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2924                         CTLFLAG_RD, &sc->admin_irq,
2925                         "Admin Queue IRQ Handled");
2926
2927         /* VSI statistics sysctls */
2928         vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
2929                                    CTLFLAG_RD, NULL, "VSI-specific statistics");
2930         vsi_list = SYSCTL_CHILDREN(vsi_node);
2931
2932         struct ixl_sysctl_info ctls[] =
2933         {
2934                 {&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2935                 {&es->rx_unicast, "ucast_pkts_rcvd",
2936                         "Unicast Packets Received"},
2937                 {&es->rx_multicast, "mcast_pkts_rcvd",
2938                         "Multicast Packets Received"},
2939                 {&es->rx_broadcast, "bcast_pkts_rcvd",
2940                         "Broadcast Packets Received"},
2941                 {&es->rx_discards, "rx_discards", "Discarded RX packets"},
2942                 {&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"},
2943                 {&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2944                 {&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2945                 {&es->tx_multicast, "mcast_pkts_txd",
2946                         "Multicast Packets Transmitted"},
2947                 {&es->tx_broadcast, "bcast_pkts_txd",
2948                         "Broadcast Packets Transmitted"},
2949                 {&es->tx_errors, "tx_errors", "TX packet errors"},
2950                 // end
2951                 {0,0,0}
2952         };
2953         struct ixl_sysctl_info *entry = ctls;
2954         while (entry->stat != NULL)
2955         {
2956                 SYSCTL_ADD_QUAD(ctx, vsi_list, OID_AUTO, entry->name,
2957                                 CTLFLAG_RD, entry->stat,
2958                                 entry->description);
2959                 entry++;
2960         }
2961
2962         /* Queue sysctls */
2963         for (int q = 0; q < vsi->num_queues; q++) {
2964                 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2965                 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
2966                                              CTLFLAG_RD, NULL, "Queue Name");
2967                 queue_list = SYSCTL_CHILDREN(queue_node);
2968
2969                 txr = &(queues[q].txr);
2970                 rxr = &(queues[q].rxr);
2971
2972                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2973                                 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2974                                 "m_defrag() failed");
2975                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
2976                                 CTLFLAG_RD, &(queues[q].dropped_pkts),
2977                                 "Driver dropped packets");
2978                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
2979                                 CTLFLAG_RD, &(queues[q].irqs),
2980                                 "irqs on this queue");
2981                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2982                                 CTLFLAG_RD, &(queues[q].tso),
2983                                 "TSO");
2984                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
2985                                 CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
2986                                 "Driver tx dma failure in xmit");
2987                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2988                                 CTLFLAG_RD, &(txr->no_desc),
2989                                 "Queue No Descriptor Available");
2990                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2991                                 CTLFLAG_RD, &(txr->total_packets),
2992                                 "Queue Packets Transmitted");
2993                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2994                                 CTLFLAG_RD, &(txr->tx_bytes),
2995                                 "Queue Bytes Transmitted");
2996                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2997                                 CTLFLAG_RD, &(rxr->rx_packets),
2998                                 "Queue Packets Received");
2999                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3000                                 CTLFLAG_RD, &(rxr->rx_bytes),
3001                                 "Queue Bytes Received");
3002                 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
3003                                 CTLFLAG_RD, &(rxr->itr), 0,
3004                                 "Queue Rx ITR Interval");
3005                 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
3006                                 CTLFLAG_RD, &(txr->itr), 0,
3007                                 "Queue Tx ITR Interval");
3008
3009 #ifdef IXL_DEBUG
3010                 /* Examine queue state */
3011                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head", 
3012                                 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
3013                                 sizeof(struct ixl_queue),
3014                                 ixlv_sysctl_qtx_tail_handler, "IU",
3015                                 "Queue Transmit Descriptor Tail");
3016                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_head", 
3017                                 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
3018                                 sizeof(struct ixl_queue),
3019                                 ixlv_sysctl_qrx_tail_handler, "IU",
3020                                 "Queue Receive Descriptor Tail");
3021                 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "watchdog_timer",
3022                                 CTLFLAG_RD, &(txr->watchdog_timer), 0,
3023                                 "Ticks before watchdog event is triggered");
3024 #endif
3025         }
3026 }
3027
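/*
 * The OIDs registered above hang off the device's sysctl node, so
 * (assuming VF unit 0) they can be read from userland roughly as:
 *
 *	sysctl dev.ixlv.0.admin_irq
 *	sysctl dev.ixlv.0.vsi.good_octets_rcvd
 *	sysctl dev.ixlv.0.vsi.que0.tx_packets
 *
 * The "dev.ixlv.%d" prefix comes from device_get_sysctl_tree(); the
 * leaf names are the strings passed to the SYSCTL_ADD_* calls above.
 */
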
3028 static void
3029 ixlv_init_filters(struct ixlv_sc *sc)
3030 {
3031         sc->mac_filters = malloc(sizeof(struct mac_list),
3032             M_DEVBUF, M_NOWAIT | M_ZERO);
3033         SLIST_INIT(sc->mac_filters);
3034         sc->vlan_filters = malloc(sizeof(struct vlan_list),
3035             M_DEVBUF, M_NOWAIT | M_ZERO);
3036         SLIST_INIT(sc->vlan_filters);
3037         return;
3038 }
3039
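/*
 * Note that malloc(9) with M_NOWAIT may return NULL, in which case the
 * SLIST_INIT() calls above dereference a NULL head.  A defensive
 * variant that propagates the failure (a sketch under that assumption,
 * not the function this driver currently calls) could look like:
 */
#if 0	/* illustrative sketch only */
static int
ixlv_init_filters_checked(struct ixlv_sc *sc)
{
	sc->mac_filters = malloc(sizeof(struct mac_list),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->mac_filters == NULL)
		return (ENOMEM);
	SLIST_INIT(sc->mac_filters);

	sc->vlan_filters = malloc(sizeof(struct vlan_list),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->vlan_filters == NULL) {
		free(sc->mac_filters, M_DEVBUF);
		sc->mac_filters = NULL;
		return (ENOMEM);
	}
	SLIST_INIT(sc->vlan_filters);
	return (0);
}
#endif
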
3040 static void
3041 ixlv_free_filters(struct ixlv_sc *sc)
3042 {
3043         struct ixlv_mac_filter *f;
3044         struct ixlv_vlan_filter *v;
3045
3046         while (!SLIST_EMPTY(sc->mac_filters)) {
3047                 f = SLIST_FIRST(sc->mac_filters);
3048                 SLIST_REMOVE_HEAD(sc->mac_filters, next);
3049                 free(f, M_DEVBUF);
3050         }
3051         while (!SLIST_EMPTY(sc->vlan_filters)) {
3052                 v = SLIST_FIRST(sc->vlan_filters);
3053                 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
3054                 free(v, M_DEVBUF);
3055         }
3056         return;
3057 }
3058
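/*
 * ixlv_free_filters() empties both lists but leaves the list-head
 * structures allocated in ixlv_init_filters() in place; unless they
 * are released elsewhere on detach, freeing them here would complete
 * the cleanup.  A sketch of that extra step (an assumption, not
 * current behavior) would be:
 */
#if 0	/* illustrative sketch only */
	free(sc->mac_filters, M_DEVBUF);
	sc->mac_filters = NULL;
	free(sc->vlan_filters, M_DEVBUF);
	sc->vlan_filters = NULL;
#endif
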
3059 #ifdef IXL_DEBUG
3060 /**
3061  * ixlv_sysctl_qtx_tail_handler
3062  * Retrieves I40E_QTX_TAIL1 value from hardware
3063  * for a sysctl.
3064  */
3065 static int 
3066 ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
3067 {
3068         struct ixl_queue *que;
3069         int error;
3070         u32 val;
3071
3072         que = ((struct ixl_queue *)oidp->oid_arg1);
3073         if (!que) return 0;
3074
3075         val = rd32(que->vsi->hw, que->txr.tail);
3076         error = sysctl_handle_int(oidp, &val, 0, req);
3077         if (error || !req->newptr)
3078                 return error;
3079         return (0);
3080 }
3081
3082 /**
3083  * ixlv_sysctl_qrx_tail_handler
3084  * Retrieves I40E_QRX_TAIL1 value from hardware
3085  * for a sysctl.
3086  */
3087 static int 
3088 ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
3089 {
3090         struct ixl_queue *que;
3091         int error;
3092         u32 val;
3093
3094         que = ((struct ixl_queue *)oidp->oid_arg1);
3095         if (!que) return 0;
3096
3097         val = rd32(que->vsi->hw, que->rxr.tail);
3098         error = sysctl_handle_int(oidp, &val, 0, req);
3099         if (error || !req->newptr)
3100                 return error;
3101         return (0);
3102 }
3103 #endif
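/*
 * Both handlers above are registered read-only (CTLTYPE_UINT |
 * CTLFLAG_RD), so sysctl_handle_int() never supplies a new value and
 * the req->newptr test is defensive boilerplate: each handler simply
 * snapshots the hardware tail register and hands it to userland.
 */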
3104