/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#ifndef IXL_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#endif

#include "ixl.h"
#include "ixlv.h"

#ifdef RSS
#include <net/rss_config.h>
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixlv_driver_version[] = "1.2.6";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixlv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
        /* required last entry */
        {0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixlv_strings[] = {
        "Intel(R) Ethernet Connection XL710 VF Driver"
};


/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixlv_probe(device_t);
static int      ixlv_attach(device_t);
static int      ixlv_detach(device_t);
static int      ixlv_shutdown(device_t);
static void     ixlv_init_locked(struct ixlv_sc *);
static int      ixlv_allocate_pci_resources(struct ixlv_sc *);
static void     ixlv_free_pci_resources(struct ixlv_sc *);
static int      ixlv_assign_msix(struct ixlv_sc *);
static int      ixlv_init_msix(struct ixlv_sc *);
static int      ixlv_init_taskqueue(struct ixlv_sc *);
static int      ixlv_setup_queues(struct ixlv_sc *);
static void     ixlv_config_rss(struct ixlv_sc *);
static void     ixlv_stop(struct ixlv_sc *);
static void     ixlv_add_multi(struct ixl_vsi *);
static void     ixlv_del_multi(struct ixl_vsi *);
static void     ixlv_free_queues(struct ixl_vsi *);
static int      ixlv_setup_interface(device_t, struct ixlv_sc *);

static int      ixlv_media_change(struct ifnet *);
static void     ixlv_media_status(struct ifnet *, struct ifmediareq *);

static void     ixlv_local_timer(void *);

static int      ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
static int      ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void     ixlv_init_filters(struct ixlv_sc *);
static void     ixlv_free_filters(struct ixlv_sc *);

static void     ixlv_msix_que(void *);
static void     ixlv_msix_adminq(void *);
static void     ixlv_do_adminq(void *, int);
static void     ixlv_do_adminq_locked(struct ixlv_sc *sc);
static void     ixlv_handle_que(void *, int);
static int      ixlv_reset(struct ixlv_sc *);
static int      ixlv_reset_complete(struct i40e_hw *);
static void     ixlv_set_queue_rx_itr(struct ixl_queue *);
static void     ixlv_set_queue_tx_itr(struct ixl_queue *);
static void     ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
                    enum i40e_status_code);

static void     ixlv_enable_adminq_irq(struct i40e_hw *);
static void     ixlv_disable_adminq_irq(struct i40e_hw *);
static void     ixlv_enable_queue_irq(struct i40e_hw *, int);
static void     ixlv_disable_queue_irq(struct i40e_hw *, int);

static void     ixlv_setup_vlan_filters(struct ixlv_sc *);
static void     ixlv_register_vlan(void *, struct ifnet *, u16);
static void     ixlv_unregister_vlan(void *, struct ifnet *, u16);

static void     ixlv_init_hw(struct ixlv_sc *);
static int      ixlv_setup_vc(struct ixlv_sc *);
static int      ixlv_vf_config(struct ixlv_sc *);

static void     ixlv_cap_txcsum_tso(struct ixl_vsi *,
                    struct ifnet *, int);

static void     ixlv_add_sysctls(struct ixlv_sc *);
static int      ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int      ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixlv_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, ixlv_probe),
        DEVMETHOD(device_attach, ixlv_attach),
        DEVMETHOD(device_detach, ixlv_detach),
        DEVMETHOD(device_shutdown, ixlv_shutdown),
        {0, 0}
};

static driver_t ixlv_driver = {
        "ixlv", ixlv_methods, sizeof(struct ixlv_sc),
};

devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);

MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
                   "IXLV driver parameters");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixlv_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixlv_ringsz, 0, "Descriptor Ring Size");
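
/*
** These knobs are all CTLFLAG_RDTUN, i.e. read-only sysctls seeded
** from boot-time tunables, so they are set from loader.conf rather
** than at runtime; for example (values here are purely illustrative,
** not recommendations):
**
**      hw.ixlv.ringsz="2048"
**      hw.ixlv.max_queues="4"
*/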

/* Set to zero to auto calculate */
int ixlv_max_queues = 0;
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixlv_max_queues, 0, "Number of Queues");

/*
** Number of entries in Tx queue buf_ring.
** Increasing this will reduce the number of
** errors when transmitting fragmented UDP
** packets.
*/
static int ixlv_txbrsz = DEFAULT_TXBRSZ;
TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
    &ixlv_txbrsz, 0, "TX Buf Ring Size");

/*
** Controls for Interrupt Throttling
**      - true/false for dynamic adjustment
**      - default values for static ITR
*/
int ixlv_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixlv_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixlv_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixlv_rx_itr, 0, "RX Interrupt Rate");

int ixlv_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixlv_tx_itr, 0, "TX Interrupt Rate");
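
/*
** N.B.: the IXL_ITR_* constants name an approximate maximum interrupt
** rate, e.g. IXL_ITR_8K throttles a vector to roughly 8000 interrupts
** per second (this family's ITR registers count in ~2-usec units).
*/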


/*********************************************************************
 *  Device identification routine
 *
 *  ixlv_probe determines if the driver should be loaded on
 *  the hardware based on the PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixlv_probe(device_t dev)
{
        ixl_vendor_info_t *ent;

        u16     pci_vendor_id, pci_device_id;
        u16     pci_subvendor_id, pci_subdevice_id;
        char    device_name[256];

        INIT_DEBUGOUT("ixlv_probe: begin");

        pci_vendor_id = pci_get_vendor(dev);
        if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
                return (ENXIO);

        pci_device_id = pci_get_device(dev);
        pci_subvendor_id = pci_get_subvendor(dev);
        pci_subdevice_id = pci_get_subdevice(dev);

        ent = ixlv_vendor_info_array;
        while (ent->vendor_id != 0) {
                if ((pci_vendor_id == ent->vendor_id) &&
                    (pci_device_id == ent->device_id) &&
                    ((pci_subvendor_id == ent->subvendor_id) ||
                     (ent->subvendor_id == 0)) &&
                    ((pci_subdevice_id == ent->subdevice_id) ||
                     (ent->subdevice_id == 0))) {
                        snprintf(device_name, sizeof(device_name),
                            "%s, Version - %s",
                            ixlv_strings[ent->index],
                            ixlv_driver_version);
                        device_set_desc_copy(dev, device_name);
                        return (BUS_PROBE_DEFAULT);
                }
                ent++;
        }
        return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_attach(device_t dev)
{
        struct ixlv_sc  *sc;
        struct i40e_hw  *hw;
        struct ixl_vsi  *vsi;
        int             error = 0;

        INIT_DBG_DEV(dev, "begin");

        /* Allocate, clear, and link in our primary soft structure */
        sc = device_get_softc(dev);
        sc->dev = sc->osdep.dev = dev;
        hw = &sc->hw;
        vsi = &sc->vsi;
        vsi->dev = dev;

        /* Initialize hw struct */
        ixlv_init_hw(sc);

        /* Allocate filter lists */
        ixlv_init_filters(sc);

        /* Core Lock Init */
        mtx_init(&sc->mtx, device_get_nameunit(dev),
            "IXL SC Lock", MTX_DEF);

        /* Set up the timer callout */
        callout_init_mtx(&sc->timer, &sc->mtx, 0);

        /* Do PCI setup - map BAR0, etc */
        if (ixlv_allocate_pci_resources(sc)) {
                device_printf(dev, "%s: Allocation of PCI resources failed\n",
                    __func__);
                error = ENXIO;
                goto err_early;
        }

        INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");

        error = i40e_set_mac_type(hw);
        if (error) {
                device_printf(dev, "%s: set_mac_type failed: %d\n",
                    __func__, error);
                goto err_pci_res;
        }

        error = ixlv_reset_complete(hw);
        if (error) {
                device_printf(dev, "%s: Device is still being reset\n",
                    __func__);
                goto err_pci_res;
        }

        INIT_DBG_DEV(dev, "VF Device is ready for configuration");

        error = ixlv_setup_vc(sc);
        if (error) {
                device_printf(dev, "%s: Error setting up PF comms, %d\n",
                    __func__, error);
                goto err_pci_res;
        }

        INIT_DBG_DEV(dev, "PF API version verified");

        /* TODO: Figure out why MDD events occur when this reset is removed. */
        /* Need API version before sending reset message */
        error = ixlv_reset(sc);
        if (error) {
                device_printf(dev, "VF reset failed; reload the driver\n");
                goto err_aq;
        }

        INIT_DBG_DEV(dev, "VF reset complete");

        /* Ask for VF config from PF */
        error = ixlv_vf_config(sc);
        if (error) {
                device_printf(dev, "Error getting configuration from PF: %d\n",
                    error);
                goto err_aq;
        }

        INIT_DBG_DEV(dev, "VF config from PF:");
        INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
            sc->vf_res->num_vsis,
            sc->vf_res->num_queue_pairs,
            sc->vf_res->max_vectors,
            sc->vf_res->max_mtu);
        INIT_DBG_DEV(dev, "Offload flags: %#010x",
            sc->vf_res->vf_offload_flags);

        // TODO: Move this into ixlv_vf_config?
        /* got VF config message back from PF, now we can parse it */
        for (int i = 0; i < sc->vf_res->num_vsis; i++) {
                if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
                        sc->vsi_res = &sc->vf_res->vsi_res[i];
        }
        if (!sc->vsi_res) {
                device_printf(dev, "%s: no LAN VSI found\n", __func__);
                error = EIO;
                goto err_res_buf;
        }

        INIT_DBG_DEV(dev, "Resource Acquisition complete");

        /* If no mac address was assigned just make a random one */
        if (!ixlv_check_ether_addr(hw->mac.addr)) {
                u8 addr[ETHER_ADDR_LEN];
                arc4rand(&addr, sizeof(addr), 0);
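                /*
                 * Clear the multicast bit and set the locally
                 * administered bit, so the random address is a
                 * valid unicast LAA per IEEE 802.
                 */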
                addr[0] &= 0xFE;
                addr[0] |= 0x02;
                bcopy(addr, hw->mac.addr, sizeof(addr));
        }

        vsi->id = sc->vsi_res->vsi_id;
        vsi->back = (void *)sc;
        sc->link_up = TRUE;

        /* This allocates the memory and early settings */
        if (ixlv_setup_queues(sc) != 0) {
                device_printf(dev, "%s: setup queues failed!\n",
                    __func__);
                error = EIO;
                goto out;
        }

        /* Setup the stack interface */
        if (ixlv_setup_interface(dev, sc) != 0) {
                device_printf(dev, "%s: setup interface failed!\n",
                    __func__);
                error = EIO;
                goto out;
        }

        INIT_DBG_DEV(dev, "Queue memory and interface setup");

        /* Do queue interrupt setup */
        ixlv_assign_msix(sc);

        /* Start AdminQ taskqueue */
        ixlv_init_taskqueue(sc);

        /* Initialize stats */
        bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
        ixlv_add_sysctls(sc);

        /* Register for VLAN events */
        vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
            ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
        vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
            ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

        /* We want AQ enabled early */
        ixlv_enable_adminq_irq(hw);

        /* Set things up to run init */
        sc->init_state = IXLV_INIT_READY;

        ixl_vc_init_mgr(sc, &sc->vc_mgr);

        INIT_DBG_DEV(dev, "end");
        return (error);

out:
        ixlv_free_queues(vsi);
err_res_buf:
        free(sc->vf_res, M_DEVBUF);
err_aq:
        i40e_shutdown_adminq(hw);
err_pci_res:
        ixlv_free_pci_resources(sc);
err_early:
        mtx_destroy(&sc->mtx);
        ixlv_free_filters(sc);
        INIT_DBG_DEV(dev, "end: error %d", error);
        return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_detach(device_t dev)
{
        struct ixlv_sc  *sc = device_get_softc(dev);
        struct ixl_vsi  *vsi = &sc->vsi;

        INIT_DBG_DEV(dev, "begin");

        /* Make sure VLANs are not using driver */
        if (vsi->ifp->if_vlantrunk != NULL) {
                if_printf(vsi->ifp, "Vlan in use, detach first\n");
                INIT_DBG_DEV(dev, "end");
                return (EBUSY);
        }

        /* Stop driver */
        ether_ifdetach(vsi->ifp);
        if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
                mtx_lock(&sc->mtx);
                ixlv_stop(sc);
                mtx_unlock(&sc->mtx);
        }

        /* Unregister VLAN events */
        if (vsi->vlan_attach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
        if (vsi->vlan_detach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

        /* Drain VC mgr */
        callout_drain(&sc->vc_mgr.callout);

        i40e_shutdown_adminq(&sc->hw);
        taskqueue_free(sc->tq);
        if_free(vsi->ifp);
        free(sc->vf_res, M_DEVBUF);
        ixlv_free_pci_resources(sc);
        ixlv_free_queues(vsi);
        mtx_destroy(&sc->mtx);
        ixlv_free_filters(sc);

        bus_generic_detach(dev);
        INIT_DBG_DEV(dev, "end");
        return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixlv_shutdown(device_t dev)
{
        struct ixlv_sc  *sc = device_get_softc(dev);

        INIT_DBG_DEV(dev, "begin");

        mtx_lock(&sc->mtx);
        ixlv_stop(sc);
        mtx_unlock(&sc->mtx);

        INIT_DBG_DEV(dev, "end");
        return (0);
}

/*
 * Configure TXCSUM(IPV6) and TSO(4/6)
 *      - the hardware handles these together so we
 *        need to tweak them
 */
static void
ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
        /* Enable/disable TXCSUM/TSO4 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable |= IFCAP_TXCSUM;
                        /* enable TXCSUM, restore TSO if previously enabled */
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                                ifp->if_capenable |= IFCAP_TSO4;
                        }
                }
                else if (mask & IFCAP_TSO4) {
                        ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                        if_printf(ifp,
                            "TSO4 requires txcsum, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM)
                        ifp->if_capenable &= ~IFCAP_TXCSUM;
                else if (mask & IFCAP_TSO4)
                        ifp->if_capenable |= IFCAP_TSO4;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && (ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO4;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
                        if_printf(ifp,
                            "TSO4 requires txcsum, disabling both...\n");
                } else if (mask & IFCAP_TSO4)
                        ifp->if_capenable &= ~IFCAP_TSO4;
        }

        /* Enable/disable TXCSUM_IPV6/TSO6 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                                ifp->if_capenable |= IFCAP_TSO6;
                        }
                } else if (mask & IFCAP_TSO6) {
                        ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                        if_printf(ifp,
                            "TSO6 requires txcsum6, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6)
                        ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
                else if (mask & IFCAP_TSO6)
                        ifp->if_capenable |= IFCAP_TSO6;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && (ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO6;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        if_printf(ifp,
                            "TSO6 requires txcsum6, disabling both...\n");
                } else if (mask & IFCAP_TSO6)
                        ifp->if_capenable &= ~IFCAP_TSO6;
        }
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixlv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        struct ixl_vsi          *vsi = ifp->if_softc;
        struct ixlv_sc          *sc = vsi->back;
        struct ifreq            *ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
        struct ifaddr           *ifa = (struct ifaddr *)data;
        bool                    avoid_reset = FALSE;
#endif
        int                     error = 0;

        switch (command) {

        case SIOCSIFADDR:
#ifdef INET
                if (ifa->ifa_addr->sa_family == AF_INET)
                        avoid_reset = TRUE;
#endif
#ifdef INET6
                if (ifa->ifa_addr->sa_family == AF_INET6)
                        avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
                /*
                ** Calling init results in link renegotiation,
                ** so we avoid doing it when possible.
                */
                if (avoid_reset) {
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                                ixlv_init(vsi);
#ifdef INET
                        if (!(ifp->if_flags & IFF_NOARP))
                                arp_ifinit(ifp, ifa);
#endif
                } else
                        error = ether_ioctl(ifp, command, data);
                break;
#endif
        case SIOCSIFMTU:
                IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
                mtx_lock(&sc->mtx);
                if (ifr->ifr_mtu > IXL_MAX_FRAME -
                    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
                        error = EINVAL;
                        IOCTL_DBG_IF(ifp, "mtu too large");
                } else {
                        IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", ifp->if_mtu, ifr->ifr_mtu);
                        // ERJ: Interestingly enough, these types don't match
                        ifp->if_mtu = (u_long)ifr->ifr_mtu;
                        vsi->max_frame_size =
                            ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
                            + ETHER_VLAN_ENCAP_LEN;
                        ixlv_init_locked(sc);
                }
                mtx_unlock(&sc->mtx);
                break;
        case SIOCSIFFLAGS:
                IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
                mtx_lock(&sc->mtx);
                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                                ixlv_init_locked(sc);
                } else
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                ixlv_stop(sc);
                sc->if_flags = ifp->if_flags;
                mtx_unlock(&sc->mtx);
                break;
        case SIOCADDMULTI:
                IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        mtx_lock(&sc->mtx);
                        ixlv_disable_intr(vsi);
                        ixlv_add_multi(vsi);
                        ixlv_enable_intr(vsi);
                        mtx_unlock(&sc->mtx);
                }
                break;
        case SIOCDELMULTI:
                IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
                if (sc->init_state == IXLV_RUNNING) {
                        mtx_lock(&sc->mtx);
                        ixlv_disable_intr(vsi);
                        ixlv_del_multi(vsi);
                        ixlv_enable_intr(vsi);
                        mtx_unlock(&sc->mtx);
                }
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
                error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
                break;
        case SIOCSIFCAP:
        {
                int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");

                ixlv_cap_txcsum_tso(vsi, ifp, mask);

                if (mask & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                if (mask & IFCAP_RXCSUM_IPV6)
                        ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
                if (mask & IFCAP_LRO)
                        ifp->if_capenable ^= IFCAP_LRO;
                if (mask & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                if (mask & IFCAP_VLAN_HWFILTER)
                        ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        ixlv_init(vsi);
                }
                VLAN_CAPABILITIES(ifp);

                break;
        }

        default:
                IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}

/*
** Reinitializing the VF is unfortunately more complicated than on a
** physical device: the PF must more or less completely recreate our
** memory, so many things that are done only once at attach in
** traditional drivers must be redone at each reinitialization here.
** This function does that 'prelude' so we can then call the normal
** locked init code.
*/
int
ixlv_reinit_locked(struct ixlv_sc *sc)
{
        struct i40e_hw          *hw = &sc->hw;
        struct ixl_vsi          *vsi = &sc->vsi;
        struct ifnet            *ifp = vsi->ifp;
        struct ixlv_mac_filter  *mf, *mf_temp;
        struct ixlv_vlan_filter *vf;
        int                     error = 0;

        INIT_DBG_IF(ifp, "begin");

        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                ixlv_stop(sc);

        error = ixlv_reset(sc);

        INIT_DBG_IF(ifp, "VF was reset");

        /* set the state in case we went thru RESET */
        sc->init_state = IXLV_RUNNING;

        /*
        ** Resetting the VF drops all filters from hardware;
        ** we need to mark them to be re-added in init.
        */
        SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
                if (mf->flags & IXL_FILTER_DEL) {
                        SLIST_REMOVE(sc->mac_filters, mf,
                            ixlv_mac_filter, next);
                        free(mf, M_DEVBUF);
                } else
                        mf->flags |= IXL_FILTER_ADD;
        }
        if (vsi->num_vlans != 0)
                SLIST_FOREACH(vf, sc->vlan_filters, next)
                        vf->flags = IXL_FILTER_ADD;
        else { /* clean any stale filters */
                while (!SLIST_EMPTY(sc->vlan_filters)) {
                        vf = SLIST_FIRST(sc->vlan_filters);
                        SLIST_REMOVE_HEAD(sc->vlan_filters, next);
                        free(vf, M_DEVBUF);
                }
        }

        ixlv_enable_adminq_irq(hw);
        ixl_vc_flush(&sc->vc_mgr);

        INIT_DBG_IF(ifp, "end");
        return (error);
}

static void
ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
        enum i40e_status_code code)
{
        struct ixlv_sc *sc;

        sc = arg;

        /*
         * Ignore "Adapter Stopped" message as that happens if an ifconfig down
         * happens while a command is in progress, so we don't print an error
         * in that case.
         */
        if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
                if_printf(sc->vsi.ifp,
                    "Error %d waiting for PF to complete operation %d\n",
                    code, cmd->request);
        }
}

static void
ixlv_init_locked(struct ixlv_sc *sc)
{
        struct i40e_hw          *hw = &sc->hw;
        struct ixl_vsi          *vsi = &sc->vsi;
        struct ixl_queue        *que = vsi->queues;
        struct ifnet            *ifp = vsi->ifp;
        int                      error = 0;

        INIT_DBG_IF(ifp, "begin");

        IXLV_CORE_LOCK_ASSERT(sc);

        /* Do a reinit first if an init has already been done */
        if ((sc->init_state == IXLV_RUNNING) ||
            (sc->init_state == IXLV_RESET_REQUIRED) ||
            (sc->init_state == IXLV_RESET_PENDING))
                error = ixlv_reinit_locked(sc);
        /* Don't bother with init if we failed reinit */
        if (error)
                goto init_done;

        /* Remove existing MAC filter if new MAC addr is set */
        if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
                error = ixlv_del_mac_filter(sc, hw->mac.addr);
                if (error == 0)
                        ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
                            IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
                            sc);
        }

        /* Check for an LAA mac address... */
        bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);

        ifp->if_hwassist = 0;
        if (ifp->if_capenable & IFCAP_TSO)
                ifp->if_hwassist |= CSUM_TSO;
        if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
        if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
                ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;

        /* Add mac filter for this VF to PF */
        if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
                error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
                if (!error || error == EEXIST)
                        ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
                            IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
                            sc);
        }

        /* Setup vlan's if needed */
        ixlv_setup_vlan_filters(sc);

        /* Prepare the queues for operation */
        for (int i = 0; i < vsi->num_queues; i++, que++) {
                struct  rx_ring *rxr = &que->rxr;

                ixl_init_tx_ring(que);

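                /*
                 * Pick an RX mbuf size large enough for the configured
                 * max frame: a standard 2K cluster when it fits,
                 * otherwise a page-sized jumbo cluster.
                 */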
                if (vsi->max_frame_size <= MCLBYTES)
                        rxr->mbuf_sz = MCLBYTES;
                else
                        rxr->mbuf_sz = MJUMPAGESIZE;
                ixl_init_rx_ring(que);
        }

        /* Configure queues */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
            IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);

        /* Set up RSS */
        ixlv_config_rss(sc);

        /* Map vectors */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
            IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);

        /* Enable queues */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
            IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);

        /* Start the local timer */
        callout_reset(&sc->timer, hz, ixlv_local_timer, sc);

        sc->init_state = IXLV_RUNNING;

init_done:
        INIT_DBG_IF(ifp, "end");
        return;
}

/*
**  Init entry point for the stack
*/
void
ixlv_init(void *arg)
{
        struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
        struct ixlv_sc *sc = vsi->back;
        int retries = 0;

        mtx_lock(&sc->mtx);
        ixlv_init_locked(sc);
        mtx_unlock(&sc->mtx);

        /* Wait for init_locked to finish (up to 100 * 10 ms) */
        while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
            && ++retries < 100) {
                i40e_msec_delay(10);
        }
        if (retries >= 100)
                if_printf(vsi->ifp,
                    "Init failed to complete in allotted time!\n");
}

/*
 * ixlv_attach() helper function; gathers information about
 * the (virtual) hardware for use elsewhere in the driver.
 */
static void
ixlv_init_hw(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;

        /* Save off the information about this board */
        hw->vendor_id = pci_get_vendor(dev);
        hw->device_id = pci_get_device(dev);
        hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
        hw->subsystem_vendor_id =
            pci_read_config(dev, PCIR_SUBVEND_0, 2);
        hw->subsystem_device_id =
            pci_read_config(dev, PCIR_SUBDEV_0, 2);

        hw->bus.device = pci_get_slot(dev);
        hw->bus.func = pci_get_function(dev);
}

/*
 * ixlv_attach() helper function; initializes the admin queue
 * and attempts to establish contact with the PF by
 * retrying the initial "API version" message several times
 * or until the PF responds.
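 *
 * Returns 0 on success, otherwise a positive code identifying the
 * step that failed (1: init_adminq, 2: send_api_ver, 3: ASQ timeout,
 * 4/5: API version verification), per the assignments below.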
 */
static int
ixlv_setup_vc(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;
        int error = 0, ret_error = 0, asq_retries = 0;
        bool send_api_ver_retried = false;

        /* Need to set these AQ parameters before initializing AQ */
        hw->aq.num_arq_entries = IXL_AQ_LEN;
        hw->aq.num_asq_entries = IXL_AQ_LEN;
        hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
        hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

        for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
                /* Initialize admin queue */
                error = i40e_init_adminq(hw);
                if (error) {
                        device_printf(dev, "%s: init_adminq failed: %d\n",
                            __func__, error);
                        ret_error = 1;
                        continue;
                }

                INIT_DBG_DEV(dev, "Initialized Admin Queue, attempt %d", i+1);

retry_send:
                /* Send VF's API version */
                error = ixlv_send_api_ver(sc);
                if (error) {
                        i40e_shutdown_adminq(hw);
                        ret_error = 2;
                        device_printf(dev, "%s: unable to send api"
                            " version to PF on attempt %d, error %d\n",
                            __func__, i+1, error);
                        /* The AQ was just shut down; retry from the top */
                        continue;
                }

                asq_retries = 0;
                while (!i40e_asq_done(hw)) {
                        if (++asq_retries > IXLV_AQ_MAX_ERR) {
                                i40e_shutdown_adminq(hw);
                                DDPRINTF(dev, "Admin Queue timeout "
                                    "(waiting for send_api_ver), %d more retries...",
                                    IXLV_AQ_MAX_ERR - (i + 1));
                                ret_error = 3;
                                break;
                        }
                        i40e_msec_delay(10);
                }
                if (asq_retries > IXLV_AQ_MAX_ERR)
                        continue;

                INIT_DBG_DEV(dev, "Sent API version message to PF");

                /* Verify that the VF accepts the PF's API version */
                error = ixlv_verify_api_ver(sc);
                if (error == ETIMEDOUT) {
                        if (!send_api_ver_retried) {
                                /* Resend message, one more time */
                                send_api_ver_retried = true;
                                device_printf(dev,
                                    "%s: Timeout while verifying API version on first"
                                    " try!\n", __func__);
                                goto retry_send;
                        } else {
                                device_printf(dev,
                                    "%s: Timeout while verifying API version on second"
                                    " try!\n", __func__);
                                ret_error = 4;
                                break;
                        }
                }
                if (error) {
                        device_printf(dev,
                            "%s: Unable to verify API version,"
                            " error %d\n", __func__, error);
                        ret_error = 5;
                }
                break;
        }

        if (ret_error >= 4)
                i40e_shutdown_adminq(hw);
        return (ret_error);
}

/*
 * ixlv_attach() helper function; asks the PF for this VF's
 * configuration, and saves the information if it receives it.
 */
static int
ixlv_vf_config(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;
        int bufsz, error = 0, ret_error = 0;
        int asq_retries, retried = 0;

retry_config:
        error = ixlv_send_vf_config_msg(sc);
        if (error) {
                device_printf(dev,
                    "%s: Unable to send VF config request, attempt %d,"
                    " error %d\n", __func__, retried + 1, error);
                ret_error = 2;
        }

        asq_retries = 0;
        while (!i40e_asq_done(hw)) {
                if (++asq_retries > IXLV_AQ_MAX_ERR) {
                        device_printf(dev, "%s: Admin Queue timeout "
                            "(waiting for send_vf_config_msg), attempt %d\n",
                            __func__, retried + 1);
                        ret_error = 3;
                        goto fail;
                }
                i40e_msec_delay(10);
        }

        INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
            retried + 1);

        if (!sc->vf_res) {
                bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
                    (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
                sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
                if (!sc->vf_res) {
                        device_printf(dev,
                            "%s: Unable to allocate memory for VF configuration"
                            " message from PF on attempt %d\n", __func__, retried + 1);
                        ret_error = 1;
                        goto fail;
                }
        }

        /* Check for VF config response */
        error = ixlv_get_vf_config(sc);
        if (error == ETIMEDOUT) {
                /* The 1st time we timeout, send the configuration message again */
                if (!retried) {
                        retried++;
                        goto retry_config;
                }
        }
        if (error) {
                device_printf(dev,
                    "%s: Unable to get VF configuration from PF after %d tries!\n",
                    __func__, retried + 1);
                ret_error = 4;
        }
        goto done;

fail:
        free(sc->vf_res, M_DEVBUF);
        /* Don't leave a dangling pointer for later error paths */
        sc->vf_res = NULL;
done:
        return (ret_error);
}

/*
 * Allocate MSI/X vectors, setup the AQ vector early
 */
static int
ixlv_init_msix(struct ixlv_sc *sc)
{
        device_t dev = sc->dev;
        int rid, want, vectors, queues, available;

        rid = PCIR_BAR(IXL_BAR);
        sc->msix_mem = bus_alloc_resource_any(dev,
            SYS_RES_MEMORY, &rid, RF_ACTIVE);
        if (!sc->msix_mem) {
                /* May not be enabled */
                device_printf(sc->dev,
                    "Unable to map MSIX table\n");
                goto fail;
        }

        available = pci_msix_count(dev);
        if (available == 0) { /* system has msix disabled */
                bus_release_resource(dev, SYS_RES_MEMORY,
                    rid, sc->msix_mem);
                sc->msix_mem = NULL;
                goto fail;
        }

        /* Figure out a reasonable auto config value */
        queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;

        /* Override with hardcoded value if sane */
        if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues))
                queues = ixlv_max_queues;
#ifdef  RSS
        /* If we're doing RSS, clamp at the number of RSS buckets */
        if (queues > rss_getnumbuckets())
                queues = rss_getnumbuckets();
#endif
        /* Enforce the VF max value */
        if (queues > IXLV_MAX_QUEUES)
                queues = IXLV_MAX_QUEUES;

        /*
        ** Want one vector (RX/TX pair) per queue
        ** plus an additional for the admin queue.
        */
        want = queues + 1;
        if (want <= available)  /* Have enough */
                vectors = want;
        else {
                device_printf(sc->dev,
                    "MSIX Configuration Problem, "
                    "%d vectors available but %d wanted!\n",
                    available, want);
                goto fail;
        }

#ifdef RSS
        /*
        * If we're doing RSS, the number of queues needs to
        * match the number of RSS buckets that are configured.
        *
        * + If there's more queues than RSS buckets, we'll end
        *   up with queues that get no traffic.
        *
        * + If there's more RSS buckets than queues, we'll end
        *   up having multiple RSS buckets map to the same queue,
        *   so there'll be some contention.
        */
        if (queues != rss_getnumbuckets()) {
                device_printf(dev,
                    "%s: queues (%d) != RSS buckets (%d)"
                    "; performance will be impacted.\n",
                     __func__, queues, rss_getnumbuckets());
        }
#endif

        if (pci_alloc_msix(dev, &vectors) == 0) {
                device_printf(sc->dev,
                    "Using MSIX interrupts with %d vectors\n", vectors);
                sc->msix = vectors;
                sc->vsi.num_queues = queues;
        } else {
                /*
                ** Allocation failed; the VF cannot fall back to
                ** legacy interrupts, so fail the setup here.
                */
                device_printf(sc->dev,
                    "Failed to allocate %d MSIX vectors\n", vectors);
                goto fail;
        }

        /*
        ** Explicitly set the guest PCI BUSMASTER capability, and rewrite
        ** the ENABLE bit in the MSIX control register at this point, so
        ** that the host successfully initializes us.
        */
        {
                u16 pci_cmd_word;
                int msix_ctrl;
                pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
                pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
                pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
                pci_find_cap(dev, PCIY_MSIX, &rid);
                rid += PCIR_MSIX_CTRL;
                msix_ctrl = pci_read_config(dev, rid, 2);
                msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
                pci_write_config(dev, rid, msix_ctrl, 2);
        }

        /* Next we need to setup the vector for the Admin Queue */
        rid = 1;        // zero vector + 1
        sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &rid, RF_SHAREABLE | RF_ACTIVE);
        if (sc->res == NULL) {
                device_printf(dev, "Unable to allocate"
                    " bus resource: AQ interrupt\n");
                goto fail;
        }
        if (bus_setup_intr(dev, sc->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixlv_msix_adminq, sc, &sc->tag)) {
                sc->res = NULL;
                device_printf(dev, "Failed to register AQ handler");
                goto fail;
        }
        bus_describe_intr(dev, sc->res, sc->tag, "adminq");

        return (vectors);

fail:
        /* The VF driver MUST use MSIX */
        return (0);
}

static int
ixlv_allocate_pci_resources(struct ixlv_sc *sc)
{
        int             rid;
        device_t        dev = sc->dev;

        rid = PCIR_BAR(0);
        sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &rid, RF_ACTIVE);

        if (!(sc->pci_mem)) {
                device_printf(dev, "Unable to allocate bus resource: memory\n");
                return (ENXIO);
        }

        sc->osdep.mem_bus_space_tag =
                rman_get_bustag(sc->pci_mem);
        sc->osdep.mem_bus_space_handle =
                rman_get_bushandle(sc->pci_mem);
        sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
        sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
        sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;

        sc->hw.back = &sc->osdep;

        /* Disable adminq interrupts */
        ixlv_disable_adminq_irq(&sc->hw);

        /*
        ** Now setup MSI/X, it will return
        ** us the number of supported vectors
        */
        sc->msix = ixlv_init_msix(sc);

        /* We fail without MSIX support */
        if (sc->msix == 0)
                return (ENXIO);

        return (0);
}

static void
ixlv_free_pci_resources(struct ixlv_sc *sc)
{
        struct ixl_vsi         *vsi = &sc->vsi;
        struct ixl_queue       *que = vsi->queues;
        device_t                dev = sc->dev;

        /* We may get here before stations are setup */
        if (que == NULL)
                goto early;

        /*
        **  Release all msix queue resources:
        */
        for (int i = 0; i < vsi->num_queues; i++, que++) {
                int rid = que->msix + 1;
                if (que->tag != NULL) {
                        bus_teardown_intr(dev, que->res, que->tag);
                        que->tag = NULL;
                }
                if (que->res != NULL)
                        bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
        }

early:
        /* Clean the AdminQ interrupt */
        if (sc->tag != NULL) {
                bus_teardown_intr(dev, sc->res, sc->tag);
                sc->tag = NULL;
        }
        if (sc->res != NULL)
                bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);

        pci_release_msi(dev);

        if (sc->msix_mem != NULL)
                bus_release_resource(dev, SYS_RES_MEMORY,
                    PCIR_BAR(IXL_BAR), sc->msix_mem);

        if (sc->pci_mem != NULL)
                bus_release_resource(dev, SYS_RES_MEMORY,
                    PCIR_BAR(0), sc->pci_mem);

        return;
}

/*
 * Create taskqueue and tasklet for Admin Queue interrupts.
 */
static int
ixlv_init_taskqueue(struct ixlv_sc *sc)
{
        int error = 0;

        TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);

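        /*
        ** Note: taskqueue_thread_enqueue dereferences its context
        ** pointer at enqueue time, which is why &sc->tq is passed
        ** here before sc->tq itself has been assigned.
        */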
        sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
            taskqueue_thread_enqueue, &sc->tq);
        taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
            device_get_nameunit(sc->dev));

        return (error);
}

/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers for the VSI queues
 *
 **********************************************************************/
static int
ixlv_assign_msix(struct ixlv_sc *sc)
{
        device_t        dev = sc->dev;
        struct          ixl_vsi *vsi = &sc->vsi;
        struct          ixl_queue *que = vsi->queues;
        struct          tx_ring  *txr;
        int             error, rid, vector = 1;

        for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
                int cpu_id = i;
                rid = vector + 1;
                txr = &que->txr;
                que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                    RF_SHAREABLE | RF_ACTIVE);
                if (que->res == NULL) {
                        device_printf(dev, "Unable to allocate"
                            " bus resource: que interrupt [%d]\n", vector);
                        return (ENXIO);
                }
                /* Set the handler function */
                error = bus_setup_intr(dev, que->res,
                    INTR_TYPE_NET | INTR_MPSAFE, NULL,
                    ixlv_msix_que, que, &que->tag);
                if (error) {
                        que->res = NULL;
                        device_printf(dev, "Failed to register que handler");
                        return (error);
                }
                bus_describe_intr(dev, que->res, que->tag, "que %d", i);
                /* Bind the vector to a CPU */
#ifdef RSS
                cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
                bus_bind_intr(dev, que->res, cpu_id);
                que->msix = vector;
                vsi->que_mask |= ((u64)1 << que->msix);
                TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
                TASK_INIT(&que->task, 0, ixlv_handle_que, que);
                que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
                    taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
                taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
                    cpu_id, "%s (bucket %d)",
                    device_get_nameunit(dev), cpu_id);
#else
                taskqueue_start_threads(&que->tq, 1, PI_NET,
                    "%s que", device_get_nameunit(dev));
#endif
        }

        return (0);
}

/*
** Requests a VF reset from the PF.
**
** Requires the VF's Admin Queue to be initialized.
*/
static int
ixlv_reset(struct ixlv_sc *sc)
{
        struct i40e_hw  *hw = &sc->hw;
        device_t        dev = sc->dev;
        int             error = 0;

        /* Ask the PF to reset us if we are initiating */
        if (sc->init_state != IXLV_RESET_PENDING)
                ixlv_request_reset(sc);

        i40e_msec_delay(100);
        error = ixlv_reset_complete(hw);
        if (error) {
                device_printf(dev, "%s: VF reset failed\n",
                    __func__);
                return (error);
        }

        error = i40e_shutdown_adminq(hw);
        if (error) {
                device_printf(dev, "%s: shutdown_adminq failed: %d\n",
                    __func__, error);
                return (error);
        }

        error = i40e_init_adminq(hw);
        if (error) {
                device_printf(dev, "%s: init_adminq failed: %d\n",
                    __func__, error);
                return (error);
        }

        return (0);
}
1471
1472 static int
1473 ixlv_reset_complete(struct i40e_hw *hw)
1474 {
1475         u32 reg;
1476
1477         for (int i = 0; i < 100; i++) {
1478                 reg = rd32(hw, I40E_VFGEN_RSTAT) &
1479                     I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1480
1481                 if ((reg == I40E_VFR_VFACTIVE) ||
1482                     (reg == I40E_VFR_COMPLETED))
1483                         return (0);
1484                 i40e_msec_delay(100);
1485         }
1486
1487         return (EBUSY);
1488 }
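/*
 * Editorial note: the loop above polls VFGEN_RSTAT up to 100 times with a
 * 100 ms delay between reads, i.e. a worst-case wait of roughly 10 seconds
 * before giving up with EBUSY.
 */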
1489
1490
1491 /*********************************************************************
1492  *
1493  *  Setup networking device structure and register an interface.
1494  *
1495  **********************************************************************/
1496 static int
1497 ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
1498 {
1499         struct ifnet            *ifp;
1500         struct ixl_vsi          *vsi = &sc->vsi;
1501         struct ixl_queue        *que = vsi->queues;
1502
1503         INIT_DBG_DEV(dev, "begin");
1504
1505         ifp = vsi->ifp = if_alloc(IFT_ETHER);
1506         if (ifp == NULL) {
1507                 device_printf(dev, "%s: could not allocate ifnet"
1508                     " structure!\n", __func__);
                return (ENOMEM);
1510         }
1511
1512         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1513
1514         ifp->if_mtu = ETHERMTU;
        ifp->if_baudrate = 4000000000;  /* placeholder; real VF speed is reported by the PF */
1516         ifp->if_init = ixlv_init;
1517         ifp->if_softc = vsi;
1518         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1519         ifp->if_ioctl = ixlv_ioctl;
1520
1521 #if __FreeBSD_version >= 1100000
1522         if_setgetcounterfn(ifp, ixl_get_counter);
1523 #endif
1524
1525         ifp->if_transmit = ixl_mq_start;
1526
1527         ifp->if_qflush = ixl_qflush;
1528         ifp->if_snd.ifq_maxlen = que->num_desc - 2;
1529
1530         ether_ifattach(ifp, sc->hw.mac.addr);
1531
1532         vsi->max_frame_size =
1533             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1534             + ETHER_VLAN_ENCAP_LEN;
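        /*
         * Editorial worked example: with the default ETHERMTU of 1500,
         * max_frame_size = 1500 + 14 (Ethernet header) + 4 (CRC)
         * + 4 (VLAN tag) = 1522 bytes.
         */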
1535
1536         /*
1537          * Tell the upper layer(s) we support long frames.
1538          */
1539         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1540
1541         ifp->if_capabilities |= IFCAP_HWCSUM;
1542         ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1543         ifp->if_capabilities |= IFCAP_TSO;
1544         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1545
1546         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1547                              |  IFCAP_VLAN_HWTSO
1548                              |  IFCAP_VLAN_MTU
1549                              |  IFCAP_VLAN_HWCSUM
1550                              |  IFCAP_LRO;
1551         ifp->if_capenable = ifp->if_capabilities;
1552
1553         /*
        ** Don't enable VLAN_HWFILTER by default: if vlans are
        ** created on another pseudo device (e.g. lagg) the vlan
        ** events are not passed through, breaking operation,
        ** but with HW FILTER off it works. If vlans are used
        ** directly on the ixl driver, this can be enabled for
        ** full hardware tag filtering.
1560         */
1561         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1562
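        /*
         * Editorial usage note: since the capability is advertised but not
         * placed in if_capenable, an administrator can opt in at runtime
         * with something like "ifconfig ixlv0 vlanhwfilter" (interface name
         * and unit illustrative).
         */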
1563         /*
1564          * Specify the media types supported by this adapter and register
1565          * callbacks to update media and link information
1566          */
1567         ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
1568                      ixlv_media_status);
1569
        /* TODO (JFV): add more media types later? */
1571
1572         ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1573         ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1574
1575         INIT_DBG_DEV(dev, "end");
1576         return (0);
1577 }
1578
1579 /*
1580 ** Allocate and setup the interface queues
1581 */
1582 static int
1583 ixlv_setup_queues(struct ixlv_sc *sc)
1584 {
1585         device_t                dev = sc->dev;
1586         struct ixl_vsi          *vsi;
1587         struct ixl_queue        *que;
1588         struct tx_ring          *txr;
1589         struct rx_ring          *rxr;
1590         int                     rsize, tsize;
1591         int                     error = I40E_SUCCESS;
1592
1593         vsi = &sc->vsi;
1594         vsi->back = (void *)sc;
1595         vsi->hw = &sc->hw;
1596         vsi->num_vlans = 0;
1597
1598         /* Get memory for the station queues */
1599         if (!(vsi->queues =
1600                 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
1601                 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1602                         device_printf(dev, "Unable to allocate queue memory\n");
1603                         error = ENOMEM;
1604                         goto early;
1605         }
1606
1607         for (int i = 0; i < vsi->num_queues; i++) {
1608                 que = &vsi->queues[i];
1609                 que->num_desc = ixlv_ringsz;
1610                 que->me = i;
1611                 que->vsi = vsi;
1612                 /* mark the queue as active */
1613                 vsi->active_queues |= (u64)1 << que->me;
1614
1615                 txr = &que->txr;
1616                 txr->que = que;
1617                 txr->tail = I40E_QTX_TAIL1(que->me);
1618                 /* Initialize the TX lock */
1619                 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
1620                     device_get_nameunit(dev), que->me);
1621                 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
                /*
                ** Create the TX descriptor ring; the extra u32 at
                ** the end is the location for HEAD WB (transmit
                ** head writeback).
                */
1626                 tsize = roundup2((que->num_desc *
1627                     sizeof(struct i40e_tx_desc)) +
1628                     sizeof(u32), DBA_ALIGN);
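                /*
                 * Editorial worked example (constants from ixl.h): with the
                 * default ring of 1024 descriptors, tsize = 1024 * 16
                 * (sizeof(struct i40e_tx_desc)) + 4 = 16388 bytes, rounded
                 * up to the 128-byte DBA_ALIGN boundary -> 16512 bytes.
                 */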
1629                 if (i40e_allocate_dma_mem(&sc->hw,
1630                     &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
1631                         device_printf(dev,
1632                             "Unable to allocate TX Descriptor memory\n");
1633                         error = ENOMEM;
1634                         goto fail;
1635                 }
1636                 txr->base = (struct i40e_tx_desc *)txr->dma.va;
1637                 bzero((void *)txr->base, tsize);
1638                 /* Now allocate transmit soft structs for the ring */
1639                 if (ixl_allocate_tx_data(que)) {
1640                         device_printf(dev,
1641                             "Critical Failure setting up TX structures\n");
1642                         error = ENOMEM;
1643                         goto fail;
1644                 }
                /* Allocate a buf ring (M_NOWAIT, so the failure check below is live) */
                txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
                    M_NOWAIT, &txr->mtx);
1648                 if (txr->br == NULL) {
1649                         device_printf(dev,
1650                             "Critical Failure setting up TX buf ring\n");
1651                         error = ENOMEM;
1652                         goto fail;
1653                 }
1654
1655                 /*
1656                  * Next the RX queues...
1657                  */ 
1658                 rsize = roundup2(que->num_desc *
1659                     sizeof(union i40e_rx_desc), DBA_ALIGN);
1660                 rxr = &que->rxr;
1661                 rxr->que = que;
1662                 rxr->tail = I40E_QRX_TAIL1(que->me);
1663
1664                 /* Initialize the RX side lock */
1665                 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
1666                     device_get_nameunit(dev), que->me);
1667                 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
1668
1669                 if (i40e_allocate_dma_mem(&sc->hw,
                    &rxr->dma, i40e_mem_reserved, rsize, 4096)) { /* XXX (JFV): should this be DBA_ALIGN? */
1671                         device_printf(dev,
1672                             "Unable to allocate RX Descriptor memory\n");
1673                         error = ENOMEM;
1674                         goto fail;
1675                 }
1676                 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
1677                 bzero((void *)rxr->base, rsize);
1678
1679                 /* Allocate receive soft structs for the ring*/
1680                 if (ixl_allocate_rx_data(que)) {
1681                         device_printf(dev,
1682                             "Critical Failure setting up receive structs\n");
1683                         error = ENOMEM;
1684                         goto fail;
1685                 }
1686         }
1687
1688         return (0);
1689
1690 fail:
1691         for (int i = 0; i < vsi->num_queues; i++) {
1692                 que = &vsi->queues[i];
1693                 rxr = &que->rxr;
1694                 txr = &que->txr;
1695                 if (rxr->base)
1696                         i40e_free_dma_mem(&sc->hw, &rxr->dma);
1697                 if (txr->base)
1698                         i40e_free_dma_mem(&sc->hw, &txr->dma);
1699         }
1700         free(vsi->queues, M_DEVBUF);
1701
1702 early:
1703         return (error);
1704 }
1705
1706 /*
** This routine is run via a vlan config EVENT;
** it enables us to use the HW filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA; init will
** repopulate the real table.
1712 */
1713 static void
1714 ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1715 {
1716         struct ixl_vsi          *vsi = arg;
1717         struct ixlv_sc          *sc = vsi->back;
1718         struct ixlv_vlan_filter *v;
1719
1720
1721         if (ifp->if_softc != arg)   /* Not our event */
1722                 return;
1723
1724         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1725                 return;
1726
1727         /* Sanity check - make sure it doesn't already exist */
        /* Sanity check - make sure it doesn't already exist */
        mtx_lock(&sc->mtx);
        SLIST_FOREACH(v, sc->vlan_filters, next) {
                if (v->vlan == vtag) {
                        mtx_unlock(&sc->mtx);
                        return;
                }
        }

        v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (v == NULL) {
                mtx_unlock(&sc->mtx);
                return;
        }
        ++vsi->num_vlans;
        v->vlan = vtag;
        v->flags = IXL_FILTER_ADD;
        SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
        ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
            IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
        mtx_unlock(&sc->mtx);
1742         return;
1743 }
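/*
 * Editorial usage note: this handler fires through the vlan_config
 * EVENTHANDLER, e.g. when an administrator runs something like
 * "ifconfig vlan0 create vlan 100 vlandev ixlv0" (names illustrative);
 * init later pushes the soft VFTA entries to the hardware.
 */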
1744
1745 /*
** This routine is run via a vlan
** unconfig EVENT; it removes our entry
** from the soft VFTA.
1749 */
1750 static void
1751 ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1752 {
1753         struct ixl_vsi          *vsi = arg;
1754         struct ixlv_sc          *sc = vsi->back;
1755         struct ixlv_vlan_filter *v;
1756         int                     i = 0;
1757         
1758         if (ifp->if_softc != arg)
1759                 return;
1760
1761         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1762                 return;
1763
1764         mtx_lock(&sc->mtx);
1765         SLIST_FOREACH(v, sc->vlan_filters, next) {
1766                 if (v->vlan == vtag) {
1767                         v->flags = IXL_FILTER_DEL;
1768                         ++i;
1769                         --vsi->num_vlans;
1770                 }
1771         }
1772         if (i)
1773                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
1774                     IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
1775         mtx_unlock(&sc->mtx);
1776         return;
1777 }
1778
1779 /*
1780 ** Get a new filter and add it to the mac filter list.
1781 */
1782 static struct ixlv_mac_filter *
1783 ixlv_get_mac_filter(struct ixlv_sc *sc)
1784 {
1785         struct ixlv_mac_filter  *f;
1786
1787         f = malloc(sizeof(struct ixlv_mac_filter),
1788             M_DEVBUF, M_NOWAIT | M_ZERO);
1789         if (f)
1790                 SLIST_INSERT_HEAD(sc->mac_filters, f, next);
1791
1792         return (f);
1793 }
1794
1795 /*
1796 ** Find the filter with matching MAC address
1797 */
1798 static struct ixlv_mac_filter *
1799 ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
1800 {
1801         struct ixlv_mac_filter  *f;
1802         bool                            match = FALSE;
1803
1804         SLIST_FOREACH(f, sc->mac_filters, next) {
1805                 if (cmp_etheraddr(f->macaddr, macaddr)) {
1806                         match = TRUE;
1807                         break;
1808                 }
1809         }       
1810
1811         if (!match)
1812                 f = NULL;
1813         return (f);
1814 }
1815
1816 /*
1817 ** Admin Queue interrupt handler
1818 */
1819 static void
1820 ixlv_msix_adminq(void *arg)
1821 {
1822         struct ixlv_sc  *sc = arg;
1823         struct i40e_hw  *hw = &sc->hw;
1824         u32             reg, mask;
1825
1826         reg = rd32(hw, I40E_VFINT_ICR01);
1827         mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
1828
1829         reg = rd32(hw, I40E_VFINT_DYN_CTL01);
1830         reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
1831         wr32(hw, I40E_VFINT_DYN_CTL01, reg);
1832
1833         /* schedule task */
1834         taskqueue_enqueue(sc->tq, &sc->aq_irq);
1835         return;
1836 }
1837
1838 void
1839 ixlv_enable_intr(struct ixl_vsi *vsi)
1840 {
1841         struct i40e_hw          *hw = vsi->hw;
1842         struct ixl_queue        *que = vsi->queues;
1843
1844         ixlv_enable_adminq_irq(hw);
1845         for (int i = 0; i < vsi->num_queues; i++, que++)
1846                 ixlv_enable_queue_irq(hw, que->me);
1847 }
1848
1849 void
1850 ixlv_disable_intr(struct ixl_vsi *vsi)
1851 {
1852         struct i40e_hw          *hw = vsi->hw;
1853         struct ixl_queue       *que = vsi->queues;
1854
1855         ixlv_disable_adminq_irq(hw);
1856         for (int i = 0; i < vsi->num_queues; i++, que++)
1857                 ixlv_disable_queue_irq(hw, que->me);
1858 }
1859
1860
1861 static void
1862 ixlv_disable_adminq_irq(struct i40e_hw *hw)
1863 {
1864         wr32(hw, I40E_VFINT_DYN_CTL01, 0);
1865         wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
1866         /* flush */
1867         rd32(hw, I40E_VFGEN_RSTAT);
1868         return;
1869 }
1870
1871 static void
1872 ixlv_enable_adminq_irq(struct i40e_hw *hw)
1873 {
1874         wr32(hw, I40E_VFINT_DYN_CTL01,
1875             I40E_VFINT_DYN_CTL01_INTENA_MASK |
1876             I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1877         wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
1878         /* flush */
1879         rd32(hw, I40E_VFGEN_RSTAT);
1880         return;
1881 }
1882
1883 static void
1884 ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
1885 {
1886         u32             reg;
1887
1888         reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1889             I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK; 
1890         wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
1891 }
1892
1893 static void
1894 ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
1895 {
1896         wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
1897         rd32(hw, I40E_VFGEN_RSTAT);
1898         return;
1899 }
1900
1901
1902 /*
** Provide an update to the queue RX
1904 ** interrupt moderation value.
1905 */
1906 static void
1907 ixlv_set_queue_rx_itr(struct ixl_queue *que)
1908 {
1909         struct ixl_vsi  *vsi = que->vsi;
1910         struct i40e_hw  *hw = vsi->hw;
1911         struct rx_ring  *rxr = &que->rxr;
1912         u16             rx_itr;
1913         u16             rx_latency = 0;
1914         int             rx_bytes;
1915
1916
1917         /* Idle, do nothing */
1918         if (rxr->bytes == 0)
1919                 return;
1920
1921         if (ixlv_dynamic_rx_itr) {
1922                 rx_bytes = rxr->bytes/rxr->itr;
1923                 rx_itr = rxr->itr;
1924
1925                 /* Adjust latency range */
1926                 switch (rxr->latency) {
1927                 case IXL_LOW_LATENCY:
1928                         if (rx_bytes > 10) {
1929                                 rx_latency = IXL_AVE_LATENCY;
1930                                 rx_itr = IXL_ITR_20K;
1931                         }
1932                         break;
1933                 case IXL_AVE_LATENCY:
1934                         if (rx_bytes > 20) {
1935                                 rx_latency = IXL_BULK_LATENCY;
1936                                 rx_itr = IXL_ITR_8K;
1937                         } else if (rx_bytes <= 10) {
1938                                 rx_latency = IXL_LOW_LATENCY;
1939                                 rx_itr = IXL_ITR_100K;
1940                         }
1941                         break;
1942                 case IXL_BULK_LATENCY:
1943                         if (rx_bytes <= 20) {
1944                                 rx_latency = IXL_AVE_LATENCY;
1945                                 rx_itr = IXL_ITR_20K;
1946                         }
1947                         break;
1948                  }
1949
1950                 rxr->latency = rx_latency;
1951
1952                 if (rx_itr != rxr->itr) {
1953                         /* do an exponential smoothing */
1954                         rx_itr = (10 * rx_itr * rxr->itr) /
1955                             ((9 * rx_itr) + rxr->itr);
1956                         rxr->itr = rx_itr & IXL_MAX_ITR;
1957                         wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1958                             que->me), rxr->itr);
1959                 }
        } else { /* We may have toggled to non-dynamic */
1961                 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
1962                         vsi->rx_itr_setting = ixlv_rx_itr;
1963                 /* Update the hardware if needed */
1964                 if (rxr->itr != vsi->rx_itr_setting) {
1965                         rxr->itr = vsi->rx_itr_setting;
1966                         wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1967                             que->me), rxr->itr);
1968                 }
1969         }
1970         rxr->bytes = 0;
1971         rxr->packets = 0;
1972         return;
1973 }
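/*
 * Editorial worked example (ITR values from ixl.h): stepping from
 * IXL_ITR_20K (0x19 == 25) toward IXL_ITR_8K (0x3E == 62), the smoothing
 * above yields (10 * 62 * 25) / ((9 * 62) + 25) == 15500 / 583 == 26,
 * so the ITR creeps toward the new target instead of jumping to it.
 */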
1974
1975
1976 /*
** Provide an update to the queue TX
1978 ** interrupt moderation value.
1979 */
1980 static void
1981 ixlv_set_queue_tx_itr(struct ixl_queue *que)
1982 {
1983         struct ixl_vsi  *vsi = que->vsi;
1984         struct i40e_hw  *hw = vsi->hw;
1985         struct tx_ring  *txr = &que->txr;
1986         u16             tx_itr;
1987         u16             tx_latency = 0;
1988         int             tx_bytes;
1989
1990
1991         /* Idle, do nothing */
1992         if (txr->bytes == 0)
1993                 return;
1994
1995         if (ixlv_dynamic_tx_itr) {
1996                 tx_bytes = txr->bytes/txr->itr;
1997                 tx_itr = txr->itr;
1998
1999                 switch (txr->latency) {
2000                 case IXL_LOW_LATENCY:
2001                         if (tx_bytes > 10) {
2002                                 tx_latency = IXL_AVE_LATENCY;
2003                                 tx_itr = IXL_ITR_20K;
2004                         }
2005                         break;
2006                 case IXL_AVE_LATENCY:
2007                         if (tx_bytes > 20) {
2008                                 tx_latency = IXL_BULK_LATENCY;
2009                                 tx_itr = IXL_ITR_8K;
2010                         } else if (tx_bytes <= 10) {
2011                                 tx_latency = IXL_LOW_LATENCY;
2012                                 tx_itr = IXL_ITR_100K;
2013                         }
2014                         break;
2015                 case IXL_BULK_LATENCY:
2016                         if (tx_bytes <= 20) {
2017                                 tx_latency = IXL_AVE_LATENCY;
2018                                 tx_itr = IXL_ITR_20K;
2019                         }
2020                         break;
2021                 }
2022
2023                 txr->latency = tx_latency;
2024
                if (tx_itr != txr->itr) {
                        /* do an exponential smoothing */
                        tx_itr = (10 * tx_itr * txr->itr) /
                            ((9 * tx_itr) + txr->itr);
2029                         txr->itr = tx_itr & IXL_MAX_ITR;
2030                         wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2031                             que->me), txr->itr);
2032                 }
2033
        } else { /* We may have toggled to non-dynamic */
2035                 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2036                         vsi->tx_itr_setting = ixlv_tx_itr;
2037                 /* Update the hardware if needed */
2038                 if (txr->itr != vsi->tx_itr_setting) {
2039                         txr->itr = vsi->tx_itr_setting;
2040                         wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2041                             que->me), txr->itr);
2042                 }
2043         }
2044         txr->bytes = 0;
2045         txr->packets = 0;
2046         return;
2047 }
2048
2049
2050 /*
2051 **
2052 ** MSIX Interrupt Handlers and Tasklets
2053 **
2054 */
2055 static void
2056 ixlv_handle_que(void *context, int pending)
2057 {
2058         struct ixl_queue *que = context;
2059         struct ixl_vsi *vsi = que->vsi;
2060         struct i40e_hw  *hw = vsi->hw;
2061         struct tx_ring  *txr = &que->txr;
2062         struct ifnet    *ifp = vsi->ifp;
2063         bool            more;
2064
2065         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2066                 more = ixl_rxeof(que, IXL_RX_LIMIT);
2067                 mtx_lock(&txr->mtx);
2068                 ixl_txeof(que);
2069                 if (!drbr_empty(ifp, txr->br))
2070                         ixl_mq_start_locked(ifp, txr);
2071                 mtx_unlock(&txr->mtx);
2072                 if (more) {
2073                         taskqueue_enqueue(que->tq, &que->task);
2074                         return;
2075                 }
2076         }
2077
        /* Re-enable this interrupt */
2079         ixlv_enable_queue_irq(hw, que->me);
2080         return;
2081 }
2082
2083
2084 /*********************************************************************
2085  *
2086  *  MSIX Queue Interrupt Service routine
2087  *
2088  **********************************************************************/
2089 static void
2090 ixlv_msix_que(void *arg)
2091 {
2092         struct ixl_queue        *que = arg;
2093         struct ixl_vsi  *vsi = que->vsi;
2094         struct i40e_hw  *hw = vsi->hw;
2095         struct tx_ring  *txr = &que->txr;
2096         bool            more_tx, more_rx;
2097
2098         /* Spurious interrupts are ignored */
2099         if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
2100                 return;
2101
2102         ++que->irqs;
2103
2104         more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
2105
2106         mtx_lock(&txr->mtx);
2107         more_tx = ixl_txeof(que);
2108         /*
2109         ** Make certain that if the stack 
2110         ** has anything queued the task gets
2111         ** scheduled to handle it.
2112         */
2113         if (!drbr_empty(vsi->ifp, txr->br))
2114                 more_tx = 1;
2115         mtx_unlock(&txr->mtx);
2116
2117         ixlv_set_queue_rx_itr(que);
2118         ixlv_set_queue_tx_itr(que);
2119
2120         if (more_tx || more_rx)
2121                 taskqueue_enqueue(que->tq, &que->task);
2122         else
2123                 ixlv_enable_queue_irq(hw, que->me);
2124
2125         return;
2126 }
2127
2128
2129 /*********************************************************************
2130  *
2131  *  Media Ioctl callback
2132  *
2133  *  This routine is called whenever the user queries the status of
2134  *  the interface using ifconfig.
2135  *
2136  **********************************************************************/
2137 static void
2138 ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
2139 {
2140         struct ixl_vsi          *vsi = ifp->if_softc;
2141         struct ixlv_sc  *sc = vsi->back;
2142
2143         INIT_DBG_IF(ifp, "begin");
2144
2145         mtx_lock(&sc->mtx);
2146
2147         ixlv_update_link_status(sc);
2148
2149         ifmr->ifm_status = IFM_AVALID;
2150         ifmr->ifm_active = IFM_ETHER;
2151
2152         if (!sc->link_up) {
2153                 mtx_unlock(&sc->mtx);
2154                 INIT_DBG_IF(ifp, "end: link not up");
2155                 return;
2156         }
2157
2158         ifmr->ifm_status |= IFM_ACTIVE;
2159         /* Hardware is always full-duplex */
2160         ifmr->ifm_active |= IFM_FDX;
2161         mtx_unlock(&sc->mtx);
2162         INIT_DBG_IF(ifp, "end");
2163         return;
2164 }
2165
2166 /*********************************************************************
2167  *
2168  *  Media Ioctl callback
2169  *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt options with ifconfig.
2172  *
2173  **********************************************************************/
2174 static int
2175 ixlv_media_change(struct ifnet * ifp)
2176 {
        struct ixl_vsi *vsi = ifp->if_softc;
        struct ixlv_sc *sc = vsi->back;
        struct ifmedia *ifm = &sc->media;      /* media was initialized on sc, not the VSI */
2179
2180         INIT_DBG_IF(ifp, "begin");
2181
2182         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2183                 return (EINVAL);
2184
2185         INIT_DBG_IF(ifp, "end");
2186         return (0);
2187 }
2188
2189
2190 /*********************************************************************
2191  *  Multicast Initialization
2192  *
 *  This routine is called by init to reset to a fresh multicast state.
2194  *
2195  **********************************************************************/
2196
2197 static void
2198 ixlv_init_multi(struct ixl_vsi *vsi)
2199 {
2200         struct ixlv_mac_filter *f;
2201         struct ixlv_sc  *sc = vsi->back;
2202         int                     mcnt = 0;
2203
2204         IOCTL_DBG_IF(vsi->ifp, "begin");
2205
2206         /* First clear any multicast filters */
2207         SLIST_FOREACH(f, sc->mac_filters, next) {
2208                 if ((f->flags & IXL_FILTER_USED)
2209                     && (f->flags & IXL_FILTER_MC)) {
2210                         f->flags |= IXL_FILTER_DEL;
2211                         mcnt++;
2212                 }
2213         }
2214         if (mcnt > 0)
2215                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2216                     IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2217                     sc);
2218
2219         IOCTL_DBG_IF(vsi->ifp, "end");
2220 }
2221
2222 static void
2223 ixlv_add_multi(struct ixl_vsi *vsi)
2224 {
2225         struct ifmultiaddr      *ifma;
2226         struct ifnet            *ifp = vsi->ifp;
2227         struct ixlv_sc  *sc = vsi->back;
2228         int                     mcnt = 0;
2229
2230         IOCTL_DBG_IF(ifp, "begin");
2231
2232         if_maddr_rlock(ifp);
2233         /*
2234         ** Get a count, to decide if we
2235         ** simply use multicast promiscuous.
2236         */
2237         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2238                 if (ifma->ifma_addr->sa_family != AF_LINK)
2239                         continue;
2240                 mcnt++;
2241         }
2242         if_maddr_runlock(ifp);
2243
2244         // TODO: Remove -- cannot set promiscuous mode in a VF
2245         if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
2246                 /* delete all multicast filters */
2247                 ixlv_init_multi(vsi);
2248                 sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
2249                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2250                     IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
2251                     sc);
2252                 IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
2253                 return;
2254         }
2255
2256         mcnt = 0;
2257         if_maddr_rlock(ifp);
2258         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2259                 if (ifma->ifma_addr->sa_family != AF_LINK)
2260                         continue;
2261                 if (!ixlv_add_mac_filter(sc,
2262                     (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2263                     IXL_FILTER_MC))
2264                         mcnt++;
2265         }
2266         if_maddr_runlock(ifp);
2267         /*
2268         ** Notify AQ task that sw filters need to be
2269         ** added to hw list
2270         */
2271         if (mcnt > 0)
2272                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2273                     IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
2274                     sc);
2275
2276         IOCTL_DBG_IF(ifp, "end");
2277 }
2278
2279 static void
2280 ixlv_del_multi(struct ixl_vsi *vsi)
2281 {
2282         struct ixlv_mac_filter *f;
2283         struct ifmultiaddr      *ifma;
2284         struct ifnet            *ifp = vsi->ifp;
2285         struct ixlv_sc  *sc = vsi->back;
2286         int                     mcnt = 0;
2287         bool            match = FALSE;
2288
2289         IOCTL_DBG_IF(ifp, "begin");
2290
2291         /* Search for removed multicast addresses */
2292         if_maddr_rlock(ifp);
2293         SLIST_FOREACH(f, sc->mac_filters, next) {
2294                 if ((f->flags & IXL_FILTER_USED)
2295                     && (f->flags & IXL_FILTER_MC)) {
2296                         /* check if mac address in filter is in sc's list */
2297                         match = FALSE;
2298                         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2299                                 if (ifma->ifma_addr->sa_family != AF_LINK)
2300                                         continue;
2301                                 u8 *mc_addr =
2302                                     (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2303                                 if (cmp_etheraddr(f->macaddr, mc_addr)) {
2304                                         match = TRUE;
2305                                         break;
2306                                 }
2307                         }
2308                         /* if this filter is not in the sc's list, remove it */
2309                         if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
2310                                 f->flags |= IXL_FILTER_DEL;
2311                                 mcnt++;
2312                                 IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
2313                                     MAC_FORMAT_ARGS(f->macaddr));
2314                         }
2315                         else if (match == FALSE)
2316                                 IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
2317                                     MAC_FORMAT_ARGS(f->macaddr));
2318                 }
2319         }
2320         if_maddr_runlock(ifp);
2321
2322         if (mcnt > 0)
2323                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2324                     IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2325                     sc);
2326
2327         IOCTL_DBG_IF(ifp, "end");
2328 }
2329
2330 /*********************************************************************
2331  *  Timer routine
2332  *
 *  This routine checks for link status, updates statistics,
2334  *  and runs the watchdog check.
2335  *
2336  **********************************************************************/
2337
2338 static void
2339 ixlv_local_timer(void *arg)
2340 {
2341         struct ixlv_sc  *sc = arg;
2342         struct i40e_hw          *hw = &sc->hw;
2343         struct ixl_vsi          *vsi = &sc->vsi;
2344         struct ixl_queue        *que = vsi->queues;
2345         device_t                dev = sc->dev;
2346         int                     hung = 0;
2347         u32                     mask, val;
2348
2349         IXLV_CORE_LOCK_ASSERT(sc);
2350
2351         /* If Reset is in progress just bail */
2352         if (sc->init_state == IXLV_RESET_PENDING)
2353                 return;
2354
2355         /* Check for when PF triggers a VF reset */
2356         val = rd32(hw, I40E_VFGEN_RSTAT) &
2357             I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2358
2359         if (val != I40E_VFR_VFACTIVE
2360             && val != I40E_VFR_COMPLETED) {
2361                 DDPRINTF(dev, "reset in progress! (%d)", val);
2362                 return;
2363         }
2364
2365         ixlv_request_stats(sc);
2366
2367         /* clean and process any events */
2368         taskqueue_enqueue(sc->tq, &sc->aq_irq);
2369
2370         /*
2371         ** Check status on the queues for a hang
2372         */
2373         mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
2374             I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK);
2375
2376         for (int i = 0; i < vsi->num_queues; i++,que++) {
2377                 /* Any queues with outstanding work get a sw irq */
2378                 if (que->busy)
2379                         wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
2380                 /*
                ** Each time txeof runs without cleaning while there
                ** are still uncleaned descriptors, busy is incremented;
                ** at IXL_MAX_TX_BUSY we declare the queue hung.
2384                 */
2385                 if (que->busy == IXL_QUEUE_HUNG) {
2386                         ++hung;
2387                         /* Mark the queue as inactive */
2388                         vsi->active_queues &= ~((u64)1 << que->me);
2389                         continue;
2390                 } else {
2391                         /* Check if we've come back from hung */
2392                         if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
2393                                 vsi->active_queues |= ((u64)1 << que->me);
2394                 }
2395                 if (que->busy >= IXL_MAX_TX_BUSY) {
2396                         device_printf(dev,"Warning queue %d "
2397                             "appears to be hung!\n", i);
2398                         que->busy = IXL_QUEUE_HUNG;
2399                         ++hung;
2400                 }
2401         }
2402         /* Only reset when all queues show hung */
2403         if (hung == vsi->num_queues)
2404                 goto hung;
2405         callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
2406         return;
2407
2408 hung:
2409         device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
2410         sc->init_state = IXLV_RESET_REQUIRED;
2411         ixlv_init_locked(sc);
2412 }
2413
2414 /*
** Note: this routine updates the OS on the link state;
2416 **      the real check of the hardware only happens with
2417 **      a link interrupt.
2418 */
2419 void
2420 ixlv_update_link_status(struct ixlv_sc *sc)
2421 {
2422         struct ixl_vsi          *vsi = &sc->vsi;
2423         struct ifnet            *ifp = vsi->ifp;
2424
        if (sc->link_up) {
                if (vsi->link_active == FALSE) {
                        if (bootverbose)
                                if_printf(ifp, "Link is Up, %d Gbps\n",
                                    (sc->link_speed == I40E_LINK_SPEED_40GB) ? 40 : 10);
2430                         vsi->link_active = TRUE;
2431                         if_link_state_change(ifp, LINK_STATE_UP);
2432                 }
2433         } else { /* Link down */
2434                 if (vsi->link_active == TRUE) {
2435                         if (bootverbose)
                                if_printf(ifp, "Link is Down\n");
2437                         if_link_state_change(ifp, LINK_STATE_DOWN);
2438                         vsi->link_active = FALSE;
2439                 }
2440         }
2441
2442         return;
2443 }
2444
2445 /*********************************************************************
2446  *
2447  *  This routine disables all traffic on the adapter by issuing a
2448  *  global reset on the MAC and deallocates TX/RX buffers.
2449  *
2450  **********************************************************************/
2451
2452 static void
2453 ixlv_stop(struct ixlv_sc *sc)
2454 {
2455         struct ifnet *ifp;
2456         int start;
2457
2458         ifp = sc->vsi.ifp;
2459         INIT_DBG_IF(ifp, "begin");
2460
2461         IXLV_CORE_LOCK_ASSERT(sc);
2462
2463         ixl_vc_flush(&sc->vc_mgr);
2464         ixlv_disable_queues(sc);
2465
2466         start = ticks;
2467         while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
2468             ((ticks - start) < hz/10))
2469                 ixlv_do_adminq_locked(sc);
2470
2471         /* Stop the local timer */
2472         callout_stop(&sc->timer);
2473
2474         INIT_DBG_IF(ifp, "end");
2475 }
2476
2477
2478 /*********************************************************************
2479  *
2480  *  Free all station queue structs.
2481  *
2482  **********************************************************************/
2483 static void
2484 ixlv_free_queues(struct ixl_vsi *vsi)
2485 {
2486         struct ixlv_sc  *sc = (struct ixlv_sc *)vsi->back;
2487         struct ixl_queue        *que = vsi->queues;
2488
2489         for (int i = 0; i < vsi->num_queues; i++, que++) {
2490                 struct tx_ring *txr = &que->txr;
2491                 struct rx_ring *rxr = &que->rxr;
2492         
2493                 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2494                         continue;
2495                 IXL_TX_LOCK(txr);
2496                 ixl_free_que_tx(que);
2497                 if (txr->base)
2498                         i40e_free_dma_mem(&sc->hw, &txr->dma);
2499                 IXL_TX_UNLOCK(txr);
2500                 IXL_TX_LOCK_DESTROY(txr);
2501
2502                 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2503                         continue;
2504                 IXL_RX_LOCK(rxr);
2505                 ixl_free_que_rx(que);
2506                 if (rxr->base)
2507                         i40e_free_dma_mem(&sc->hw, &rxr->dma);
2508                 IXL_RX_UNLOCK(rxr);
2509                 IXL_RX_LOCK_DESTROY(rxr);
2510                 
2511         }
2512         free(vsi->queues, M_DEVBUF);
2513 }
2514
2515
2516 /*
2517 ** ixlv_config_rss - setup RSS 
2518 **
2519 ** RSS keys and table are cleared on VF reset.
2520 */
2521 static void
2522 ixlv_config_rss(struct ixlv_sc *sc)
2523 {
2524         struct i40e_hw  *hw = &sc->hw;
2525         struct ixl_vsi  *vsi = &sc->vsi;
2526         u32             lut = 0;
2527         u64             set_hena = 0, hena;
2528         int             i, j, que_id;
2529 #ifdef RSS
2530         u32             rss_hash_config;
2531         u32             rss_seed[IXL_KEYSZ];
2532 #else
2533         u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
2534                             0x183cfd8c, 0xce880440, 0x580cbc3c,
2535                             0x35897377, 0x328b25e1, 0x4fa98922,
2536                             0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
2537 #endif
2538         
2539         /* Don't set up RSS if using a single queue */
2540         if (vsi->num_queues == 1) {
2541                 wr32(hw, I40E_VFQF_HENA(0), 0);
2542                 wr32(hw, I40E_VFQF_HENA(1), 0);
2543                 ixl_flush(hw);
2544                 return;
2545         }
2546
2547 #ifdef RSS
2548         /* Fetch the configured RSS key */
2549         rss_getkey((uint8_t *) &rss_seed);
2550 #endif
2551         /* Fill out hash function seed */
        for (i = 0; i < IXL_KEYSZ; i++)
2553                 wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
2554
2555         /* Enable PCTYPES for RSS: */
2556 #ifdef RSS
2557         rss_hash_config = rss_gethashconfig();
2558         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2559                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2560         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2561                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2562         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2563                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2564         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2565                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2566         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2567                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2568         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2569                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2570         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2571                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
2572 #else
2573         set_hena =
2574                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
2575                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
2576                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
2577                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
2578                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
2579                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
2580                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
2581                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
2582                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
2583                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
2584                 ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
2585 #endif
2586         hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
2587             ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
2588         hena |= set_hena;
2589         wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
2590         wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
2591
2592         /* Populate the LUT with max no. of queues in round robin fashion */
2593         for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
2594                 if (j == vsi->num_queues)
2595                         j = 0;
2596 #ifdef RSS
2597                 /*
2598                  * Fetch the RSS bucket id for the given indirection entry.
2599                  * Cap it at the number of configured buckets (which is
2600                  * num_queues.)
2601                  */
2602                 que_id = rss_get_indirection_to_bucket(i);
2603                 que_id = que_id % vsi->num_queues;
2604 #else
2605                 que_id = j;
2606 #endif
2607                 /* lut = 4-byte sliding window of 4 lut entries */
2608                 lut = (lut << 8) | (que_id & 0xF);
2609                 /* On i = 3, we have 4 entries in lut; write to the register */
2610                 if ((i & 3) == 3) {
2611                         wr32(hw, I40E_VFQF_HLUT(i), lut);
2612                         DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
2613                 }
2614         }
2615         ixl_flush(hw);
2616 }
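/*
 * Editorial worked example: with 4 queues, que_id cycles 0,1,2,3, so after
 * four iterations lut == 0x00010203 (one LUT entry per byte) and that
 * 32-bit word is written out; the 0xF mask suffices here because a VF is
 * limited to 16 queue pairs.
 */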
2617
2618
2619 /*
** This routine refreshes vlan filters; called by init,
** it scans the filter table and then updates the AQ.
2622 */
2623 static void
2624 ixlv_setup_vlan_filters(struct ixlv_sc *sc)
2625 {
2626         struct ixl_vsi                  *vsi = &sc->vsi;
2627         struct ixlv_vlan_filter *f;
2628         int                             cnt = 0;
2629
2630         if (vsi->num_vlans == 0)
2631                 return;
2632         /*
2633         ** Scan the filter table for vlan entries,
2634         ** and if found call for the AQ update.
2635         */
2636         SLIST_FOREACH(f, sc->vlan_filters, next)
2637                 if (f->flags & IXL_FILTER_ADD)
2638                         cnt++;
2639         if (cnt > 0)
2640                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
2641                     IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
2642 }
2643
2644
2645 /*
2646 ** This routine adds new MAC filters to the sc's list;
2647 ** these are later added in hardware by sending a virtual
2648 ** channel message.
2649 */
2650 static int
2651 ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
2652 {
2653         struct ixlv_mac_filter  *f;
2654
2655         /* Does one already exist? */
2656         f = ixlv_find_mac_filter(sc, macaddr);
2657         if (f != NULL) {
2658                 IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
2659                     MAC_FORMAT_ARGS(macaddr));
2660                 return (EEXIST);
2661         }
2662
2663         /* If not, get a new empty filter */
2664         f = ixlv_get_mac_filter(sc);
2665         if (f == NULL) {
                if_printf(sc->vsi.ifp, "%s: no filters available!\n",
2667                     __func__);
2668                 return (ENOMEM);
2669         }
2670
2671         IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
2672             MAC_FORMAT_ARGS(macaddr));
2673
2674         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2675         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
2676         f->flags |= flags;
2677         return (0);
2678 }
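/*
 * Editorial usage sketch: callers mark a filter and then batch the AQ
 * update through the virtual channel manager, analogous to the vlan path
 * above (the add_mac_cmd slot named here is an assumption):
 *
 *      if (ixlv_add_mac_filter(sc, hw->mac.addr, 0) == 0)
 *              ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
 *                  IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete, sc);
 */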
2679
2680 /*
2681 ** Marks a MAC filter for deletion.
2682 */
2683 static int
2684 ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
2685 {
2686         struct ixlv_mac_filter  *f;
2687
2688         f = ixlv_find_mac_filter(sc, macaddr);
2689         if (f == NULL)
2690                 return (ENOENT);
2691
2692         f->flags |= IXL_FILTER_DEL;
2693         return (0);
2694 }
2695
2696 /*
2697 ** Tasklet handler for MSIX Adminq interrupts
2698 **  - done outside interrupt context since it might sleep
2699 */
2700 static void
2701 ixlv_do_adminq(void *context, int pending)
2702 {
2703         struct ixlv_sc          *sc = context;
2704
2705         mtx_lock(&sc->mtx);
2706         ixlv_do_adminq_locked(sc);
2707         mtx_unlock(&sc->mtx);
2708         return;
2709 }
2710
2711 static void
2712 ixlv_do_adminq_locked(struct ixlv_sc *sc)
2713 {
2714         struct i40e_hw                  *hw = &sc->hw;
2715         struct i40e_arq_event_info      event;
2716         struct i40e_virtchnl_msg        *v_msg;
2717         device_t                        dev = sc->dev;
2718         u16                             result = 0;
2719         u32                             reg, oldreg;
2720         i40e_status                     ret;
2721
2722         IXLV_CORE_LOCK_ASSERT(sc);
2723
2724         event.buf_len = IXL_AQ_BUF_SZ;
2725         event.msg_buf = sc->aq_buffer;
2726         v_msg = (struct i40e_virtchnl_msg *)&event.desc;
2727
2728         do {
2729                 ret = i40e_clean_arq_element(hw, &event, &result);
2730                 if (ret)
2731                         break;
2732                 ixlv_vc_completion(sc, v_msg->v_opcode,
2733                     v_msg->v_retval, event.msg_buf, event.msg_len);
2734                 if (result != 0)
2735                         bzero(event.msg_buf, IXL_AQ_BUF_SZ);
2736         } while (result);
2737
2738         /* check for Admin queue errors */
2739         oldreg = reg = rd32(hw, hw->aq.arq.len);
2740         if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
2741                 device_printf(dev, "ARQ VF Error detected\n");
2742                 reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
2743         }
2744         if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
2745                 device_printf(dev, "ARQ Overflow Error detected\n");
2746                 reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
2747         }
2748         if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
2749                 device_printf(dev, "ARQ Critical Error detected\n");
2750                 reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
2751         }
2752         if (oldreg != reg)
2753                 wr32(hw, hw->aq.arq.len, reg);
2754
2755         oldreg = reg = rd32(hw, hw->aq.asq.len);
2756         if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
2757                 device_printf(dev, "ASQ VF Error detected\n");
2758                 reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
2759         }
2760         if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
2761                 device_printf(dev, "ASQ Overflow Error detected\n");
2762                 reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
2763         }
2764         if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
2765                 device_printf(dev, "ASQ Critical Error detected\n");
2766                 reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
2767         }
2768         if (oldreg != reg)
2769                 wr32(hw, hw->aq.asq.len, reg);
2770
2771         ixlv_enable_adminq_irq(hw);
2772 }
2773
2774 static void
2775 ixlv_add_sysctls(struct ixlv_sc *sc)
2776 {
2777         device_t dev = sc->dev;
2778         struct ixl_vsi *vsi = &sc->vsi;
2779         struct i40e_eth_stats *es = &vsi->eth_stats;
2780
2781         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2782         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2783         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2784
2785         struct sysctl_oid *vsi_node, *queue_node;
2786         struct sysctl_oid_list *vsi_list, *queue_list;
2787
2788 #define QUEUE_NAME_LEN 32
2789         char queue_namebuf[QUEUE_NAME_LEN];
2790
2791         struct ixl_queue *queues = vsi->queues;
2792         struct tx_ring *txr;
2793         struct rx_ring *rxr;
2794
2795         /* Driver statistics sysctls */
2796         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2797                         CTLFLAG_RD, &sc->watchdog_events,
2798                         "Watchdog timeouts");
2799         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2800                         CTLFLAG_RD, &sc->admin_irq,
2801                         "Admin Queue IRQ Handled");
2802
2803         /* VSI statistics sysctls */
2804         vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
2805                                    CTLFLAG_RD, NULL, "VSI-specific statistics");
2806         vsi_list = SYSCTL_CHILDREN(vsi_node);
2807
2808         struct ixl_sysctl_info ctls[] =
2809         {
2810                 {&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2811                 {&es->rx_unicast, "ucast_pkts_rcvd",
2812                         "Unicast Packets Received"},
2813                 {&es->rx_multicast, "mcast_pkts_rcvd",
2814                         "Multicast Packets Received"},
2815                 {&es->rx_broadcast, "bcast_pkts_rcvd",
2816                         "Broadcast Packets Received"},
2817                 {&es->rx_discards, "rx_discards", "Discarded RX packets"},
2818                 {&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"},
2819                 {&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2820                 {&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2821                 {&es->tx_multicast, "mcast_pkts_txd",
2822                         "Multicast Packets Transmitted"},
2823                 {&es->tx_broadcast, "bcast_pkts_txd",
2824                         "Broadcast Packets Transmitted"},
2825                 {&es->tx_errors, "tx_errors", "TX packet errors"},
2826                 // end
2827                 {0,0,0}
2828         };
        struct ixl_sysctl_info *entry = ctls;
        while (entry->stat != 0) {
                SYSCTL_ADD_QUAD(ctx, vsi_list, OID_AUTO, entry->name,
                                CTLFLAG_RD, entry->stat,
                                entry->description);
                entry++;
        }
2837
2838         /* Queue sysctls */
2839         for (int q = 0; q < vsi->num_queues; q++) {
2840                 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2841                 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
2842                                              CTLFLAG_RD, NULL, "Queue Name");
2843                 queue_list = SYSCTL_CHILDREN(queue_node);
2844
2845                 txr = &(queues[q].txr);
2846                 rxr = &(queues[q].rxr);
2847
2848                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2849                                 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2850                                 "m_defrag() failed");
2851                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
2852                                 CTLFLAG_RD, &(queues[q].dropped_pkts),
2853                                 "Driver dropped packets");
2854                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
2855                                 CTLFLAG_RD, &(queues[q].irqs),
2856                                 "irqs on this queue");
2857                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2858                                 CTLFLAG_RD, &(queues[q].tso),
2859                                 "TSO");
2860                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
2861                                 CTLFLAG_RD, &(queues[q].tx_dma_setup),
2862                                 "Driver tx dma failure in xmit");
2863                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2864                                 CTLFLAG_RD, &(txr->no_desc),
2865                                 "Queue No Descriptor Available");
2866                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2867                                 CTLFLAG_RD, &(txr->total_packets),
2868                                 "Queue Packets Transmitted");
2869                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2870                                 CTLFLAG_RD, &(txr->tx_bytes),
2871                                 "Queue Bytes Transmitted");
2872                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2873                                 CTLFLAG_RD, &(rxr->rx_packets),
2874                                 "Queue Packets Received");
2875                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2876                                 CTLFLAG_RD, &(rxr->rx_bytes),
2877                                 "Queue Bytes Received");
2878
2879                 /* Examine queue state */
                SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
                                CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
                                sizeof(struct ixl_queue),
                                ixlv_sysctl_qtx_tail_handler, "IU",
                                "Queue Transmit Descriptor Tail");
                SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
                                CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
                                sizeof(struct ixl_queue),
                                ixlv_sysctl_qrx_tail_handler, "IU",
                                "Queue Receive Descriptor Tail");
2890         }
2891 }
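/*
 * Editorial usage note: the nodes above land under the device's sysctl
 * tree, e.g. "sysctl dev.ixlv.0.vsi.que0.tx_packets" (unit and queue
 * numbers illustrative).
 */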
2892
2893 static void
2894 ixlv_init_filters(struct ixlv_sc *sc)
2895 {
        /* M_WAITOK: called from attach, where sleeping is safe; an
         * unchecked M_NOWAIT allocation could be dereferenced as NULL. */
        sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
            M_DEVBUF, M_WAITOK | M_ZERO);
        SLIST_INIT(sc->mac_filters);
        sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
            M_DEVBUF, M_WAITOK | M_ZERO);
2901         SLIST_INIT(sc->vlan_filters);
2902         return;
2903 }
2904
2905 static void
2906 ixlv_free_filters(struct ixlv_sc *sc)
2907 {
2908         struct ixlv_mac_filter *f;
2909         struct ixlv_vlan_filter *v;
2910
2911         while (!SLIST_EMPTY(sc->mac_filters)) {
2912                 f = SLIST_FIRST(sc->mac_filters);
2913                 SLIST_REMOVE_HEAD(sc->mac_filters, next);
2914                 free(f, M_DEVBUF);
2915         }
2916         while (!SLIST_EMPTY(sc->vlan_filters)) {
2917                 v = SLIST_FIRST(sc->vlan_filters);
2918                 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
2919                 free(v, M_DEVBUF);
2920         }
2921         return;
2922 }
2923
2924 /**
2925  * ixlv_sysctl_qtx_tail_handler
2926  * Retrieves I40E_QTX_TAIL1 value from hardware
2927  * for a sysctl.
2928  */
2929 static int 
2930 ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
2931 {
2932         struct ixl_queue *que;
2933         int error;
2934         u32 val;
2935
        que = ((struct ixl_queue *)oidp->oid_arg1);
        if (que == NULL)
                return (0);

        val = rd32(que->vsi->hw, que->txr.tail);
        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error || !req->newptr)
                return (error);
        return (0);
2944 }
2945
2946 /**
2947  * ixlv_sysctl_qrx_tail_handler
2948  * Retrieves I40E_QRX_TAIL1 value from hardware
2949  * for a sysctl.
2950  */
2951 static int 
2952 ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
2953 {
2954         struct ixl_queue *que;
2955         int error;
2956         u32 val;
2957
        que = ((struct ixl_queue *)oidp->oid_arg1);
        if (que == NULL)
                return (0);

        val = rd32(que->vsi->hw, que->rxr.tail);
        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error || !req->newptr)
                return (error);
2965         return (0);
2966 }
2967