CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/ixl/if_ixlv.c
Commit: Fix some RSS issues discovered by Adrian: add a missing header and a missing variable declaration.
[FreeBSD/FreeBSD.git] / sys / dev / ixl / if_ixlv.c
1 /******************************************************************************
2
3   Copyright (c) 2013-2014, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
37 #include "opt_rss.h"
38 #include "ixl.h"
39 #include "ixlv.h"
40
41 #ifdef RSS
42 #include <net/rss_config.h>
43 #endif
44
45 /*********************************************************************
46  *  Driver version
47  *********************************************************************/
48 char ixlv_driver_version[] = "1.2.1";
49
50 /*********************************************************************
51  *  PCI Device ID Table
52  *
53  *  Used by probe to select devices to load on
54  *  Last field stores an index into ixlv_strings
55  *  Last entry must be all 0s
56  *
57  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58  *********************************************************************/
59
60 static ixl_vendor_info_t ixlv_vendor_info_array[] =
61 {
62         {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
63         {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
64         /* required last entry */
65         {0, 0, 0, 0, 0}
66 };
67
68 /*********************************************************************
69  *  Table of branding strings
70  *********************************************************************/
71
72 static char    *ixlv_strings[] = {
73         "Intel(R) Ethernet Connection XL710 VF Driver"
74 };
75
76
77 /*********************************************************************
78  *  Function prototypes
79  *********************************************************************/
80 static int      ixlv_probe(device_t);
81 static int      ixlv_attach(device_t);
82 static int      ixlv_detach(device_t);
83 static int      ixlv_shutdown(device_t);
84 static void     ixlv_init_locked(struct ixlv_sc *);
85 static int      ixlv_allocate_pci_resources(struct ixlv_sc *);
86 static void     ixlv_free_pci_resources(struct ixlv_sc *);
87 static int      ixlv_assign_msix(struct ixlv_sc *);
88 static int      ixlv_init_msix(struct ixlv_sc *);
89 static int      ixlv_init_taskqueue(struct ixlv_sc *);
90 static int      ixlv_setup_queues(struct ixlv_sc *);
91 static void     ixlv_config_rss(struct ixlv_sc *);
92 static void     ixlv_stop(struct ixlv_sc *);
93 static void     ixlv_add_multi(struct ixl_vsi *);
94 static void     ixlv_del_multi(struct ixl_vsi *);
95 static void     ixlv_free_queues(struct ixl_vsi *);
96 static int      ixlv_setup_interface(device_t, struct ixlv_sc *);
97
98 static int      ixlv_media_change(struct ifnet *);
99 static void     ixlv_media_status(struct ifnet *, struct ifmediareq *);
100
101 static void     ixlv_local_timer(void *);
102
103 static int      ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
104 static int      ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
105 static void     ixlv_init_filters(struct ixlv_sc *);
106 static void     ixlv_free_filters(struct ixlv_sc *);
107
108 static void     ixlv_msix_que(void *);
109 static void     ixlv_msix_adminq(void *);
110 static void     ixlv_do_adminq(void *, int);
111 static void     ixlv_do_adminq_locked(struct ixlv_sc *sc);
112 static void     ixlv_handle_que(void *, int);
113 static int      ixlv_reset(struct ixlv_sc *);
114 static int      ixlv_reset_complete(struct i40e_hw *);
115 static void     ixlv_set_queue_rx_itr(struct ixl_queue *);
116 static void     ixlv_set_queue_tx_itr(struct ixl_queue *);
117 static void     ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
118                     enum i40e_status_code);
119
120 static void     ixlv_enable_adminq_irq(struct i40e_hw *);
121 static void     ixlv_disable_adminq_irq(struct i40e_hw *);
122 static void     ixlv_enable_queue_irq(struct i40e_hw *, int);
123 static void     ixlv_disable_queue_irq(struct i40e_hw *, int);
124
125 static void     ixlv_setup_vlan_filters(struct ixlv_sc *);
126 static void     ixlv_register_vlan(void *, struct ifnet *, u16);
127 static void     ixlv_unregister_vlan(void *, struct ifnet *, u16);
128
129 static void     ixlv_init_hw(struct ixlv_sc *);
130 static int      ixlv_setup_vc(struct ixlv_sc *);
131 static int      ixlv_vf_config(struct ixlv_sc *);
132
133 static void     ixlv_cap_txcsum_tso(struct ixl_vsi *,
134                     struct ifnet *, int);
135
136 static void     ixlv_add_sysctls(struct ixlv_sc *);
137 static int      ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
138 static int      ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
139
140 /*********************************************************************
141  *  FreeBSD Device Interface Entry Points
142  *********************************************************************/
143
144 static device_method_t ixlv_methods[] = {
145         /* Device interface */
146         DEVMETHOD(device_probe, ixlv_probe),
147         DEVMETHOD(device_attach, ixlv_attach),
148         DEVMETHOD(device_detach, ixlv_detach),
149         DEVMETHOD(device_shutdown, ixlv_shutdown),
150         {0, 0}
151 };
152
153 static driver_t ixlv_driver = {
154         "ixlv", ixlv_methods, sizeof(struct ixlv_sc),
155 };
156
157 devclass_t ixlv_devclass;
158 DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);
159
160 MODULE_DEPEND(ixlv, pci, 1, 1, 1);
161 MODULE_DEPEND(ixlv, ether, 1, 1, 1);
162
163 /*
164 ** TUNEABLE PARAMETERS:
165 */
166
167 static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
168                    "IXLV driver parameters");
169
170 /*
171 ** Number of descriptors per ring:
172 **   - TX and RX are the same size
173 */
174 static int ixlv_ringsz = DEFAULT_RING;
175 TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
176 SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
177     &ixlv_ringsz, 0, "Descriptor Ring Size");
178
179 /* Set to zero to auto calculate  */
180 int ixlv_max_queues = 0;
181 TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
182 SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
183     &ixlv_max_queues, 0, "Number of Queues");
184
185 /*
186 ** Number of entries in Tx queue buf_ring.
187 ** Increasing this will reduce the number of
188 ** errors when transmitting fragmented UDP
189 ** packets.
190 */
191 static int ixlv_txbrsz = DEFAULT_TXBRSZ;
192 TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
193 SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
194     &ixlv_txbrsz, 0, "TX Buf Ring Size");
195
196 /*
197 ** Controls for Interrupt Throttling
198 **      - true/false for dynamic adjustment
199 **      - default values for static ITR
200 */
201 int ixlv_dynamic_rx_itr = 0;
202 TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
203 SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
204     &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
205
206 int ixlv_dynamic_tx_itr = 0;
207 TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
208 SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
209     &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
210
211 int ixlv_rx_itr = IXL_ITR_8K;
212 TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
213 SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
214     &ixlv_rx_itr, 0, "RX Interrupt Rate");
215
216 int ixlv_tx_itr = IXL_ITR_4K;
217 TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
218 SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
219     &ixlv_tx_itr, 0, "TX Interrupt Rate");
220
221         
222 /*********************************************************************
223  *  Device identification routine
224  *
225  *  ixlv_probe determines if the driver should be loaded on
226  *  the hardware based on PCI vendor/device id of the device.
227  *
228  *  return BUS_PROBE_DEFAULT on success, positive on failure
229  *********************************************************************/
230
231 static int
232 ixlv_probe(device_t dev)
233 {
234         ixl_vendor_info_t *ent;
235
236         u16     pci_vendor_id, pci_device_id;
237         u16     pci_subvendor_id, pci_subdevice_id;
238         char    device_name[256];
239
240         INIT_DEBUGOUT("ixlv_probe: begin");
241
242         pci_vendor_id = pci_get_vendor(dev);
243         if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
244                 return (ENXIO);
245
246         pci_device_id = pci_get_device(dev);
247         pci_subvendor_id = pci_get_subvendor(dev);
248         pci_subdevice_id = pci_get_subdevice(dev);
249
250         ent = ixlv_vendor_info_array;
251         while (ent->vendor_id != 0) {
252                 if ((pci_vendor_id == ent->vendor_id) &&
253                     (pci_device_id == ent->device_id) &&
254
255                     ((pci_subvendor_id == ent->subvendor_id) ||
256                      (ent->subvendor_id == 0)) &&
257
258                     ((pci_subdevice_id == ent->subdevice_id) ||
259                      (ent->subdevice_id == 0))) {
260                         sprintf(device_name, "%s, Version - %s",
261                                 ixlv_strings[ent->index],
262                                 ixlv_driver_version);
263                         device_set_desc_copy(dev, device_name);
264                         return (BUS_PROBE_DEFAULT);
265                 }
266                 ent++;
267         }
268         return (ENXIO);
269 }
270
271 /*********************************************************************
272  *  Device initialization routine
273  *
274  *  The attach entry point is called when the driver is being loaded.
275  *  This routine identifies the type of hardware, allocates all resources
276  *  and initializes the hardware.
277  *
278  *  return 0 on success, positive on failure
279  *********************************************************************/
280
static int
ixlv_attach(device_t dev)
{
	struct ixlv_sc	*sc;
	struct i40e_hw	*hw;
	struct ixl_vsi	*vsi;
	int		error = 0;

	INIT_DBG_DEV(dev, "begin");

	/* Allocate, clear, and link in our primary soft structure */
	sc = device_get_softc(dev);
	sc->dev = sc->osdep.dev = dev;
	hw = &sc->hw;
	vsi = &sc->vsi;
	vsi->dev = dev;

	/* Initialize hw struct */
	ixlv_init_hw(sc);

	/* Allocate filter lists */
	ixlv_init_filters(sc);

	/* Core Lock Init */
	mtx_init(&sc->mtx, device_get_nameunit(dev),
	    "IXL SC Lock", MTX_DEF);

	/* Set up the timer callout (runs under the core lock) */
	callout_init_mtx(&sc->timer, &sc->mtx, 0);

	/* Do PCI setup - map BAR0, etc */
	if (ixlv_allocate_pci_resources(sc)) {
		device_printf(dev, "%s: Allocation of PCI resources failed\n",
		    __func__);
		error = ENXIO;
		goto err_early;
	}

	INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");

	error = i40e_set_mac_type(hw);
	if (error) {
		device_printf(dev, "%s: set_mac_type failed: %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	/* The PF must have finished resetting this VF before we continue. */
	error = ixlv_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: Device is still being reset\n",
		    __func__);
		goto err_pci_res;
	}

	INIT_DBG_DEV(dev, "VF Device is ready for configuration");

	/* Establish the virtchnl (AdminQ) channel to the PF. */
	error = ixlv_setup_vc(sc);
	if (error) {
		device_printf(dev, "%s: Error setting up PF comms, %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	INIT_DBG_DEV(dev, "PF API version verified");

	/* TODO: Figure out why MDD events occur when this reset is removed. */
	/* Need API version before sending reset message */
	error = ixlv_reset(sc);
	if (error) {
		device_printf(dev, "VF reset failed; reload the driver\n");
		goto err_aq;
	}

	INIT_DBG_DEV(dev, "VF reset complete");

	/* Ask for VF config from PF */
	error = ixlv_vf_config(sc);
	if (error) {
		device_printf(dev, "Error getting configuration from PF: %d\n",
		    error);
		goto err_aq;
	}

	INIT_DBG_DEV(dev, "VF config from PF:");
	INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
	    sc->vf_res->num_vsis,
	    sc->vf_res->num_queue_pairs,
	    sc->vf_res->max_vectors,
	    sc->vf_res->max_mtu);
	INIT_DBG_DEV(dev, "Offload flags: %#010x",
	    sc->vf_res->vf_offload_flags);

	// TODO: Move this into ixlv_vf_config?
	/* got VF config message back from PF, now we can parse it */
	/* Pick out the (last, if several) SR-IOV LAN VSI the PF granted us. */
	for (int i = 0; i < sc->vf_res->num_vsis; i++) {
		if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
			sc->vsi_res = &sc->vf_res->vsi_res[i];
	}
	if (!sc->vsi_res) {
		device_printf(dev, "%s: no LAN VSI found\n", __func__);
		error = EIO;
		goto err_res_buf;
	}

	INIT_DBG_DEV(dev, "Resource Acquisition complete");

	/* If no mac address was assigned just make a random one */
	if (!ixlv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		/* clear the multicast bit, set locally-administered bit */
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	vsi->id = sc->vsi_res->vsi_id;
	vsi->back = (void *)sc;
	vsi->link_up = TRUE;

	/* This allocates the memory and early settings */
	if (ixlv_setup_queues(sc) != 0) {
		device_printf(dev, "%s: setup queues failed!\n",
		    __func__);
		error = EIO;
		goto out;
	}

	/* Setup the stack interface */
	if (ixlv_setup_interface(dev, sc) != 0) {
		device_printf(dev, "%s: setup interface failed!\n",
		    __func__);
		error = EIO;
		goto out;
	}

	INIT_DBG_DEV(dev, "Queue memory and interface setup");

	/* Do queue interrupt setup */
	ixlv_assign_msix(sc);

	/* Start AdminQ taskqueue */
	ixlv_init_taskqueue(sc);

	/* Initialize stats */
	bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
	ixlv_add_sysctls(sc);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

	/* We want AQ enabled early */
	ixlv_enable_adminq_irq(hw);

	/* Set things up to run init */
	sc->init_state = IXLV_INIT_READY;

	ixl_vc_init_mgr(sc, &sc->vc_mgr);

	INIT_DBG_DEV(dev, "end");
	return (error);

	/* Error unwind: labels release resources in reverse acquisition order. */
out:
	ixlv_free_queues(vsi);
err_res_buf:
	free(sc->vf_res, M_DEVBUF);
err_aq:
	i40e_shutdown_adminq(hw);
err_pci_res:
	ixlv_free_pci_resources(sc);
err_early:
	mtx_destroy(&sc->mtx);
	ixlv_free_filters(sc);
	INIT_DBG_DEV(dev, "end: error %d", error);
	return (error);
}
459
460 /*********************************************************************
461  *  Device removal routine
462  *
463  *  The detach entry point is called when the driver is being removed.
464  *  This routine stops the adapter and deallocates all the resources
465  *  that were allocated for driver operation.
466  *
467  *  return 0 on success, positive on failure
468  *********************************************************************/
469
static int
ixlv_detach(device_t dev)
{
	struct ixlv_sc	*sc = device_get_softc(dev);
	struct ixl_vsi	*vsi = &sc->vsi;

	INIT_DBG_DEV(dev, "begin");

	/* Make sure VLANS are not using driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		INIT_DBG_DEV(dev, "end");
		return (EBUSY);
	}

	/*
	 * Stop driver: detach from the network stack first so no new
	 * ioctls arrive, then stop the device under the core lock.
	 */
	ether_ifdetach(vsi->ifp);
	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		mtx_lock(&sc->mtx);
		ixlv_stop(sc);
		mtx_unlock(&sc->mtx);
	}

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	/* Drain VC mgr callout before tearing down what it may reference */
	callout_drain(&sc->vc_mgr.callout);

	/* Release remaining resources (reverse of attach order). */
	i40e_shutdown_adminq(&sc->hw);
	taskqueue_free(sc->tq);
	if_free(vsi->ifp);
	free(sc->vf_res, M_DEVBUF);
	ixlv_free_pci_resources(sc);
	ixlv_free_queues(vsi);
	mtx_destroy(&sc->mtx);
	ixlv_free_filters(sc);

	bus_generic_detach(dev);
	INIT_DBG_DEV(dev, "end");
	return (0);
}
515
516 /*********************************************************************
517  *
518  *  Shutdown entry point
519  *
520  **********************************************************************/
521
522 static int
523 ixlv_shutdown(device_t dev)
524 {
525         struct ixlv_sc  *sc = device_get_softc(dev);
526
527         INIT_DBG_DEV(dev, "begin");
528
529         mtx_lock(&sc->mtx);     
530         ixlv_stop(sc);
531         mtx_unlock(&sc->mtx);   
532
533         INIT_DBG_DEV(dev, "end");
534         return (0);
535 }
536
537 /*
538  * Configure TXCSUM(IPV6) and TSO(4/6)
539  *      - the hardware handles these together so we
540  *        need to tweak them 
541  */
/*
 * Walk the three possible capability states (neither / csum-only / both)
 * and apply the requested toggle 'mask' while keeping the hardware
 * invariant that TSO requires TX checksum offload.  The KEEP_TSO flags
 * remember that TSO was implicitly disabled so it can be restored when
 * txcsum is re-enabled.
 */
static void
ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	/* Enable/disable TXCSUM/TSO4 */
	/* Case 1: neither TXCSUM nor TSO4 currently enabled */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			/* TSO4 alone requested: force txcsum on too */
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			if_printf(ifp,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	/* Case 2: TXCSUM enabled, TSO4 disabled */
	} else if((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	/* Case 3: both TXCSUM and TSO4 enabled */
	} else if((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			/* disabling txcsum drags TSO4 down with it;
			 * remember to restore TSO4 later */
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			if_printf(ifp,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 — same state machine as above */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			if_printf(ifp,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			if_printf(ifp,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}
611
612 /*********************************************************************
613  *  Ioctl entry point
614  *
615  *  ixlv_ioctl is called when the user wants to configure the
616  *  interface.
617  *
618  *  return 0 on success, positive on failure
619  **********************************************************************/
620
static int
ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ixl_vsi		*vsi = ifp->if_softc;
	struct ixlv_sc	*sc = vsi->back;
	struct ifreq		*ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr		*ifa = (struct ifaddr *)data;
	bool			avoid_reset = FALSE;
#endif
	int			error = 0;


	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixlv_init(vsi);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
		/*
		 * NOTE(review): this break is inside the #if block, so when
		 * neither INET nor INET6 is defined SIOCSIFADDR falls through
		 * into SIOCSIFMTU — confirm that build config is impossible.
		 */
#endif
	case SIOCSIFMTU:
		IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
		mtx_lock(&sc->mtx);
		/* Reject MTUs whose resulting frame exceeds IXL_MAX_FRAME */
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			error = EINVAL;
			IOCTL_DBG_IF(ifp, "mtu too large");
		} else {
			IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", ifp->if_mtu, ifr->ifr_mtu);
			// ERJ: Interestingly enough, these types don't match
			ifp->if_mtu = (u_long)ifr->ifr_mtu;
			vsi->max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			    + ETHER_VLAN_ENCAP_LEN;
			/* A new MTU requires a full re-init */
			ixlv_init_locked(sc);
		}
		mtx_unlock(&sc->mtx);
		break;
	case SIOCSIFFLAGS:
		IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
		mtx_lock(&sc->mtx);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixlv_init_locked(sc);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixlv_stop(sc);
		sc->if_flags = ifp->if_flags;
		mtx_unlock(&sc->mtx);
		break;
	case SIOCADDMULTI:
		IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			mtx_lock(&sc->mtx);
			/* Quiesce interrupts while the filter list changes */
			ixlv_disable_intr(vsi);
			ixlv_add_multi(vsi);
			ixlv_enable_intr(vsi);
			mtx_unlock(&sc->mtx);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
		if (sc->init_state == IXLV_RUNNING) {
			mtx_lock(&sc->mtx);
			ixlv_disable_intr(vsi);
			ixlv_del_multi(vsi);
			ixlv_enable_intr(vsi);
			mtx_unlock(&sc->mtx);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;
	case SIOCSIFCAP:
	{
		/* mask holds only the capability bits being toggled */
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");

		/* TXCSUM/TSO are coupled; handled by a dedicated helper */
		ixlv_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		/* Re-init so the hardware picks up the new capabilities */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ixlv_init(vsi);
		}
		VLAN_CAPABILITIES(ifp);

		break;
	}

	default:
		IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
752
753 /*
754 ** To do a reinit on the VF is unfortunately more complicated
755 ** than a physical device, we must have the PF more or less
756 ** completely recreate our memory, so many things that were
757 ** done only once at attach in traditional drivers now must be
758 ** redone at each reinitialization. This function does that
759 ** 'prelude' so we can then call the normal locked init code.
760 */
int
ixlv_reinit_locked(struct ixlv_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ifnet		*ifp = vsi->ifp;
	struct ixlv_mac_filter	*mf, *mf_temp;
	struct ixlv_vlan_filter	*vf;
	int			error = 0;

	INIT_DBG_IF(ifp, "begin");

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ixlv_stop(sc);

	/*
	 * NOTE(review): a failure from ixlv_reset() is only propagated via
	 * the return value at the end; the filter rework below still runs
	 * even if the reset failed — confirm that is intentional.
	 */
	error = ixlv_reset(sc);

	INIT_DBG_IF(ifp, "VF was reset");

	/* set the state in case we went thru RESET */
	sc->init_state = IXLV_RUNNING;

	/*
	** Resetting the VF drops all filters from hardware;
	** we need to mark them to be re-added in init.
	*/
	SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
		if (mf->flags & IXL_FILTER_DEL) {
			/* Filters pending deletion are moot now: drop them */
			SLIST_REMOVE(sc->mac_filters, mf,
			    ixlv_mac_filter, next);
			free(mf, M_DEVBUF);
		} else
			mf->flags |= IXL_FILTER_ADD;
	}
	if (vsi->num_vlans != 0)
		SLIST_FOREACH(vf, sc->vlan_filters, next)
			vf->flags = IXL_FILTER_ADD;
	else { /* clean any stale filters */
		while (!SLIST_EMPTY(sc->vlan_filters)) {
			vf = SLIST_FIRST(sc->vlan_filters);
			SLIST_REMOVE_HEAD(sc->vlan_filters, next);
			free(vf, M_DEVBUF);
		}
	}

	ixlv_enable_adminq_irq(hw);
	/* Flush any virtchnl commands queued before the reset */
	ixl_vc_flush(&sc->vc_mgr);

	INIT_DBG_IF(ifp, "end");
	return (error);
}
812
813 static void
814 ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
815         enum i40e_status_code code)
816 {
817         struct ixlv_sc *sc;
818
819         sc = arg;
820
821         /*
822          * Ignore "Adapter Stopped" message as that happens if an ifconfig down
823          * happens while a command is in progress, so we don't print an error
824          * in that case.
825          */
826         if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
827                 if_printf(sc->vsi.ifp,
828                     "Error %d waiting for PF to complete operation %d\n",
829                     code, cmd->request);
830         }
831 }
832
/*
** Locked init path: (re)programs MAC/VLAN filters, prepares the
** TX/RX rings, and queues the virtchnl commands that configure,
** map, and enable the queues on the PF.  Caller must hold the
** core lock (asserted below).
*/
static void
ixlv_init_locked(struct ixlv_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	int			 error = 0;

	INIT_DBG_IF(ifp, "begin");

	IXLV_CORE_LOCK_ASSERT(sc);

	/* Do a reinit first if an init has already been done */
	if ((sc->init_state == IXLV_RUNNING) ||
	    (sc->init_state == IXLV_RESET_REQUIRED) ||
	    (sc->init_state == IXLV_RESET_PENDING))
		error = ixlv_reinit_locked(sc);
	/* Don't bother with init if we failed reinit */
	if (error)
		goto init_done;

	/* Remove existing MAC filter if new MAC addr is set */
	if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
		error = ixlv_del_mac_filter(sc, hw->mac.addr);
		/* Only queue the AQ delete if the local removal worked */
		if (error == 0)
			ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd, 
			    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
			    sc);
	}

	/* Check for an LAA mac address... */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);

	/* Rebuild hwassist from the current capability set */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		/* CSUM_IP is excluded: the VF does not do IP header csum */
		ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;

	/* Add mac filter for this VF to PF */
	if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
		error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
		/* EEXIST is fine -- the filter is already present */
		if (!error || error == EEXIST)
			ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
			    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
			    sc);
	}

	/* Setup vlan's if needed */
	ixlv_setup_vlan_filters(sc);

	/* Prepare the queues for operation */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct  rx_ring *rxr = &que->rxr;

		ixl_init_tx_ring(que);

		/* Pick an RX mbuf size large enough for the MTU */
		if (vsi->max_frame_size <= 2048)
			rxr->mbuf_sz = MCLBYTES;
		else
			rxr->mbuf_sz = MJUMPAGESIZE;
		ixl_init_rx_ring(que);
	}

	/* Configure queues */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
	    IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);

	/* Set up RSS */
	ixlv_config_rss(sc);

	/* Map vectors */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd, 
	    IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);

	/* Enable queues */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
	    IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);

	/* Start the local timer */
	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);

	sc->init_state = IXLV_RUNNING;

init_done:
	INIT_DBG_IF(ifp, "end");
	return;
}
924
925 /*
926 **  Init entry point for the stack
927 */
928 void
929 ixlv_init(void *arg)
930 {
931         struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
932         struct ixlv_sc *sc = vsi->back;
933         int retries = 0;
934
935         mtx_lock(&sc->mtx);
936         ixlv_init_locked(sc);
937         mtx_unlock(&sc->mtx);
938
939         /* Wait for init_locked to finish */
940         while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
941             && ++retries < 100) {
942                 i40e_msec_delay(10);
943         }
944         if (retries >= IXLV_AQ_MAX_ERR)
945                 if_printf(vsi->ifp,
946                     "Init failed to complete in alloted time!\n");
947 }
948
949 /*
950  * ixlv_attach() helper function; gathers information about
951  * the (virtual) hardware for use elsewhere in the driver.
952  */
953 static void
954 ixlv_init_hw(struct ixlv_sc *sc)
955 {
956         struct i40e_hw *hw = &sc->hw;
957         device_t dev = sc->dev;
958         
959         /* Save off the information about this board */
960         hw->vendor_id = pci_get_vendor(dev);
961         hw->device_id = pci_get_device(dev);
962         hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
963         hw->subsystem_vendor_id =
964             pci_read_config(dev, PCIR_SUBVEND_0, 2);
965         hw->subsystem_device_id =
966             pci_read_config(dev, PCIR_SUBDEV_0, 2);
967
968         hw->bus.device = pci_get_slot(dev);
969         hw->bus.func = pci_get_function(dev);
970 }
971
972 /*
973  * ixlv_attach() helper function; initalizes the admin queue
974  * and attempts to establish contact with the PF by
975  * retrying the initial "API version" message several times
976  * or until the PF responds.
977  */
978 static int
979 ixlv_setup_vc(struct ixlv_sc *sc)
980 {
981         struct i40e_hw *hw = &sc->hw;
982         device_t dev = sc->dev;
983         int error = 0, ret_error = 0, asq_retries = 0;
984         bool send_api_ver_retried = 0;
985
986         /* Need to set these AQ paramters before initializing AQ */
987         hw->aq.num_arq_entries = IXL_AQ_LEN;
988         hw->aq.num_asq_entries = IXL_AQ_LEN;
989         hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
990         hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
991
992         for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
993                 /* Initialize admin queue */
994                 error = i40e_init_adminq(hw);
995                 if (error) {
996                         device_printf(dev, "%s: init_adminq failed: %d\n",
997                             __func__, error);
998                         ret_error = 1;
999                         continue;
1000                 }
1001
1002                 INIT_DBG_DEV(dev, "Initialized Admin Queue, attempt %d", i+1);
1003
1004 retry_send:
1005                 /* Send VF's API version */
1006                 error = ixlv_send_api_ver(sc);
1007                 if (error) {
1008                         i40e_shutdown_adminq(hw);
1009                         ret_error = 2;
1010                         device_printf(dev, "%s: unable to send api"
1011                             " version to PF on attempt %d, error %d\n",
1012                             __func__, i+1, error);
1013                 }
1014
1015                 asq_retries = 0;
1016                 while (!i40e_asq_done(hw)) {
1017                         if (++asq_retries > IXLV_AQ_MAX_ERR) {
1018                                 i40e_shutdown_adminq(hw);
1019                                 DDPRINTF(dev, "Admin Queue timeout "
1020                                     "(waiting for send_api_ver), %d more retries...",
1021                                     IXLV_AQ_MAX_ERR - (i + 1));
1022                                 ret_error = 3;
1023                                 break;
1024                         } 
1025                         i40e_msec_delay(10);
1026                 }
1027                 if (asq_retries > IXLV_AQ_MAX_ERR)
1028                         continue;
1029
1030                 INIT_DBG_DEV(dev, "Sent API version message to PF");
1031
1032                 /* Verify that the VF accepts the PF's API version */
1033                 error = ixlv_verify_api_ver(sc);
1034                 if (error == ETIMEDOUT) {
1035                         if (!send_api_ver_retried) {
1036                                 /* Resend message, one more time */
1037                                 send_api_ver_retried++;
1038                                 device_printf(dev,
1039                                     "%s: Timeout while verifying API version on first"
1040                                     " try!\n", __func__);
1041                                 goto retry_send;
1042                         } else {
1043                                 device_printf(dev,
1044                                     "%s: Timeout while verifying API version on second"
1045                                     " try!\n", __func__);
1046                                 ret_error = 4;
1047                                 break;
1048                         }
1049                 }
1050                 if (error) {
1051                         device_printf(dev,
1052                             "%s: Unable to verify API version,"
1053                             " error %d\n", __func__, error);
1054                         ret_error = 5;
1055                 }
1056                 break;
1057         }
1058
1059         if (ret_error >= 4)
1060                 i40e_shutdown_adminq(hw);
1061         return (ret_error);
1062 }
1063
1064 /*
1065  * ixlv_attach() helper function; asks the PF for this VF's
1066  * configuration, and saves the information if it receives it.
1067  */
1068 static int
1069 ixlv_vf_config(struct ixlv_sc *sc)
1070 {
1071         struct i40e_hw *hw = &sc->hw;
1072         device_t dev = sc->dev;
1073         int bufsz, error = 0, ret_error = 0;
1074         int asq_retries, retried = 0;
1075
1076 retry_config:
1077         error = ixlv_send_vf_config_msg(sc);
1078         if (error) {
1079                 device_printf(dev,
1080                     "%s: Unable to send VF config request, attempt %d,"
1081                     " error %d\n", __func__, retried + 1, error);
1082                 ret_error = 2;
1083         }
1084
1085         asq_retries = 0;
1086         while (!i40e_asq_done(hw)) {
1087                 if (++asq_retries > IXLV_AQ_MAX_ERR) {
1088                         device_printf(dev, "%s: Admin Queue timeout "
1089                             "(waiting for send_vf_config_msg), attempt %d\n",
1090                             __func__, retried + 1);
1091                         ret_error = 3;
1092                         goto fail;
1093                 }
1094                 i40e_msec_delay(10);
1095         }
1096
1097         INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
1098             retried + 1);
1099
1100         if (!sc->vf_res) {
1101                 bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
1102                     (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
1103                 sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
1104                 if (!sc->vf_res) {
1105                         device_printf(dev,
1106                             "%s: Unable to allocate memory for VF configuration"
1107                             " message from PF on attempt %d\n", __func__, retried + 1);
1108                         ret_error = 1;
1109                         goto fail;
1110                 }
1111         }
1112
1113         /* Check for VF config response */
1114         error = ixlv_get_vf_config(sc);
1115         if (error == ETIMEDOUT) {
1116                 /* The 1st time we timeout, send the configuration message again */
1117                 if (!retried) {
1118                         retried++;
1119                         goto retry_config;
1120                 }
1121         }
1122         if (error) {
1123                 device_printf(dev,
1124                     "%s: Unable to get VF configuration from PF after %d tries!\n",
1125                     __func__, retried + 1);
1126                 ret_error = 4;
1127         }
1128         goto done;
1129
1130 fail:
1131         free(sc->vf_res, M_DEVBUF);
1132 done:
1133         return (ret_error);
1134 }
1135
/*
 * Allocate MSI/X vectors, setup the AQ vector early
 *
 * Returns the number of vectors allocated, or 0 on failure
 * (the VF cannot operate without MSIX).
 */
static int
ixlv_init_msix(struct ixlv_sc *sc)
{
	device_t dev = sc->dev;
	int rid, want, vectors, queues, available;

	/* Map the MSIX BAR */
	rid = PCIR_BAR(IXL_BAR);
	sc->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!sc->msix_mem) {
		/* May not be enabled */
		device_printf(sc->dev,
		    "Unable to map MSIX table \n");
		goto fail;
	}

	available = pci_msix_count(dev); 
	if (available == 0) { /* system has msix disabled */
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, sc->msix_mem);
		sc->msix_mem = NULL;
		goto fail;
	}

	/* Figure out a reasonable auto config value */
	/* One vector is reserved for the admin queue, hence available - 1 */
	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;

	/* Override with hardcoded value if sane */
	if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues)) 
		queues = ixlv_max_queues;
#ifdef  RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif
	/* Enforce the VF max value */
	if (queues > IXLV_MAX_QUEUES)
		queues = IXLV_MAX_QUEUES;

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for the admin queue.
	*/
	want = queues + 1;
	if (want <= available)	/* Have enough */
		vectors = want;
	else {
		device_printf(sc->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors available but %d wanted!\n",
		    available, want);
		goto fail;
	}

#ifdef RSS
	/*
	* If we're doing RSS, the number of queues needs to
	* match the number of RSS buckets that are configured.
	*
	* + If there's more queues than RSS buckets, we'll end
	*   up with queues that get no traffic.
	*
	* + If there's more RSS buckets than queues, we'll end
	*   up having multiple RSS buckets map to the same queue,
	*   so there'll be some contention.
	*/
	if (queues != rss_getnumbuckets()) {
		device_printf(dev,
		    "%s: queues (%d) != RSS buckets (%d)"
		    "; performance will be impacted.\n",
		     __func__, queues, rss_getnumbuckets());
	}
#endif

	/*
	 * NOTE(review): a pci_alloc_msix() failure is not handled here --
	 * sc->msix / num_queues stay unset yet we still fall through and
	 * return `vectors`.  Confirm whether this should goto fail instead.
	 */
	if (pci_alloc_msix(dev, &vectors) == 0) {
		device_printf(sc->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		sc->msix = vectors;
		sc->vsi.num_queues = queues;
	}

	/*
	** Explicitly set the guest PCI BUSMASTER capability
	** and we must rewrite the ENABLE in the MSIX control
	** register again at this point to cause the host to
	** successfully initialize us.
	*/
	{
		u16 pci_cmd_word;
		int msix_ctrl;
		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
		/* rid is reused here as the MSIX capability offset */
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	/* Next we need to setup the vector for the Admin Queue */
	rid = 1;	// zero vector + 1
	sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->res == NULL) {
		device_printf(dev,"Unable to allocate"
		    " bus resource: AQ interrupt \n");
		goto fail;
	}
	if (bus_setup_intr(dev, sc->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixlv_msix_adminq, sc, &sc->tag)) {
		sc->res = NULL;
		device_printf(dev, "Failed to register AQ handler");
		goto fail;
	}
	bus_describe_intr(dev, sc->res, sc->tag, "adminq");

	return (vectors);

fail:
	/* The VF driver MUST use MSIX */
	return (0);
}
1263
1264 static int
1265 ixlv_allocate_pci_resources(struct ixlv_sc *sc)
1266 {
1267         int             rid;
1268         device_t        dev = sc->dev;
1269
1270         rid = PCIR_BAR(0);
1271         sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1272             &rid, RF_ACTIVE);
1273
1274         if (!(sc->pci_mem)) {
1275                 device_printf(dev,"Unable to allocate bus resource: memory\n");
1276                 return (ENXIO);
1277         }
1278
1279         sc->osdep.mem_bus_space_tag =
1280                 rman_get_bustag(sc->pci_mem);
1281         sc->osdep.mem_bus_space_handle =
1282                 rman_get_bushandle(sc->pci_mem);
1283         sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
1284         sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
1285         sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
1286
1287         sc->hw.back = &sc->osdep;
1288
1289         /* Disable adminq interrupts */
1290         ixlv_disable_adminq_irq(&sc->hw);
1291
1292         /*
1293         ** Now setup MSI/X, it will return
1294         ** us the number of supported vectors
1295         */
1296         sc->msix = ixlv_init_msix(sc);
1297
1298         /* We fail without MSIX support */
1299         if (sc->msix == 0)
1300                 return (ENXIO);
1301
1302         return (0);
1303 }
1304
1305 static void
1306 ixlv_free_pci_resources(struct ixlv_sc *sc)
1307 {
1308         struct ixl_vsi         *vsi = &sc->vsi;
1309         struct ixl_queue       *que = vsi->queues;
1310         device_t                dev = sc->dev;
1311
1312         /* We may get here before stations are setup */
1313         if (que == NULL)
1314                 goto early;
1315
1316         /*
1317         **  Release all msix queue resources:
1318         */
1319         for (int i = 0; i < vsi->num_queues; i++, que++) {
1320                 int rid = que->msix + 1;
1321                 if (que->tag != NULL) {
1322                         bus_teardown_intr(dev, que->res, que->tag);
1323                         que->tag = NULL;
1324                 }
1325                 if (que->res != NULL)
1326                         bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1327         }
1328         
1329 early:
1330         /* Clean the AdminQ interrupt */
1331         if (sc->tag != NULL) {
1332                 bus_teardown_intr(dev, sc->res, sc->tag);
1333                 sc->tag = NULL;
1334         }
1335         if (sc->res != NULL)
1336                 bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
1337
1338         pci_release_msi(dev);
1339
1340         if (sc->msix_mem != NULL)
1341                 bus_release_resource(dev, SYS_RES_MEMORY,
1342                     PCIR_BAR(IXL_BAR), sc->msix_mem);
1343
1344         if (sc->pci_mem != NULL)
1345                 bus_release_resource(dev, SYS_RES_MEMORY,
1346                     PCIR_BAR(0), sc->pci_mem);
1347
1348         return;
1349 }
1350
1351 /*
1352  * Create taskqueue and tasklet for Admin Queue interrupts.
1353  */
1354 static int
1355 ixlv_init_taskqueue(struct ixlv_sc *sc)
1356 {
1357         int error = 0;
1358
1359         TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);
1360
1361         sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1362             taskqueue_thread_enqueue, &sc->tq);
1363         taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
1364             device_get_nameunit(sc->dev));
1365
1366         return (error);
1367 }
1368
1369 /*********************************************************************
1370  *
1371  *  Setup MSIX Interrupt resources and handlers for the VSI queues
1372  *
1373  **********************************************************************/
1374 static int
1375 ixlv_assign_msix(struct ixlv_sc *sc)
1376 {
1377         device_t        dev = sc->dev;
1378         struct          ixl_vsi *vsi = &sc->vsi;
1379         struct          ixl_queue *que = vsi->queues;
1380         struct          tx_ring  *txr;
1381         int             error, rid, vector = 1;
1382
1383         for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1384                 int cpu_id = i;
1385                 rid = vector + 1;
1386                 txr = &que->txr;
1387                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1388                     RF_SHAREABLE | RF_ACTIVE);
1389                 if (que->res == NULL) {
1390                         device_printf(dev,"Unable to allocate"
1391                             " bus resource: que interrupt [%d]\n", vector);
1392                         return (ENXIO);
1393                 }
1394                 /* Set the handler function */
1395                 error = bus_setup_intr(dev, que->res,
1396                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
1397                     ixlv_msix_que, que, &que->tag);
1398                 if (error) {
1399                         que->res = NULL;
1400                         device_printf(dev, "Failed to register que handler");
1401                         return (error);
1402                 }
1403                 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1404                 /* Bind the vector to a CPU */
1405 #ifdef RSS
1406                 cpu_id = rss_getcpu(i % rss_getnumbuckets());
1407 #endif
1408                 bus_bind_intr(dev, que->res, cpu_id);
1409                 que->msix = vector;
1410                 vsi->que_mask |= (u64)(1 << que->msix);
1411                 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1412                 TASK_INIT(&que->task, 0, ixlv_handle_que, que);
1413                 que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
1414                     taskqueue_thread_enqueue, &que->tq);
1415 #ifdef RSS
1416                 taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
1417                     cpu_id, "%s (bucket %d)",
1418                     device_get_nameunit(dev), cpu_id);
1419 #else
1420                 taskqueue_start_threads(&que->tq, 1, PI_NET,
1421                     "%s que", device_get_nameunit(dev));
1422 #endif
1423
1424         }
1425
1426         return (0);
1427 }
1428
1429 /*
1430 ** Requests a VF reset from the PF.
1431 **
1432 ** Requires the VF's Admin Queue to be initialized.
1433 */
1434 static int
1435 ixlv_reset(struct ixlv_sc *sc)
1436 {
1437         struct i40e_hw  *hw = &sc->hw;
1438         device_t        dev = sc->dev;
1439         int             error = 0;
1440
1441         /* Ask the PF to reset us if we are initiating */
1442         if (sc->init_state != IXLV_RESET_PENDING)
1443                 ixlv_request_reset(sc);
1444
1445         i40e_msec_delay(100);
1446         error = ixlv_reset_complete(hw);
1447         if (error) {
1448                 device_printf(dev, "%s: VF reset failed\n",
1449                     __func__);
1450                 return (error);
1451         }
1452
1453         error = i40e_shutdown_adminq(hw);
1454         if (error) {
1455                 device_printf(dev, "%s: shutdown_adminq failed: %d\n",
1456                     __func__, error);
1457                 return (error);
1458         }
1459
1460         error = i40e_init_adminq(hw);
1461         if (error) {
1462                 device_printf(dev, "%s: init_adminq failed: %d\n",
1463                     __func__, error);
1464                 return(error);
1465         }
1466
1467         return (0);
1468 }
1469
1470 static int
1471 ixlv_reset_complete(struct i40e_hw *hw)
1472 {
1473         u32 reg;
1474
1475         for (int i = 0; i < 100; i++) {
1476                 reg = rd32(hw, I40E_VFGEN_RSTAT) &
1477                     I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1478
1479                 if ((reg == I40E_VFR_VFACTIVE) ||
1480                     (reg == I40E_VFR_COMPLETED))
1481                         return (0);
1482                 i40e_msec_delay(100);
1483         }
1484
1485         return (EBUSY);
1486 }
1487

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 *  Allocates the ifnet, fills in callbacks and capabilities, and
 *  attaches it with the MAC address obtained from the PF.  Returns
 *  0 on success, -1 if the ifnet cannot be allocated.
 *
 **********************************************************************/
static int
ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
{
	struct ifnet		*ifp;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;

	INIT_DBG_DEV(dev, "begin");

	ifp = vsi->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "%s: could not allocate ifnet"
		    " structure!\n", __func__);
		return (-1);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	ifp->if_mtu = ETHERMTU;
	/* NOTE(review): 40Gb in bits/sec overflows a u32; verify baudrate */
	ifp->if_baudrate = 4000000000;  // ??
	ifp->if_init = ixlv_init;
	ifp->if_softc = vsi;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixlv_ioctl;

#if __FreeBSD_version >= 1100000
	if_setgetcounterfn(ifp, ixl_get_counter);
#endif

	ifp->if_transmit = ixl_mq_start;

	ifp->if_qflush = ixl_qflush;
	/* Leave two descriptors of slack for the head writeback slot */
	ifp->if_snd.ifq_maxlen = que->num_desc - 2;

	ether_ifattach(ifp, sc->hw.mac.addr);

	vsi->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Advertise offload capabilities, all enabled by default */
	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_TSO;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;

	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU
			     |  IFCAP_VLAN_HWCSUM
			     |  IFCAP_LRO;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	** Don't turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the ixl driver you can
	** enable this and get full hardware tag filtering.
	*/
	/* Deliberately added AFTER capenable so it stays opt-in */
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
		     ixlv_media_status);

	// JFV Add media types later?

	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	INIT_DBG_DEV(dev, "end");
	return (0);
}
1576
1577 /*
1578 ** Allocate and setup the interface queues
1579 */
1580 static int
1581 ixlv_setup_queues(struct ixlv_sc *sc)
1582 {
1583         device_t                dev = sc->dev;
1584         struct ixl_vsi          *vsi;
1585         struct ixl_queue        *que;
1586         struct tx_ring          *txr;
1587         struct rx_ring          *rxr;
1588         int                     rsize, tsize;
1589         int                     error = I40E_SUCCESS;
1590
1591         vsi = &sc->vsi;
1592         vsi->back = (void *)sc;
1593         vsi->hw = &sc->hw;
1594         vsi->num_vlans = 0;
1595
1596         /* Get memory for the station queues */
1597         if (!(vsi->queues =
1598                 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
1599                 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1600                         device_printf(dev, "Unable to allocate queue memory\n");
1601                         error = ENOMEM;
1602                         goto early;
1603         }
1604
1605         for (int i = 0; i < vsi->num_queues; i++) {
1606                 que = &vsi->queues[i];
1607                 que->num_desc = ixlv_ringsz;
1608                 que->me = i;
1609                 que->vsi = vsi;
1610                 /* mark the queue as active */
1611                 vsi->active_queues |= (u64)1 << que->me;
1612
1613                 txr = &que->txr;
1614                 txr->que = que;
1615                 txr->tail = I40E_QTX_TAIL1(que->me);
1616                 /* Initialize the TX lock */
1617                 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
1618                     device_get_nameunit(dev), que->me);
1619                 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
1620                 /*
1621                 ** Create the TX descriptor ring, the extra int is
1622                 ** added as the location for HEAD WB.
1623                 */
1624                 tsize = roundup2((que->num_desc *
1625                     sizeof(struct i40e_tx_desc)) +
1626                     sizeof(u32), DBA_ALIGN);
1627                 if (i40e_allocate_dma_mem(&sc->hw,
1628                     &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
1629                         device_printf(dev,
1630                             "Unable to allocate TX Descriptor memory\n");
1631                         error = ENOMEM;
1632                         goto fail;
1633                 }
1634                 txr->base = (struct i40e_tx_desc *)txr->dma.va;
1635                 bzero((void *)txr->base, tsize);
1636                 /* Now allocate transmit soft structs for the ring */
1637                 if (ixl_allocate_tx_data(que)) {
1638                         device_printf(dev,
1639                             "Critical Failure setting up TX structures\n");
1640                         error = ENOMEM;
1641                         goto fail;
1642                 }
1643                 /* Allocate a buf ring */
1644                 txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
1645                     M_WAITOK, &txr->mtx);
1646                 if (txr->br == NULL) {
1647                         device_printf(dev,
1648                             "Critical Failure setting up TX buf ring\n");
1649                         error = ENOMEM;
1650                         goto fail;
1651                 }
1652
1653                 /*
1654                  * Next the RX queues...
1655                  */ 
1656                 rsize = roundup2(que->num_desc *
1657                     sizeof(union i40e_rx_desc), DBA_ALIGN);
1658                 rxr = &que->rxr;
1659                 rxr->que = que;
1660                 rxr->tail = I40E_QRX_TAIL1(que->me);
1661
1662                 /* Initialize the RX side lock */
1663                 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
1664                     device_get_nameunit(dev), que->me);
1665                 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
1666
1667                 if (i40e_allocate_dma_mem(&sc->hw,
1668                     &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
1669                         device_printf(dev,
1670                             "Unable to allocate RX Descriptor memory\n");
1671                         error = ENOMEM;
1672                         goto fail;
1673                 }
1674                 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
1675                 bzero((void *)rxr->base, rsize);
1676
1677                 /* Allocate receive soft structs for the ring*/
1678                 if (ixl_allocate_rx_data(que)) {
1679                         device_printf(dev,
1680                             "Critical Failure setting up receive structs\n");
1681                         error = ENOMEM;
1682                         goto fail;
1683                 }
1684         }
1685
1686         return (0);
1687
1688 fail:
1689         free(vsi->queues, M_DEVBUF);
1690         for (int i = 0; i < vsi->num_queues; i++) {
1691                 que = &vsi->queues[i];
1692                 rxr = &que->rxr;
1693                 txr = &que->txr;
1694                 if (rxr->base)
1695                         i40e_free_dma_mem(&sc->hw, &rxr->dma);
1696                 if (txr->base)
1697                         i40e_free_dma_mem(&sc->hw, &txr->dma);
1698         }
1699
1700 early:
1701         return (error);
1702 }
1703
1704 /*
1705 ** This routine is run via an vlan config EVENT,
1706 ** it enables us to use the HW Filter table since
1707 ** we can get the vlan id. This just creates the
1708 ** entry in the soft version of the VFTA, init will
1709 ** repopulate the real table.
1710 */
1711 static void
1712 ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1713 {
1714         struct ixl_vsi                  *vsi = ifp->if_softc;
1715         struct ixlv_sc          *sc = vsi->back;
1716         struct ixlv_vlan_filter *v;
1717
1718
1719         if (ifp->if_softc !=  arg)   /* Not our event */
1720                 return;
1721
1722         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1723                 return;
1724
1725         /* Sanity check - make sure it doesn't already exist */
1726         SLIST_FOREACH(v, sc->vlan_filters, next) {
1727                 if (v->vlan == vtag)
1728                         return;
1729         }
1730
1731         mtx_lock(&sc->mtx);
1732         ++vsi->num_vlans;
1733         v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
1734         SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
1735         v->vlan = vtag;
1736         v->flags = IXL_FILTER_ADD;
1737         ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
1738             IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
1739         mtx_unlock(&sc->mtx);
1740         return;
1741 }
1742
1743 /*
1744 ** This routine is run via an vlan
1745 ** unconfig EVENT, remove our entry
1746 ** in the soft vfta.
1747 */
1748 static void
1749 ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1750 {
1751         struct ixl_vsi                  *vsi = ifp->if_softc;
1752         struct ixlv_sc          *sc = vsi->back;
1753         struct ixlv_vlan_filter *v;
1754         int                             i = 0;
1755         
1756         if (ifp->if_softc !=  arg)
1757                 return;
1758
1759         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1760                 return;
1761
1762         mtx_lock(&sc->mtx);
1763         SLIST_FOREACH(v, sc->vlan_filters, next) {
1764                 if (v->vlan == vtag) {
1765                         v->flags = IXL_FILTER_DEL;
1766                         ++i;
1767                         --vsi->num_vlans;
1768                 }
1769         }
1770         if (i)
1771                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
1772                     IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
1773         mtx_unlock(&sc->mtx);
1774         return;
1775 }
1776
1777 /*
1778 ** Get a new filter and add it to the mac filter list.
1779 */
1780 static struct ixlv_mac_filter *
1781 ixlv_get_mac_filter(struct ixlv_sc *sc)
1782 {
1783         struct ixlv_mac_filter  *f;
1784
1785         f = malloc(sizeof(struct ixlv_mac_filter),
1786             M_DEVBUF, M_NOWAIT | M_ZERO);
1787         if (f)
1788                 SLIST_INSERT_HEAD(sc->mac_filters, f, next);
1789
1790         return (f);
1791 }
1792
1793 /*
1794 ** Find the filter with matching MAC address
1795 */
1796 static struct ixlv_mac_filter *
1797 ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
1798 {
1799         struct ixlv_mac_filter  *f;
1800         bool                            match = FALSE;
1801
1802         SLIST_FOREACH(f, sc->mac_filters, next) {
1803                 if (cmp_etheraddr(f->macaddr, macaddr)) {
1804                         match = TRUE;
1805                         break;
1806                 }
1807         }       
1808
1809         if (!match)
1810                 f = NULL;
1811         return (f);
1812 }
1813
1814 /*
1815 ** Admin Queue interrupt handler
1816 */
/*
 * Admin-queue MSIX handler: acknowledge the interrupt, log any
 * latched ARQ/ASQ error bits, re-enable the interrupt causes, and
 * defer actual admin-queue processing to the sc->aq_irq task.
 */
static void
ixlv_msix_adminq(void *arg)
{
	struct ixlv_sc	*sc = arg;
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	u32		reg, mask, oldreg;

	/* Read the cause register and the currently-enabled causes */
	reg = rd32(hw, I40E_VFINT_ICR01);
	mask = rd32(hw, I40E_VFINT_ICR0_ENA1);

	/*
	 * NOTE(review): CLEARPBA is set using the PF macro
	 * (I40E_PFINT_DYN_CTL0_CLEARPBA_MASK) on a VF register;
	 * presumably the bit position matches the VF definition —
	 * confirm against the register header.
	 */
	reg = rd32(hw, I40E_VFINT_DYN_CTL01);
	reg |= I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_VFINT_DYN_CTL01, reg);

	/* check for Admin queue errors */
	oldreg = reg = rd32(hw, hw->aq.arq.len);
	if (reg & I40E_VF_ARQLEN_ARQVFE_MASK) {
		device_printf(dev, "ARQ VF Error detected\n");
		reg &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
	}
	if (reg & I40E_VF_ARQLEN_ARQOVFL_MASK) {
		device_printf(dev, "ARQ Overflow Error detected\n");
		reg &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
	}
	if (reg & I40E_VF_ARQLEN_ARQCRIT_MASK) {
		device_printf(dev, "ARQ Critical Error detected\n");
		reg &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
	}
	/* Write back only if an error bit was cleared */
	if (oldreg != reg)
		wr32(hw, hw->aq.arq.len, reg);

	oldreg = reg = rd32(hw, hw->aq.asq.len);
	if (reg & I40E_VF_ATQLEN_ATQVFE_MASK) {
		device_printf(dev, "ASQ VF Error detected\n");
		reg &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
	}
	if (reg & I40E_VF_ATQLEN_ATQOVFL_MASK) {
		device_printf(dev, "ASQ Overflow Error detected\n");
		reg &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
	}
	if (reg & I40E_VF_ATQLEN_ATQCRIT_MASK) {
		device_printf(dev, "ASQ Critical Error detected\n");
		reg &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
	}
	/* Write back only if an error bit was cleared */
	if (oldreg != reg)
		wr32(hw, hw->aq.asq.len, reg);

	/* re-enable interrupt causes */
	wr32(hw, I40E_VFINT_ICR0_ENA1, mask);
	wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);

	/* schedule task */
	taskqueue_enqueue(sc->tq, &sc->aq_irq);
	return;
}
1873
1874 void
1875 ixlv_enable_intr(struct ixl_vsi *vsi)
1876 {
1877         struct i40e_hw          *hw = vsi->hw;
1878         struct ixl_queue        *que = vsi->queues;
1879
1880         ixlv_enable_adminq_irq(hw);
1881         for (int i = 0; i < vsi->num_queues; i++, que++)
1882                 ixlv_enable_queue_irq(hw, que->me);
1883 }
1884
1885 void
1886 ixlv_disable_intr(struct ixl_vsi *vsi)
1887 {
1888         struct i40e_hw          *hw = vsi->hw;
1889         struct ixl_queue       *que = vsi->queues;
1890
1891         ixlv_disable_adminq_irq(hw);
1892         for (int i = 0; i < vsi->num_queues; i++, que++)
1893                 ixlv_disable_queue_irq(hw, que->me);
1894 }
1895
1896
1897 static void
1898 ixlv_disable_adminq_irq(struct i40e_hw *hw)
1899 {
1900         wr32(hw, I40E_VFINT_DYN_CTL01, 0);
1901         wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
1902         /* flush */
1903         rd32(hw, I40E_VFGEN_RSTAT);
1904         return;
1905 }
1906
1907 static void
1908 ixlv_enable_adminq_irq(struct i40e_hw *hw)
1909 {
1910         wr32(hw, I40E_VFINT_DYN_CTL01,
1911             I40E_VFINT_DYN_CTL01_INTENA_MASK |
1912             I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1913         wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
1914         /* flush */
1915         rd32(hw, I40E_VFGEN_RSTAT);
1916         return;
1917 }
1918
1919 static void
1920 ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
1921 {
1922         u32             reg;
1923
1924         reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1925             I40E_VFINT_DYN_CTLN_CLEARPBA_MASK; 
1926         wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
1927 }
1928
1929 static void
1930 ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
1931 {
1932         wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
1933         rd32(hw, I40E_VFGEN_RSTAT);
1934         return;
1935 }
1936
1937
1938 /*
1939 ** Provide a update to the queue RX
1940 ** interrupt moderation value.
1941 */
1942 static void
1943 ixlv_set_queue_rx_itr(struct ixl_queue *que)
1944 {
1945         struct ixl_vsi  *vsi = que->vsi;
1946         struct i40e_hw  *hw = vsi->hw;
1947         struct rx_ring  *rxr = &que->rxr;
1948         u16             rx_itr;
1949         u16             rx_latency = 0;
1950         int             rx_bytes;
1951
1952
1953         /* Idle, do nothing */
1954         if (rxr->bytes == 0)
1955                 return;
1956
1957         if (ixlv_dynamic_rx_itr) {
1958                 rx_bytes = rxr->bytes/rxr->itr;
1959                 rx_itr = rxr->itr;
1960
1961                 /* Adjust latency range */
1962                 switch (rxr->latency) {
1963                 case IXL_LOW_LATENCY:
1964                         if (rx_bytes > 10) {
1965                                 rx_latency = IXL_AVE_LATENCY;
1966                                 rx_itr = IXL_ITR_20K;
1967                         }
1968                         break;
1969                 case IXL_AVE_LATENCY:
1970                         if (rx_bytes > 20) {
1971                                 rx_latency = IXL_BULK_LATENCY;
1972                                 rx_itr = IXL_ITR_8K;
1973                         } else if (rx_bytes <= 10) {
1974                                 rx_latency = IXL_LOW_LATENCY;
1975                                 rx_itr = IXL_ITR_100K;
1976                         }
1977                         break;
1978                 case IXL_BULK_LATENCY:
1979                         if (rx_bytes <= 20) {
1980                                 rx_latency = IXL_AVE_LATENCY;
1981                                 rx_itr = IXL_ITR_20K;
1982                         }
1983                         break;
1984                  }
1985
1986                 rxr->latency = rx_latency;
1987
1988                 if (rx_itr != rxr->itr) {
1989                         /* do an exponential smoothing */
1990                         rx_itr = (10 * rx_itr * rxr->itr) /
1991                             ((9 * rx_itr) + rxr->itr);
1992                         rxr->itr = rx_itr & IXL_MAX_ITR;
1993                         wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1994                             que->me), rxr->itr);
1995                 }
1996         } else { /* We may have have toggled to non-dynamic */
1997                 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
1998                         vsi->rx_itr_setting = ixlv_rx_itr;
1999                 /* Update the hardware if needed */
2000                 if (rxr->itr != vsi->rx_itr_setting) {
2001                         rxr->itr = vsi->rx_itr_setting;
2002                         wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
2003                             que->me), rxr->itr);
2004                 }
2005         }
2006         rxr->bytes = 0;
2007         rxr->packets = 0;
2008         return;
2009 }
2010
2011
2012 /*
2013 ** Provide a update to the queue TX
2014 ** interrupt moderation value.
2015 */
2016 static void
2017 ixlv_set_queue_tx_itr(struct ixl_queue *que)
2018 {
2019         struct ixl_vsi  *vsi = que->vsi;
2020         struct i40e_hw  *hw = vsi->hw;
2021         struct tx_ring  *txr = &que->txr;
2022         u16             tx_itr;
2023         u16             tx_latency = 0;
2024         int             tx_bytes;
2025
2026
2027         /* Idle, do nothing */
2028         if (txr->bytes == 0)
2029                 return;
2030
2031         if (ixlv_dynamic_tx_itr) {
2032                 tx_bytes = txr->bytes/txr->itr;
2033                 tx_itr = txr->itr;
2034
2035                 switch (txr->latency) {
2036                 case IXL_LOW_LATENCY:
2037                         if (tx_bytes > 10) {
2038                                 tx_latency = IXL_AVE_LATENCY;
2039                                 tx_itr = IXL_ITR_20K;
2040                         }
2041                         break;
2042                 case IXL_AVE_LATENCY:
2043                         if (tx_bytes > 20) {
2044                                 tx_latency = IXL_BULK_LATENCY;
2045                                 tx_itr = IXL_ITR_8K;
2046                         } else if (tx_bytes <= 10) {
2047                                 tx_latency = IXL_LOW_LATENCY;
2048                                 tx_itr = IXL_ITR_100K;
2049                         }
2050                         break;
2051                 case IXL_BULK_LATENCY:
2052                         if (tx_bytes <= 20) {
2053                                 tx_latency = IXL_AVE_LATENCY;
2054                                 tx_itr = IXL_ITR_20K;
2055                         }
2056                         break;
2057                 }
2058
2059                 txr->latency = tx_latency;
2060
2061                 if (tx_itr != txr->itr) {
2062                  /* do an exponential smoothing */
2063                         tx_itr = (10 * tx_itr * txr->itr) /
2064                             ((9 * tx_itr) + txr->itr);
2065                         txr->itr = tx_itr & IXL_MAX_ITR;
2066                         wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2067                             que->me), txr->itr);
2068                 }
2069
2070         } else { /* We may have have toggled to non-dynamic */
2071                 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2072                         vsi->tx_itr_setting = ixlv_tx_itr;
2073                 /* Update the hardware if needed */
2074                 if (txr->itr != vsi->tx_itr_setting) {
2075                         txr->itr = vsi->tx_itr_setting;
2076                         wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2077                             que->me), txr->itr);
2078                 }
2079         }
2080         txr->bytes = 0;
2081         txr->packets = 0;
2082         return;
2083 }
2084
2085
2086 /*
2087 **
2088 ** MSIX Interrupt Handlers and Tasklets
2089 **
2090 */
2091 static void
2092 ixlv_handle_que(void *context, int pending)
2093 {
2094         struct ixl_queue *que = context;
2095         struct ixl_vsi *vsi = que->vsi;
2096         struct i40e_hw  *hw = vsi->hw;
2097         struct tx_ring  *txr = &que->txr;
2098         struct ifnet    *ifp = vsi->ifp;
2099         bool            more;
2100
2101         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2102                 more = ixl_rxeof(que, IXL_RX_LIMIT);
2103                 mtx_lock(&txr->mtx);
2104                 ixl_txeof(que);
2105                 if (!drbr_empty(ifp, txr->br))
2106                         ixl_mq_start_locked(ifp, txr);
2107                 mtx_unlock(&txr->mtx);
2108                 if (more) {
2109                         taskqueue_enqueue(que->tq, &que->task);
2110                         return;
2111                 }
2112         }
2113
2114         /* Reenable this interrupt - hmmm */
2115         ixlv_enable_queue_irq(hw, que->me);
2116         return;
2117 }
2118
2119
2120 /*********************************************************************
2121  *
2122  *  MSIX Queue Interrupt Service routine
2123  *
2124  **********************************************************************/
2125 static void
2126 ixlv_msix_que(void *arg)
2127 {
2128         struct ixl_queue        *que = arg;
2129         struct ixl_vsi  *vsi = que->vsi;
2130         struct i40e_hw  *hw = vsi->hw;
2131         struct tx_ring  *txr = &que->txr;
2132         bool            more_tx, more_rx;
2133
2134         /* Spurious interrupts are ignored */
2135         if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
2136                 return;
2137
2138         ++que->irqs;
2139
2140         more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
2141
2142         mtx_lock(&txr->mtx);
2143         more_tx = ixl_txeof(que);
2144         /*
2145         ** Make certain that if the stack 
2146         ** has anything queued the task gets
2147         ** scheduled to handle it.
2148         */
2149         if (!drbr_empty(vsi->ifp, txr->br))
2150                 more_tx = 1;
2151         mtx_unlock(&txr->mtx);
2152
2153         ixlv_set_queue_rx_itr(que);
2154         ixlv_set_queue_tx_itr(que);
2155
2156         if (more_tx || more_rx)
2157                 taskqueue_enqueue(que->tq, &que->task);
2158         else
2159                 ixlv_enable_queue_irq(hw, que->me);
2160
2161         return;
2162 }
2163
2164
2165 /*********************************************************************
2166  *
2167  *  Media Ioctl callback
2168  *
2169  *  This routine is called whenever the user queries the status of
2170  *  the interface using ifconfig.
2171  *
2172  **********************************************************************/
2173 static void
2174 ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
2175 {
2176         struct ixl_vsi          *vsi = ifp->if_softc;
2177         struct ixlv_sc  *sc = vsi->back;
2178
2179         INIT_DBG_IF(ifp, "begin");
2180
2181         mtx_lock(&sc->mtx);
2182
2183         ixlv_update_link_status(sc);
2184
2185         ifmr->ifm_status = IFM_AVALID;
2186         ifmr->ifm_active = IFM_ETHER;
2187
2188         if (!vsi->link_up) {
2189                 mtx_unlock(&sc->mtx);
2190                 INIT_DBG_IF(ifp, "end: link not up");
2191                 return;
2192         }
2193
2194         ifmr->ifm_status |= IFM_ACTIVE;
2195         /* Hardware is always full-duplex */
2196         ifmr->ifm_active |= IFM_FDX;
2197         mtx_unlock(&sc->mtx);
2198         INIT_DBG_IF(ifp, "end");
2199         return;
2200 }
2201
2202 /*********************************************************************
2203  *
2204  *  Media Ioctl callback
2205  *
2206  *  This routine is called when the user changes speed/duplex using
2207  *  media/mediopt option with ifconfig.
2208  *
2209  **********************************************************************/
2210 static int
2211 ixlv_media_change(struct ifnet * ifp)
2212 {
2213         struct ixl_vsi *vsi = ifp->if_softc;
2214         struct ifmedia *ifm = &vsi->media;
2215
2216         INIT_DBG_IF(ifp, "begin");
2217
2218         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2219                 return (EINVAL);
2220
2221         INIT_DBG_IF(ifp, "end");
2222         return (0);
2223 }
2224
2225
2226 /*********************************************************************
2227  *  Multicast Initialization
2228  *
2229  *  This routine is called by init to reset a fresh state.
2230  *
2231  **********************************************************************/
2232
2233 static void
2234 ixlv_init_multi(struct ixl_vsi *vsi)
2235 {
2236         struct ixlv_mac_filter *f;
2237         struct ixlv_sc  *sc = vsi->back;
2238         int                     mcnt = 0;
2239
2240         IOCTL_DBG_IF(vsi->ifp, "begin");
2241
2242         /* First clear any multicast filters */
2243         SLIST_FOREACH(f, sc->mac_filters, next) {
2244                 if ((f->flags & IXL_FILTER_USED)
2245                     && (f->flags & IXL_FILTER_MC)) {
2246                         f->flags |= IXL_FILTER_DEL;
2247                         mcnt++;
2248                 }
2249         }
2250         if (mcnt > 0)
2251                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2252                     IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2253                     sc);
2254
2255         IOCTL_DBG_IF(vsi->ifp, "end");
2256 }
2257
2258 static void
2259 ixlv_add_multi(struct ixl_vsi *vsi)
2260 {
2261         struct ifmultiaddr      *ifma;
2262         struct ifnet            *ifp = vsi->ifp;
2263         struct ixlv_sc  *sc = vsi->back;
2264         int                     mcnt = 0;
2265
2266         IOCTL_DBG_IF(ifp, "begin");
2267
2268         if_maddr_rlock(ifp);
2269         /*
2270         ** Get a count, to decide if we
2271         ** simply use multicast promiscuous.
2272         */
2273         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2274                 if (ifma->ifma_addr->sa_family != AF_LINK)
2275                         continue;
2276                 mcnt++;
2277         }
2278         if_maddr_runlock(ifp);
2279
2280         // TODO: Remove -- cannot set promiscuous mode in a VF
2281         if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
2282                 /* delete all multicast filters */
2283                 ixlv_init_multi(vsi);
2284                 sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
2285                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2286                     IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
2287                     sc);
2288                 IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
2289                 return;
2290         }
2291
2292         mcnt = 0;
2293         if_maddr_rlock(ifp);
2294         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2295                 if (ifma->ifma_addr->sa_family != AF_LINK)
2296                         continue;
2297                 if (!ixlv_add_mac_filter(sc,
2298                     (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2299                     IXL_FILTER_MC))
2300                         mcnt++;
2301         }
2302         if_maddr_runlock(ifp);
2303         /*
2304         ** Notify AQ task that sw filters need to be
2305         ** added to hw list
2306         */
2307         if (mcnt > 0)
2308                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2309                     IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
2310                     sc);
2311
2312         IOCTL_DBG_IF(ifp, "end");
2313 }
2314
/*
 * Mark for deletion any soft multicast filter whose address is no
 * longer on the interface's multicast list, then queue a single
 * delete command if anything was newly marked.
 */
static void
ixlv_del_multi(struct ixl_vsi *vsi)
{
	struct ixlv_mac_filter *f;
	struct ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct ixlv_sc	*sc = vsi->back;
	int			mcnt = 0;	/* filters newly marked for deletion */
	bool		match = FALSE;

	IOCTL_DBG_IF(ifp, "begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if ((f->flags & IXL_FILTER_USED)
		    && (f->flags & IXL_FILTER_MC)) {
			/* check if mac address in filter is in sc's list */
			match = FALSE;
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr =
				    (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			/* if this filter is not in the sc's list, remove it */
			if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
				IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
				    MAC_FORMAT_ARGS(f->macaddr));
			}
			else if (match == FALSE)
				/* stale but already marked on an earlier pass */
				IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
				    MAC_FORMAT_ARGS(f->macaddr));
		}
	}
	if_maddr_runlock(ifp);

	/* Ask the AQ task to flush the marked filters to the PF */
	if (mcnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
		    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
		    sc);

	IOCTL_DBG_IF(ifp, "end");
}
2365
2366 /*********************************************************************
2367  *  Timer routine
2368  *
2369  *  This routine checks for link status,updates statistics,
2370  *  and runs the watchdog check.
2371  *
2372  **********************************************************************/
2373
static void
ixlv_local_timer(void *arg)
{
	struct ixlv_sc	*sc = arg;
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = sc->dev;
	int			hung = 0;	/* queues currently declared hung */
	u32			mask, val;

	IXLV_CORE_LOCK_ASSERT(sc);

	/* If Reset is in progress just bail */
	if (sc->init_state == IXLV_RESET_PENDING)
		return;

	/* Check for when PF triggers a VF reset */
	val = rd32(hw, I40E_VFGEN_RSTAT) &
	    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

	/* NB: returning here also stops rescheduling the callout */
	if (val != I40E_VFR_VFACTIVE
	    && val != I40E_VFR_COMPLETED) {
		DDPRINTF(dev, "reset in progress! (%d)", val);
		return;
	}

	ixlv_request_stats(sc);

	/* clean and process any events */
	taskqueue_enqueue(sc->tq, &sc->aq_irq);

	/*
	** Check status on the queues for a hang
	*/
	mask = (I40E_VFINT_DYN_CTLN_INTENA_MASK |
	    I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK);

	for (int i = 0; i < vsi->num_queues; i++,que++) {
		/* Any queues with outstanding work get a sw irq */
		if (que->busy)
			wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to 5 we declare it hung.
		*/
		if (que->busy == IXL_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			vsi->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
				vsi->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXL_MAX_TX_BUSY) {
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
			que->busy = IXL_QUEUE_HUNG;
			++hung;
		}
	}
	/* Only reset when all queues show hung */
	if (hung == vsi->num_queues)
		goto hung;
	/* Re-arm ourselves to run again in one second */
	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
	return;

hung:
	device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
	sc->init_state = IXLV_RESET_REQUIRED;
	ixlv_init_locked(sc);
}
2449
2450 /*
2451 ** Note: this routine updates the OS on the link state
2452 **      the real check of the hardware only happens with
2453 **      a link interrupt.
2454 */
2455 void
2456 ixlv_update_link_status(struct ixlv_sc *sc)
2457 {
2458         struct ixl_vsi          *vsi = &sc->vsi;
2459         struct ifnet            *ifp = vsi->ifp;
2460         device_t                 dev = sc->dev;
2461
2462         if (vsi->link_up){ 
2463                 if (vsi->link_active == FALSE) {
2464                         if (bootverbose)
2465                                 device_printf(dev,"Link is Up, %d Gbps\n",
2466                                     (vsi->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
2467                         vsi->link_active = TRUE;
2468                         if_link_state_change(ifp, LINK_STATE_UP);
2469                 }
2470         } else { /* Link down */
2471                 if (vsi->link_active == TRUE) {
2472                         if (bootverbose)
2473                                 device_printf(dev,"Link is Down\n");
2474                         if_link_state_change(ifp, LINK_STATE_DOWN);
2475                         vsi->link_active = FALSE;
2476                 }
2477         }
2478
2479         return;
2480 }
2481
2482 /*********************************************************************
2483  *
2484  *  This routine disables all traffic on the adapter by issuing a
2485  *  global reset on the MAC and deallocates TX/RX buffers.
2486  *
2487  **********************************************************************/
2488
2489 static void
2490 ixlv_stop(struct ixlv_sc *sc)
2491 {
2492         struct ifnet *ifp;
2493         int start;
2494
2495         ifp = sc->vsi.ifp;
2496         INIT_DBG_IF(ifp, "begin");
2497
2498         IXLV_CORE_LOCK_ASSERT(sc);
2499
2500         ixl_vc_flush(&sc->vc_mgr);
2501         ixlv_disable_queues(sc);
2502
2503         start = ticks;
2504         while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
2505             ((ticks - start) < hz/10))
2506                 ixlv_do_adminq_locked(sc);
2507
2508         /* Stop the local timer */
2509         callout_stop(&sc->timer);
2510
2511         INIT_DBG_IF(ifp, "end");
2512 }
2513
2514
2515 /*********************************************************************
2516  *
2517  *  Free all station queue structs.
2518  *
2519  **********************************************************************/
2520 static void
2521 ixlv_free_queues(struct ixl_vsi *vsi)
2522 {
2523         struct ixlv_sc  *sc = (struct ixlv_sc *)vsi->back;
2524         struct ixl_queue        *que = vsi->queues;
2525
2526         for (int i = 0; i < vsi->num_queues; i++, que++) {
2527                 struct tx_ring *txr = &que->txr;
2528                 struct rx_ring *rxr = &que->rxr;
2529         
2530                 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2531                         continue;
2532                 IXL_TX_LOCK(txr);
2533                 ixl_free_que_tx(que);
2534                 if (txr->base)
2535                         i40e_free_dma_mem(&sc->hw, &txr->dma);
2536                 IXL_TX_UNLOCK(txr);
2537                 IXL_TX_LOCK_DESTROY(txr);
2538
2539                 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2540                         continue;
2541                 IXL_RX_LOCK(rxr);
2542                 ixl_free_que_rx(que);
2543                 if (rxr->base)
2544                         i40e_free_dma_mem(&sc->hw, &rxr->dma);
2545                 IXL_RX_UNLOCK(rxr);
2546                 IXL_RX_LOCK_DESTROY(rxr);
2547                 
2548         }
2549         free(vsi->queues, M_DEVBUF);
2550 }
2551
2552
2553 /*
2554 ** ixlv_config_rss - setup RSS 
2555 **
2556 ** RSS keys and table are cleared on VF reset.
2557 */
2558 static void
2559 ixlv_config_rss(struct ixlv_sc *sc)
2560 {
2561         struct i40e_hw  *hw = &sc->hw;
2562         struct ixl_vsi  *vsi = &sc->vsi;
2563         u32             lut = 0;
2564         u64             set_hena = 0, hena;
2565         int             i, j, que_id;
2566 #ifdef RSS
2567         u32             rss_hash_config;
2568         u32             rss_seed[IXL_KEYSZ];
2569 #else
2570         u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
2571                             0x183cfd8c, 0xce880440, 0x580cbc3c,
2572                             0x35897377, 0x328b25e1, 0x4fa98922,
2573                             0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
2574 #endif
2575         
2576         /* Don't set up RSS if using a single queue */
2577         if (vsi->num_queues == 1) {
2578                 wr32(hw, I40E_VFQF_HENA(0), 0);
2579                 wr32(hw, I40E_VFQF_HENA(1), 0);
2580                 ixl_flush(hw);
2581                 return;
2582         }
2583
2584 #ifdef RSS
2585         /* Fetch the configured RSS key */
2586         rss_getkey((uint8_t *) &rss_seed);
2587 #endif
2588         /* Fill out hash function seed */
2589         for (i = 0; i <= IXL_KEYSZ; i++)
2590                 wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
2591
2592         /* Enable PCTYPES for RSS: */
2593 #ifdef RSS
2594         rss_hash_config = rss_gethashconfig();
2595         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2596                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2597         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2598                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2599         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2600                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2601         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2602                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2603         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2604                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2605         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2606                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2607         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2608                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
2609 #else
2610         set_hena =
2611                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
2612                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
2613                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
2614                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
2615                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
2616                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
2617                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
2618                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
2619                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
2620                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
2621                 ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
2622 #endif
2623         hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
2624             ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
2625         hena |= set_hena;
2626         wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
2627         wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
2628
2629         /* Populate the LUT with max no. of queues in round robin fashion */
2630         for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
2631                 if (j == vsi->num_queues)
2632                         j = 0;
2633 #ifdef RSS
2634                 /*
2635                  * Fetch the RSS bucket id for the given indirection entry.
2636                  * Cap it at the number of configured buckets (which is
2637                  * num_queues.)
2638                  */
2639                 que_id = rss_get_indirection_to_bucket(i);
2640                 que_id = que_id % vsi->num_queues;
2641 #else
2642                 que_id = j;
2643 #endif
2644                 /* lut = 4-byte sliding window of 4 lut entries */
2645                 lut = (lut << 8) | (que_id & 0xF);
2646                 /* On i = 3, we have 4 entries in lut; write to the register */
2647                 if ((i & 3) == 3) {
2648                         wr32(hw, I40E_VFQF_HLUT(i), lut);
2649                         DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
2650                 }
2651         }
2652         ixl_flush(hw);
2653 }
2654
2655
2656 /*
2657 ** This routine refreshes vlan filters, called by init
2658 ** it scans the filter table and then updates the AQ
2659 */
2660 static void
2661 ixlv_setup_vlan_filters(struct ixlv_sc *sc)
2662 {
2663         struct ixl_vsi                  *vsi = &sc->vsi;
2664         struct ixlv_vlan_filter *f;
2665         int                             cnt = 0;
2666
2667         if (vsi->num_vlans == 0)
2668                 return;
2669         /*
2670         ** Scan the filter table for vlan entries,
2671         ** and if found call for the AQ update.
2672         */
2673         SLIST_FOREACH(f, sc->vlan_filters, next)
2674                 if (f->flags & IXL_FILTER_ADD)
2675                         cnt++;
2676         if (cnt > 0)
2677                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
2678                     IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
2679 }
2680
2681
2682 /*
2683 ** This routine adds new MAC filters to the sc's list;
2684 ** these are later added in hardware by sending a virtual
2685 ** channel message.
2686 */
2687 static int
2688 ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
2689 {
2690         struct ixlv_mac_filter  *f;
2691         device_t                        dev = sc->dev;
2692
2693         /* Does one already exist? */
2694         f = ixlv_find_mac_filter(sc, macaddr);
2695         if (f != NULL) {
2696                 IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
2697                     MAC_FORMAT_ARGS(macaddr));
2698                 return (EEXIST);
2699         }
2700
2701         /* If not, get a new empty filter */
2702         f = ixlv_get_mac_filter(sc);
2703         if (f == NULL) {
2704                 device_printf(dev, "%s: no filters available!!\n",
2705                     __func__);
2706                 return (ENOMEM);
2707         }
2708
2709         IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
2710             MAC_FORMAT_ARGS(macaddr));
2711
2712         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2713         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
2714         f->flags |= flags;
2715         return (0);
2716 }
2717
2718 /*
2719 ** Marks a MAC filter for deletion.
2720 */
2721 static int
2722 ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
2723 {
2724         struct ixlv_mac_filter  *f;
2725
2726         f = ixlv_find_mac_filter(sc, macaddr);
2727         if (f == NULL)
2728                 return (ENOENT);
2729
2730         f->flags |= IXL_FILTER_DEL;
2731         return (0);
2732 }
2733
2734 /*
2735 ** Tasklet handler for MSIX Adminq interrupts
2736 **  - done outside interrupt context since it might sleep
2737 */
2738 static void
2739 ixlv_do_adminq(void *context, int pending)
2740 {
2741         struct ixlv_sc          *sc = context;
2742
2743         mtx_lock(&sc->mtx);
2744         ixlv_do_adminq_locked(sc);
2745         mtx_unlock(&sc->mtx);
2746         return;
2747 }
2748
2749 static void
2750 ixlv_do_adminq_locked(struct ixlv_sc *sc)
2751 {
2752         struct i40e_hw                  *hw = &sc->hw;
2753         struct i40e_arq_event_info      event;
2754         struct i40e_virtchnl_msg        *v_msg;
2755         i40e_status                     ret;
2756         u16                             result = 0;
2757
2758         IXLV_CORE_LOCK_ASSERT(sc);
2759
2760         event.buf_len = IXL_AQ_BUF_SZ;
2761         event.msg_buf = sc->aq_buffer;
2762         v_msg = (struct i40e_virtchnl_msg *)&event.desc;
2763
2764         do {
2765                 ret = i40e_clean_arq_element(hw, &event, &result);
2766                 if (ret)
2767                         break;
2768                 ixlv_vc_completion(sc, v_msg->v_opcode,
2769                     v_msg->v_retval, event.msg_buf, event.msg_len);
2770                 if (result != 0)
2771                         bzero(event.msg_buf, IXL_AQ_BUF_SZ);
2772         } while (result);
2773
2774         ixlv_enable_adminq_irq(hw);
2775 }
2776
/*
** Build the driver's sysctl tree: driver-level counters, VSI-level
** ethernet statistics, and one node per queue with TX/RX counters
** and descriptor-tail readback handlers.
*/
static void
ixlv_add_sysctls(struct ixlv_sc *sc)
{
	device_t dev = sc->dev;
	struct ixl_vsi *vsi = &sc->vsi;
	struct i40e_eth_stats *es = &vsi->eth_stats;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);

	struct sysctl_oid *vsi_node, *queue_node;
	struct sysctl_oid_list *vsi_list, *queue_list;

#define QUEUE_NAME_LEN 32
	char queue_namebuf[QUEUE_NAME_LEN];

	struct ixl_queue *queues = vsi->queues;
	struct tx_ring *txr;
	struct rx_ring *rxr;

	/* Driver statistics sysctls (under the device root) */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
			CTLFLAG_RD, &sc->watchdog_events,
			"Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
			CTLFLAG_RD, &sc->admin_irq,
			"Admin Queue IRQ Handled");

	/* VSI statistics sysctls */
	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
				   CTLFLAG_RD, NULL, "VSI-specific statistics");
	vsi_list = SYSCTL_CHILDREN(vsi_node);

	/* Table of VSI ethernet stats -> sysctl name/description */
	struct ixl_sysctl_info ctls[] =
	{
		{&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
		{&es->rx_unicast, "ucast_pkts_rcvd",
			"Unicast Packets Received"},
		{&es->rx_multicast, "mcast_pkts_rcvd",
			"Multicast Packets Received"},
		{&es->rx_broadcast, "bcast_pkts_rcvd",
			"Broadcast Packets Received"},
		{&es->rx_discards, "rx_discards", "Discarded RX packets"},
		{&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"},
		{&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
		{&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
		{&es->tx_multicast, "mcast_pkts_txd",
			"Multicast Packets Transmitted"},
		{&es->tx_broadcast, "bcast_pkts_txd",
			"Broadcast Packets Transmitted"},
		{&es->tx_errors, "tx_errors", "TX packet errors"},
		// end marker: stat == NULL terminates the loop below
		{0,0,0}
	};
	/*
	 * NOTE(review): these VSI ethernet stats are registered under
	 * "child" (the device root), not under "vsi_list", despite the
	 * "vsi" node created above -- confirm whether that placement is
	 * intentional before relying on the OID layout.
	 */
	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
				CTLFLAG_RD, entry->stat,
				entry->description);
		entry++;
	}

	/* Queue sysctls: one "queN" node per queue under the vsi node */
	for (int q = 0; q < vsi->num_queues; q++) {
		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
					     CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		txr = &(queues[q].txr);
		rxr = &(queues[q].rxr);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
				"m_defrag() failed");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
				CTLFLAG_RD, &(queues[q].dropped_pkts),
				"Driver dropped packets");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
				CTLFLAG_RD, &(queues[q].irqs),
				"irqs on this queue");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
				CTLFLAG_RD, &(queues[q].tso),
				"TSO");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
				CTLFLAG_RD, &(queues[q].tx_dma_setup),
				"Driver tx dma failure in xmit");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
				CTLFLAG_RD, &(txr->no_desc),
				"Queue No Descriptor Available");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
				CTLFLAG_RD, &(txr->total_packets),
				"Queue Packets Transmitted");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
				CTLFLAG_RD, &(txr->tx_bytes),
				"Queue Bytes Transmitted");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
				CTLFLAG_RD, &(rxr->rx_packets),
				"Queue Packets Received");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
				CTLFLAG_RD, &(rxr->rx_bytes),
				"Queue Bytes Received");

		/*
		 * Examine queue state.
		 * NOTE(review): the OIDs are named "qtx_head"/"qrx_head"
		 * but the handlers read que->txr.tail / que->rxr.tail --
		 * confirm whether the names or the registers are intended.
		 */
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head", 
				CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
				sizeof(struct ixl_queue),
				ixlv_sysctl_qtx_tail_handler, "IU",
				"Queue Transmit Descriptor Tail");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_head", 
				CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
				sizeof(struct ixl_queue),
				ixlv_sysctl_qrx_tail_handler, "IU",
				"Queue Receive Descriptor Tail");
	}
}
2895
/*
** Allocate and initialize the MAC and VLAN filter list heads.
**
** NOTE(review): malloc() with M_NOWAIT can return NULL, and the
** result is passed unchecked to SLIST_INIT() -- a NULL dereference
** on allocation failure.  Confirm the callers can tolerate this or
** add error handling.
**
** NOTE(review): the allocations are sized as full filter entries
** (struct ixlv_mac_filter / struct ixlv_vlan_filter) but the
** pointers are used as SLIST head structures; presumably the entry
** struct is at least as large as the head -- verify against the
** type declarations.
*/
static void
ixlv_init_filters(struct ixlv_sc *sc)
{
	sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INIT(sc->mac_filters);
	sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INIT(sc->vlan_filters);
	return;
}
2907
2908 static void
2909 ixlv_free_filters(struct ixlv_sc *sc)
2910 {
2911         struct ixlv_mac_filter *f;
2912         struct ixlv_vlan_filter *v;
2913
2914         while (!SLIST_EMPTY(sc->mac_filters)) {
2915                 f = SLIST_FIRST(sc->mac_filters);
2916                 SLIST_REMOVE_HEAD(sc->mac_filters, next);
2917                 free(f, M_DEVBUF);
2918         }
2919         while (!SLIST_EMPTY(sc->vlan_filters)) {
2920                 v = SLIST_FIRST(sc->vlan_filters);
2921                 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
2922                 free(v, M_DEVBUF);
2923         }
2924         return;
2925 }
2926
2927 /**
2928  * ixlv_sysctl_qtx_tail_handler
2929  * Retrieves I40E_QTX_TAIL1 value from hardware
2930  * for a sysctl.
2931  */
2932 static int 
2933 ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
2934 {
2935         struct ixl_queue *que;
2936         int error;
2937         u32 val;
2938
2939         que = ((struct ixl_queue *)oidp->oid_arg1);
2940         if (!que) return 0;
2941
2942         val = rd32(que->vsi->hw, que->txr.tail);
2943         error = sysctl_handle_int(oidp, &val, 0, req);
2944         if (error || !req->newptr)
2945                 return error;
2946         return (0);
2947 }
2948
2949 /**
2950  * ixlv_sysctl_qrx_tail_handler
2951  * Retrieves I40E_QRX_TAIL1 value from hardware
2952  * for a sysctl.
2953  */
2954 static int 
2955 ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
2956 {
2957         struct ixl_queue *que;
2958         int error;
2959         u32 val;
2960
2961         que = ((struct ixl_queue *)oidp->oid_arg1);
2962         if (!que) return 0;
2963
2964         val = rd32(que->vsi->hw, que->rxr.tail);
2965         error = sysctl_handle_int(oidp, &val, 0, req);
2966         if (error || !req->newptr)
2967                 return error;
2968         return (0);
2969 }
2970