/*
 * sys/dev/ixl/if_ixlv.c — Intel XL710 Virtual Function (ixlv) driver.
 * Commit subject: Do not initialize the adapter on MTU change when
 * adapter status is down.
 */
1 /******************************************************************************
2
3   Copyright (c) 2013-2015, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 #ifndef IXL_STANDALONE_BUILD
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 #endif
40
41 #include "ixl.h"
42 #include "ixlv.h"
43
44 #ifdef RSS
45 #include <net/rss_config.h>
46 #endif
47
/*********************************************************************
 *  Driver version
 *********************************************************************/
/* Reported in the probe string below ("..., Version - 1.2.11-k"). */
char ixlv_driver_version[] = "1.2.11-k";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixlv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
	/* _HV variant — presumably the VF ID exposed under some hypervisors */
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

/* Indexed by the 'index' field of ixlv_vendor_info_array entries. */
static char    *ixlv_strings[] = {
	"Intel(R) Ethernet Connection XL710 VF Driver"
};
78
79
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/* newbus device interface entry points */
static int      ixlv_probe(device_t);
static int      ixlv_attach(device_t);
static int      ixlv_detach(device_t);
static int      ixlv_shutdown(device_t);
/* Init, resource allocation, and queue setup/teardown */
static void     ixlv_init_locked(struct ixlv_sc *);
static int      ixlv_allocate_pci_resources(struct ixlv_sc *);
static void     ixlv_free_pci_resources(struct ixlv_sc *);
static int      ixlv_assign_msix(struct ixlv_sc *);
static int      ixlv_init_msix(struct ixlv_sc *);
static int      ixlv_init_taskqueue(struct ixlv_sc *);
static int      ixlv_setup_queues(struct ixlv_sc *);
static void     ixlv_config_rss(struct ixlv_sc *);
static void     ixlv_stop(struct ixlv_sc *);
static void     ixlv_add_multi(struct ixl_vsi *);
static void     ixlv_del_multi(struct ixl_vsi *);
static void     ixlv_free_queues(struct ixl_vsi *);
static int      ixlv_setup_interface(device_t, struct ixlv_sc *);

/* ifmedia callbacks */
static int      ixlv_media_change(struct ifnet *);
static void     ixlv_media_status(struct ifnet *, struct ifmediareq *);

static void     ixlv_local_timer(void *);

/* MAC filter list management */
static int      ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
static int      ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void     ixlv_init_filters(struct ixlv_sc *);
static void     ixlv_free_filters(struct ixlv_sc *);

/* Interrupt handlers, deferred work, reset, and ITR tuning */
static void     ixlv_msix_que(void *);
static void     ixlv_msix_adminq(void *);
static void     ixlv_do_adminq(void *, int);
static void     ixlv_do_adminq_locked(struct ixlv_sc *sc);
static void     ixlv_handle_que(void *, int);
static int      ixlv_reset(struct ixlv_sc *);
static int      ixlv_reset_complete(struct i40e_hw *);
static void     ixlv_set_queue_rx_itr(struct ixl_queue *);
static void     ixlv_set_queue_tx_itr(struct ixl_queue *);
static void     ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
                    enum i40e_status_code);

/* Interrupt enable/disable helpers */
static void     ixlv_enable_adminq_irq(struct i40e_hw *);
static void     ixlv_disable_adminq_irq(struct i40e_hw *);
static void     ixlv_enable_queue_irq(struct i40e_hw *, int);
static void     ixlv_disable_queue_irq(struct i40e_hw *, int);

/* VLAN event handlers */
static void     ixlv_setup_vlan_filters(struct ixlv_sc *);
static void     ixlv_register_vlan(void *, struct ifnet *, u16);
static void     ixlv_unregister_vlan(void *, struct ifnet *, u16);

/* PF communications setup (see attach) */
static void     ixlv_init_hw(struct ixlv_sc *);
static int      ixlv_setup_vc(struct ixlv_sc *);
static int      ixlv_vf_config(struct ixlv_sc *);

static void     ixlv_cap_txcsum_tso(struct ixl_vsi *,
                    struct ifnet *, int);

/* sysctl plumbing */
static void     ixlv_add_sysctls(struct ixlv_sc *);
static int      ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int      ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
142
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixlv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixlv_probe),
	DEVMETHOD(device_attach, ixlv_attach),
	DEVMETHOD(device_detach, ixlv_detach),
	DEVMETHOD(device_shutdown, ixlv_shutdown),
	{0, 0}
};

/* newbus allocates one softc of this size per matched device */
static driver_t ixlv_driver = {
	"ixlv", ixlv_methods, sizeof(struct ixlv_sc),
};

devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);

MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);
165
/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
		   "IXLV driver parameters");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixlv_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixlv_ringsz, 0, "Descriptor Ring Size");

/* Set to zero to auto calculate  */
int ixlv_max_queues = 0;
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixlv_max_queues, 0, "Number of Queues");

/*
** Number of entries in Tx queue buf_ring.
** Increasing this will reduce the number of
** errors when transmitting fragmented UDP
** packets.
*/
static int ixlv_txbrsz = DEFAULT_TXBRSZ;
TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
    &ixlv_txbrsz, 0, "TX Buf Ring Size");

/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
**	- default values for static ITR
**
** NOTE(review): the ITR variables below are non-static — presumably
** referenced from other ixl/ixlv translation units; confirm before
** changing linkage.
*/
int ixlv_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixlv_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixlv_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixlv_rx_itr, 0, "RX Interrupt Rate");

int ixlv_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixlv_tx_itr, 0, "TX Interrupt Rate");
224         
225 /*********************************************************************
226  *  Device identification routine
227  *
228  *  ixlv_probe determines if the driver should be loaded on
229  *  the hardware based on PCI vendor/device id of the device.
230  *
231  *  return BUS_PROBE_DEFAULT on success, positive on failure
232  *********************************************************************/
233
234 static int
235 ixlv_probe(device_t dev)
236 {
237         ixl_vendor_info_t *ent;
238
239         u16     pci_vendor_id, pci_device_id;
240         u16     pci_subvendor_id, pci_subdevice_id;
241         char    device_name[256];
242
243 #if 0
244         INIT_DEBUGOUT("ixlv_probe: begin");
245 #endif
246
247         pci_vendor_id = pci_get_vendor(dev);
248         if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
249                 return (ENXIO);
250
251         pci_device_id = pci_get_device(dev);
252         pci_subvendor_id = pci_get_subvendor(dev);
253         pci_subdevice_id = pci_get_subdevice(dev);
254
255         ent = ixlv_vendor_info_array;
256         while (ent->vendor_id != 0) {
257                 if ((pci_vendor_id == ent->vendor_id) &&
258                     (pci_device_id == ent->device_id) &&
259
260                     ((pci_subvendor_id == ent->subvendor_id) ||
261                      (ent->subvendor_id == 0)) &&
262
263                     ((pci_subdevice_id == ent->subdevice_id) ||
264                      (ent->subdevice_id == 0))) {
265                         sprintf(device_name, "%s, Version - %s",
266                                 ixlv_strings[ent->index],
267                                 ixlv_driver_version);
268                         device_set_desc_copy(dev, device_name);
269                         return (BUS_PROBE_DEFAULT);
270                 }
271                 ent++;
272         }
273         return (ENXIO);
274 }
275
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_attach(device_t dev)
{
	struct ixlv_sc	*sc;
	struct i40e_hw	*hw;
	struct ixl_vsi	*vsi;
	int		error = 0;

	INIT_DBG_DEV(dev, "begin");

	/* Allocate, clear, and link in our primary soft structure */
	sc = device_get_softc(dev);
	sc->dev = sc->osdep.dev = dev;
	hw = &sc->hw;
	vsi = &sc->vsi;
	vsi->dev = dev;

	/* Initialize hw struct */
	ixlv_init_hw(sc);

	/* Allocate filter lists */
	ixlv_init_filters(sc);

	/* Core Lock Init*/
	mtx_init(&sc->mtx, device_get_nameunit(dev),
	    "IXL SC Lock", MTX_DEF);

	/* Set up the timer callout */
	callout_init_mtx(&sc->timer, &sc->mtx, 0);

	/* Do PCI setup - map BAR0, etc */
	if (ixlv_allocate_pci_resources(sc)) {
		device_printf(dev, "%s: Allocation of PCI resources failed\n",
		    __func__);
		error = ENXIO;
		goto err_early;
	}

	INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");

	error = i40e_set_mac_type(hw);
	if (error) {
		device_printf(dev, "%s: set_mac_type failed: %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	/* The PF may still be resetting the VF; wait for it to finish */
	error = ixlv_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: Device is still being reset\n",
		    __func__);
		goto err_pci_res;
	}

	INIT_DBG_DEV(dev, "VF Device is ready for configuration");

	/* Establish PF communications (API version negotiation) */
	error = ixlv_setup_vc(sc);
	if (error) {
		device_printf(dev, "%s: Error setting up PF comms, %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	INIT_DBG_DEV(dev, "PF API version verified");

	/* Need API version before sending reset message */
	error = ixlv_reset(sc);
	if (error) {
		device_printf(dev, "VF reset failed; reload the driver\n");
		goto err_aq;
	}

	INIT_DBG_DEV(dev, "VF reset complete");

	/* Ask for VF config from PF */
	error = ixlv_vf_config(sc);
	if (error) {
		device_printf(dev, "Error getting configuration from PF: %d\n",
		    error);
		goto err_aq;
	}

	INIT_DBG_DEV(dev, "VF config from PF:");
	INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
	    sc->vf_res->num_vsis,
	    sc->vf_res->num_queue_pairs,
	    sc->vf_res->max_vectors,
	    sc->vf_res->max_mtu);
	INIT_DBG_DEV(dev, "Offload flags: %#010x",
	    sc->vf_res->vf_offload_flags);

	/* got VF config message back from PF, now we can parse it */
	for (int i = 0; i < sc->vf_res->num_vsis; i++) {
		/* Last SRIOV VSI wins if the PF reports more than one */
		if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
			sc->vsi_res = &sc->vf_res->vsi_res[i];
	}
	if (!sc->vsi_res) {
		device_printf(dev, "%s: no LAN VSI found\n", __func__);
		error = EIO;
		goto err_res_buf;
	}

	INIT_DBG_DEV(dev, "Resource Acquisition complete");

	/* If no mac address was assigned just make a random one */
	if (!ixlv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		/* Clear the multicast bit, set the locally-administered bit */
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	vsi->id = sc->vsi_res->vsi_id;
	vsi->back = (void *)sc;
	/* Assume link is up until told otherwise */
	sc->link_up = TRUE;

	/* This allocates the memory and early settings */
	if (ixlv_setup_queues(sc) != 0) {
		device_printf(dev, "%s: setup queues failed!\n",
		    __func__);
		error = EIO;
		goto out;
	}

	/* Setup the stack interface */
	if (ixlv_setup_interface(dev, sc) != 0) {
		device_printf(dev, "%s: setup interface failed!\n",
		    __func__);
		error = EIO;
		goto out;
	}

	INIT_DBG_DEV(dev, "Queue memory and interface setup");

	/* Do queue interrupt setup */
	ixlv_assign_msix(sc);

	/* Start AdminQ taskqueue */
	ixlv_init_taskqueue(sc);

	/* Initialize stats */
	bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
	ixlv_add_sysctls(sc);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

	/* We want AQ enabled early */
	ixlv_enable_adminq_irq(hw);

	/* Set things up to run init */
	sc->init_state = IXLV_INIT_READY;

	ixl_vc_init_mgr(sc, &sc->vc_mgr);

	INIT_DBG_DEV(dev, "end");
	return (error);

	/*
	 * Error unwind: each label releases everything acquired before
	 * the corresponding goto.
	 * NOTE(review): the "out" path calls ixlv_free_queues() even
	 * when ixlv_setup_queues() itself failed — confirm it tolerates
	 * a partially set-up VSI.
	 */
out:
	ixlv_free_queues(vsi);
err_res_buf:
	free(sc->vf_res, M_DEVBUF);
err_aq:
	i40e_shutdown_adminq(hw);
err_pci_res:
	ixlv_free_pci_resources(sc);
err_early:
	mtx_destroy(&sc->mtx);
	ixlv_free_filters(sc);
	INIT_DBG_DEV(dev, "end: error %d", error);
	return (error);
}
462
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_detach(device_t dev)
{
	struct ixlv_sc	*sc = device_get_softc(dev);
	struct ixl_vsi	*vsi = &sc->vsi;

	INIT_DBG_DEV(dev, "begin");

	/* Make sure VLANS are not using driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		if_printf(vsi->ifp, "Vlan in use, detach first\n");
		INIT_DBG_DEV(dev, "end");
		return (EBUSY);
	}

	/* Stop driver */
	ether_ifdetach(vsi->ifp);
	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		mtx_lock(&sc->mtx);
		ixlv_stop(sc);
		mtx_unlock(&sc->mtx);
	}

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	/* Drain VC mgr */
	callout_drain(&sc->vc_mgr.callout);

	/*
	 * Release everything acquired at attach: admin queue, taskqueue,
	 * ifnet, PF resource buffer, PCI resources, queue memory, core
	 * lock, and filter lists.
	 */
	i40e_shutdown_adminq(&sc->hw);
	taskqueue_free(sc->tq);
	if_free(vsi->ifp);
	free(sc->vf_res, M_DEVBUF);
	ixlv_free_pci_resources(sc);
	ixlv_free_queues(vsi);
	mtx_destroy(&sc->mtx);
	ixlv_free_filters(sc);

	bus_generic_detach(dev);
	INIT_DBG_DEV(dev, "end");
	return (0);
}
518
519 /*********************************************************************
520  *
521  *  Shutdown entry point
522  *
523  **********************************************************************/
524
525 static int
526 ixlv_shutdown(device_t dev)
527 {
528         struct ixlv_sc  *sc = device_get_softc(dev);
529
530         INIT_DBG_DEV(dev, "begin");
531
532         mtx_lock(&sc->mtx);     
533         ixlv_stop(sc);
534         mtx_unlock(&sc->mtx);   
535
536         INIT_DBG_DEV(dev, "end");
537         return (0);
538 }
539
540 /*
541  * Configure TXCSUM(IPV6) and TSO(4/6)
542  *      - the hardware handles these together so we
543  *        need to tweak them 
544  */
545 static void
546 ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
547 {
548         /* Enable/disable TXCSUM/TSO4 */
549         if (!(ifp->if_capenable & IFCAP_TXCSUM)
550             && !(ifp->if_capenable & IFCAP_TSO4)) {
551                 if (mask & IFCAP_TXCSUM) {
552                         ifp->if_capenable |= IFCAP_TXCSUM;
553                         /* enable TXCSUM, restore TSO if previously enabled */
554                         if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
555                                 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
556                                 ifp->if_capenable |= IFCAP_TSO4;
557                         }
558                 }
559                 else if (mask & IFCAP_TSO4) {
560                         ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
561                         vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
562                         if_printf(ifp,
563                             "TSO4 requires txcsum, enabling both...\n");
564                 }
565         } else if((ifp->if_capenable & IFCAP_TXCSUM)
566             && !(ifp->if_capenable & IFCAP_TSO4)) {
567                 if (mask & IFCAP_TXCSUM)
568                         ifp->if_capenable &= ~IFCAP_TXCSUM;
569                 else if (mask & IFCAP_TSO4)
570                         ifp->if_capenable |= IFCAP_TSO4;
571         } else if((ifp->if_capenable & IFCAP_TXCSUM)
572             && (ifp->if_capenable & IFCAP_TSO4)) {
573                 if (mask & IFCAP_TXCSUM) {
574                         vsi->flags |= IXL_FLAGS_KEEP_TSO4;
575                         ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
576                         if_printf(ifp, 
577                             "TSO4 requires txcsum, disabling both...\n");
578                 } else if (mask & IFCAP_TSO4)
579                         ifp->if_capenable &= ~IFCAP_TSO4;
580         }
581
582         /* Enable/disable TXCSUM_IPV6/TSO6 */
583         if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
584             && !(ifp->if_capenable & IFCAP_TSO6)) {
585                 if (mask & IFCAP_TXCSUM_IPV6) {
586                         ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
587                         if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
588                                 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
589                                 ifp->if_capenable |= IFCAP_TSO6;
590                         }
591                 } else if (mask & IFCAP_TSO6) {
592                         ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
593                         vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
594                         if_printf(ifp,
595                             "TSO6 requires txcsum6, enabling both...\n");
596                 }
597         } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
598             && !(ifp->if_capenable & IFCAP_TSO6)) {
599                 if (mask & IFCAP_TXCSUM_IPV6)
600                         ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
601                 else if (mask & IFCAP_TSO6)
602                         ifp->if_capenable |= IFCAP_TSO6;
603         } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
604             && (ifp->if_capenable & IFCAP_TSO6)) {
605                 if (mask & IFCAP_TXCSUM_IPV6) {
606                         vsi->flags |= IXL_FLAGS_KEEP_TSO6;
607                         ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
608                         if_printf(ifp,
609                             "TSO6 requires txcsum6, disabling both...\n");
610                 } else if (mask & IFCAP_TSO6)
611                         ifp->if_capenable &= ~IFCAP_TSO6;
612         }
613 }
614
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixlv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  State-changing cases take the core lock (sc->mtx).
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ixl_vsi		*vsi = ifp->if_softc;
	struct ixlv_sc	*sc = vsi->back;
	struct ifreq		*ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr		*ifa = (struct ifaddr *)data;
	bool			avoid_reset = FALSE;
#endif
	int			error = 0;


	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			/* Only (re)start the interface if it isn't running */
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixlv_init(vsi);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
		/*
		 * NOTE(review): the break above sits inside the
		 * INET/INET6 conditional, so with neither option
		 * compiled in, SIOCSIFADDR falls through into
		 * SIOCSIFMTU — confirm this is intended.
		 */
#endif
	case SIOCSIFMTU:
		IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
		mtx_lock(&sc->mtx);
		/* Reject MTUs whose full frame would exceed IXL_MAX_FRAME */
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			error = EINVAL;
			IOCTL_DBG_IF(ifp, "mtu too large");
		} else {
			IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", (u_long)ifp->if_mtu, ifr->ifr_mtu);
			// ERJ: Interestingly enough, these types don't match
			ifp->if_mtu = (u_long)ifr->ifr_mtu;
			vsi->max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			    + ETHER_VLAN_ENCAP_LEN;
			/* Only reinitialize if the adapter is already up */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixlv_init_locked(sc);
		}
		mtx_unlock(&sc->mtx);
		break;
	case SIOCSIFFLAGS:
		IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
		mtx_lock(&sc->mtx);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixlv_init_locked(sc);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixlv_stop(sc);
		/* Cache the flags so later changes can be compared */
		sc->if_flags = ifp->if_flags;
		mtx_unlock(&sc->mtx);
		break;
	case SIOCADDMULTI:
		IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			mtx_lock(&sc->mtx);
			/* Quiesce interrupts while filters are updated */
			ixlv_disable_intr(vsi);
			ixlv_add_multi(vsi);
			ixlv_enable_intr(vsi);
			mtx_unlock(&sc->mtx);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
		/*
		 * NOTE(review): ADDMULTI gates on IFF_DRV_RUNNING while
		 * DELMULTI gates on init_state == IXLV_RUNNING — confirm
		 * the asymmetry is intentional.
		 */
		if (sc->init_state == IXLV_RUNNING) {
			mtx_lock(&sc->mtx);
			ixlv_disable_intr(vsi);
			ixlv_del_multi(vsi);
			ixlv_enable_intr(vsi);
			mtx_unlock(&sc->mtx);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;
	case SIOCSIFCAP:
	{
		/* Bits that differ from the currently-enabled set */
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");

		/* TX csum and TSO are coupled; handled by a dedicated helper */
		ixlv_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		/* Reinit so hardware picks up the new capability set */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ixlv_init(vsi);
		}
		VLAN_CAPABILITIES(ifp);

		break;
	}

	default:
		IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
756
/*
** To do a reinit on the VF is unfortunately more complicated
** than a physical device, we must have the PF more or less
** completely recreate our memory, so many things that were
** done only once at attach in traditional drivers now must be
** redone at each reinitialization. This function does that
** 'prelude' so we can then call the normal locked init code.
*/
int
ixlv_reinit_locked(struct ixlv_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ifnet		*ifp = vsi->ifp;
	struct ixlv_mac_filter	*mf, *mf_temp;
	struct ixlv_vlan_filter	*vf;
	int			error = 0;

	INIT_DBG_IF(ifp, "begin");

	/* Quiesce the interface before asking for a reset */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ixlv_stop(sc);

	/*
	 * NOTE(review): the reset status is captured and returned at
	 * the end, but the remaining steps run even when it failed —
	 * confirm they are safe in that case.
	 */
	error = ixlv_reset(sc);

	INIT_DBG_IF(ifp, "VF was reset");

	/* set the state in case we went thru RESET */
	sc->init_state = IXLV_RUNNING;

	/*
	** Resetting the VF drops all filters from hardware;
	** we need to mark them to be re-added in init.
	*/
	SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
		if (mf->flags & IXL_FILTER_DEL) {
			/* Pending deletions are now moot — drop the entry */
			SLIST_REMOVE(sc->mac_filters, mf,
			    ixlv_mac_filter, next);
			free(mf, M_DEVBUF);
		} else
			mf->flags |= IXL_FILTER_ADD;
	}
	if (vsi->num_vlans != 0)
		SLIST_FOREACH(vf, sc->vlan_filters, next)
			vf->flags = IXL_FILTER_ADD;
	else { /* clean any stale filters */
		while (!SLIST_EMPTY(sc->vlan_filters)) {
			vf = SLIST_FIRST(sc->vlan_filters);
			SLIST_REMOVE_HEAD(sc->vlan_filters, next);
			free(vf, M_DEVBUF);
		}
	}

	/* Re-arm the admin queue IRQ and discard stale queued VC commands */
	ixlv_enable_adminq_irq(hw);
	ixl_vc_flush(&sc->vc_mgr);

	INIT_DBG_IF(ifp, "end");
	return (error);
}
816
817 static void
818 ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
819         enum i40e_status_code code)
820 {
821         struct ixlv_sc *sc;
822
823         sc = arg;
824
825         /*
826          * Ignore "Adapter Stopped" message as that happens if an ifconfig down
827          * happens while a command is in progress, so we don't print an error
828          * in that case.
829          */
830         if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
831                 if_printf(sc->vsi.ifp,
832                     "Error %d waiting for PF to complete operation %d\n",
833                     code, cmd->request);
834         }
835 }
836
/*
** Locked VF init: (re)configures MAC/VLAN filters, offload flags,
** and queue rings, then queues the virtchnl commands that ask the
** PF to configure/map/enable the queues. Must be called with the
** core lock held (asserted below). Completion is asynchronous:
** IFF_DRV_RUNNING is only set once the enable-queues command
** completes.
*/
static void
ixlv_init_locked(struct ixlv_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	int			 error = 0;

	INIT_DBG_IF(ifp, "begin");

	IXLV_CORE_LOCK_ASSERT(sc);

	/* Do a reinit first if an init has already been done */
	if ((sc->init_state == IXLV_RUNNING) ||
	    (sc->init_state == IXLV_RESET_REQUIRED) ||
	    (sc->init_state == IXLV_RESET_PENDING))
		error = ixlv_reinit_locked(sc);
	/* Don't bother with init if we failed reinit */
	if (error)
		goto init_done;

	/* Remove existing MAC filter if new MAC addr is set */
	if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
		error = ixlv_del_mac_filter(sc, hw->mac.addr);
		if (error == 0)
			ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd, 
			    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
			    sc);
	}

	/* Check for an LAA mac address... */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);

	/* Rebuild hwassist flags from the current capability set */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;

	/* Add mac filter for this VF to PF */
	if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
		error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
		/* EEXIST is fine: the filter is already programmed */
		if (!error || error == EEXIST)
			ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
			    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
			    sc);
	}

	/* Setup vlan's if needed */
	ixlv_setup_vlan_filters(sc);

	/* Prepare the queues for operation */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct	rx_ring *rxr = &que->rxr;

		ixl_init_tx_ring(que);

		/* Pick RX mbuf cluster size based on the configured MTU */
		if (vsi->max_frame_size <= MCLBYTES)
			rxr->mbuf_sz = MCLBYTES;
		else
			rxr->mbuf_sz = MJUMPAGESIZE;
		ixl_init_rx_ring(que);
	}

	/* Configure queues */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
	    IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);

	/* Set up RSS */
	ixlv_config_rss(sc);

	/* Map vectors */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd, 
	    IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);

	/* Enable queues */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
	    IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);

	/* Start the local timer */
	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);

	sc->init_state = IXLV_RUNNING;

init_done:
	INIT_DBG_IF(ifp, "end");
	return;
}
928
/*
**  Init entry point for the stack
**
**  Runs the locked init, then polls (outside the lock) until the
**  asynchronous virtchnl handshake marks the interface running or
**  the retry budget is exhausted.
*/
void
ixlv_init(void *arg)
{
	struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
	struct ixlv_sc *sc = vsi->back;
	int retries = 0;

	mtx_lock(&sc->mtx);
	ixlv_init_locked(sc);
	mtx_unlock(&sc->mtx);

	/* Wait for init_locked to finish */
	/* IFF_DRV_RUNNING is set by the enable-queues completion path */
	while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
	    && ++retries < IXLV_AQ_MAX_ERR) {
		i40e_msec_delay(25);
	}
	if (retries >= IXLV_AQ_MAX_ERR)
		if_printf(vsi->ifp,
		    "Init failed to complete in allotted time!\n");
}
952
953 /*
954  * ixlv_attach() helper function; gathers information about
955  * the (virtual) hardware for use elsewhere in the driver.
956  */
957 static void
958 ixlv_init_hw(struct ixlv_sc *sc)
959 {
960         struct i40e_hw *hw = &sc->hw;
961         device_t dev = sc->dev;
962         
963         /* Save off the information about this board */
964         hw->vendor_id = pci_get_vendor(dev);
965         hw->device_id = pci_get_device(dev);
966         hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
967         hw->subsystem_vendor_id =
968             pci_read_config(dev, PCIR_SUBVEND_0, 2);
969         hw->subsystem_device_id =
970             pci_read_config(dev, PCIR_SUBDEV_0, 2);
971
972         hw->bus.device = pci_get_slot(dev);
973         hw->bus.func = pci_get_function(dev);
974 }
975
976 /*
977  * ixlv_attach() helper function; initalizes the admin queue
978  * and attempts to establish contact with the PF by
979  * retrying the initial "API version" message several times
980  * or until the PF responds.
981  */
982 static int
983 ixlv_setup_vc(struct ixlv_sc *sc)
984 {
985         struct i40e_hw *hw = &sc->hw;
986         device_t dev = sc->dev;
987         int error = 0, ret_error = 0, asq_retries = 0;
988         bool send_api_ver_retried = 0;
989
990         /* Need to set these AQ paramters before initializing AQ */
991         hw->aq.num_arq_entries = IXL_AQ_LEN;
992         hw->aq.num_asq_entries = IXL_AQ_LEN;
993         hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
994         hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
995
996         for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
997                 /* Initialize admin queue */
998                 error = i40e_init_adminq(hw);
999                 if (error) {
1000                         device_printf(dev, "%s: init_adminq failed: %d\n",
1001                             __func__, error);
1002                         ret_error = 1;
1003                         continue;
1004                 }
1005
1006                 INIT_DBG_DEV(dev, "Initialized Admin Queue; starting"
1007                     " send_api_ver attempt %d", i+1);
1008
1009 retry_send:
1010                 /* Send VF's API version */
1011                 error = ixlv_send_api_ver(sc);
1012                 if (error) {
1013                         i40e_shutdown_adminq(hw);
1014                         ret_error = 2;
1015                         device_printf(dev, "%s: unable to send api"
1016                             " version to PF on attempt %d, error %d\n",
1017                             __func__, i+1, error);
1018                 }
1019
1020                 asq_retries = 0;
1021                 while (!i40e_asq_done(hw)) {
1022                         if (++asq_retries > IXLV_AQ_MAX_ERR) {
1023                                 i40e_shutdown_adminq(hw);
1024                                 DDPRINTF(dev, "Admin Queue timeout "
1025                                     "(waiting for send_api_ver), %d more retries...",
1026                                     IXLV_AQ_MAX_ERR - (i + 1));
1027                                 ret_error = 3;
1028                                 break;
1029                         } 
1030                         i40e_msec_delay(10);
1031                 }
1032                 if (asq_retries > IXLV_AQ_MAX_ERR)
1033                         continue;
1034
1035                 INIT_DBG_DEV(dev, "Sent API version message to PF");
1036
1037                 /* Verify that the VF accepts the PF's API version */
1038                 error = ixlv_verify_api_ver(sc);
1039                 if (error == ETIMEDOUT) {
1040                         if (!send_api_ver_retried) {
1041                                 /* Resend message, one more time */
1042                                 send_api_ver_retried++;
1043                                 device_printf(dev,
1044                                     "%s: Timeout while verifying API version on first"
1045                                     " try!\n", __func__);
1046                                 goto retry_send;
1047                         } else {
1048                                 device_printf(dev,
1049                                     "%s: Timeout while verifying API version on second"
1050                                     " try!\n", __func__);
1051                                 ret_error = 4;
1052                                 break;
1053                         }
1054                 }
1055                 if (error) {
1056                         device_printf(dev,
1057                             "%s: Unable to verify API version,"
1058                             " error %d\n", __func__, error);
1059                         ret_error = 5;
1060                 }
1061                 break;
1062         }
1063
1064         if (ret_error >= 4)
1065                 i40e_shutdown_adminq(hw);
1066         return (ret_error);
1067 }
1068
1069 /*
1070  * ixlv_attach() helper function; asks the PF for this VF's
1071  * configuration, and saves the information if it receives it.
1072  */
1073 static int
1074 ixlv_vf_config(struct ixlv_sc *sc)
1075 {
1076         struct i40e_hw *hw = &sc->hw;
1077         device_t dev = sc->dev;
1078         int bufsz, error = 0, ret_error = 0;
1079         int asq_retries, retried = 0;
1080
1081 retry_config:
1082         error = ixlv_send_vf_config_msg(sc);
1083         if (error) {
1084                 device_printf(dev,
1085                     "%s: Unable to send VF config request, attempt %d,"
1086                     " error %d\n", __func__, retried + 1, error);
1087                 ret_error = 2;
1088         }
1089
1090         asq_retries = 0;
1091         while (!i40e_asq_done(hw)) {
1092                 if (++asq_retries > IXLV_AQ_MAX_ERR) {
1093                         device_printf(dev, "%s: Admin Queue timeout "
1094                             "(waiting for send_vf_config_msg), attempt %d\n",
1095                             __func__, retried + 1);
1096                         ret_error = 3;
1097                         goto fail;
1098                 }
1099                 i40e_msec_delay(10);
1100         }
1101
1102         INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
1103             retried + 1);
1104
1105         if (!sc->vf_res) {
1106                 bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
1107                     (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
1108                 sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
1109                 if (!sc->vf_res) {
1110                         device_printf(dev,
1111                             "%s: Unable to allocate memory for VF configuration"
1112                             " message from PF on attempt %d\n", __func__, retried + 1);
1113                         ret_error = 1;
1114                         goto fail;
1115                 }
1116         }
1117
1118         /* Check for VF config response */
1119         error = ixlv_get_vf_config(sc);
1120         if (error == ETIMEDOUT) {
1121                 /* The 1st time we timeout, send the configuration message again */
1122                 if (!retried) {
1123                         retried++;
1124                         goto retry_config;
1125                 }
1126                 device_printf(dev,
1127                     "%s: ixlv_get_vf_config() timed out waiting for a response\n",
1128                     __func__);
1129         }
1130         if (error) {
1131                 device_printf(dev,
1132                     "%s: Unable to get VF configuration from PF after %d tries!\n",
1133                     __func__, retried + 1);
1134                 ret_error = 4;
1135         }
1136         goto done;
1137
1138 fail:
1139         free(sc->vf_res, M_DEVBUF);
1140 done:
1141         return (ret_error);
1142 }
1143
/*
 * Allocate MSI/X vectors, setup the AQ vector early
 *
 * Returns the number of vectors allocated, or 0 on any failure
 * (the VF driver requires MSIX and cannot fall back to INTx).
 */
static int
ixlv_init_msix(struct ixlv_sc *sc)
{
	device_t dev = sc->dev;
	int rid, want, vectors, queues, available;

	/* Map the MSIX table BAR */
	rid = PCIR_BAR(IXL_BAR);
	sc->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!sc->msix_mem) {
		/* May not be enabled */
		device_printf(sc->dev,
		    "Unable to map MSIX table \n");
		goto fail;
	}

	available = pci_msix_count(dev); 
	if (available == 0) { /* system has msix disabled */
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, sc->msix_mem);
		sc->msix_mem = NULL;
		goto fail;
	}

	/* Figure out a reasonable auto config value */
	/* (one vector is reserved for the admin queue) */
	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;

	/* Override with hardcoded value if sane */
	if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues)) 
		queues = ixlv_max_queues;
#ifdef	RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif
	/* Enforce the VF max value */
	if (queues > IXLV_MAX_QUEUES)
		queues = IXLV_MAX_QUEUES;

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for the admin queue.
	*/
	want = queues + 1;
	if (want <= available)	/* Have enough */
		vectors = want;
	else {
		device_printf(sc->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors available but %d wanted!\n",
		    available, want);
		goto fail;
	}

#ifdef RSS
	/*
	* If we're doing RSS, the number of queues needs to
	* match the number of RSS buckets that are configured.
	*
	* + If there's more queues than RSS buckets, we'll end
	*   up with queues that get no traffic.
	*
	* + If there's more RSS buckets than queues, we'll end
	*   up having multiple RSS buckets map to the same queue,
	*   so there'll be some contention.
	*/
	if (queues != rss_getnumbuckets()) {
		device_printf(dev,
		    "%s: queues (%d) != RSS buckets (%d)"
		    "; performance will be impacted.\n",
		     __func__, queues, rss_getnumbuckets());
	}
#endif

	/*
	** NOTE(review): a pci_alloc_msix() failure is not handled here;
	** sc->msix and sc->vsi.num_queues stay unset but the function
	** still returns 'vectors' — confirm whether this should fail.
	*/
	if (pci_alloc_msix(dev, &vectors) == 0) {
		device_printf(sc->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		sc->msix = vectors;
		sc->vsi.num_queues = queues;
	}

	/*
	** Explicitly set the guest PCI BUSMASTER capability
	** and we must rewrite the ENABLE in the MSIX control
	** register again at this point to cause the host to
	** successfully initialize us.
	*/
	{
		u16 pci_cmd_word;
		int msix_ctrl;
		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	/* Next we need to setup the vector for the Admin Queue */
	rid = 1;	// zero vector + 1
	sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->res == NULL) {
		device_printf(dev,"Unable to allocate"
		    " bus resource: AQ interrupt \n");
		goto fail;
	}
	if (bus_setup_intr(dev, sc->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixlv_msix_adminq, sc, &sc->tag)) {
		sc->res = NULL;
		device_printf(dev, "Failed to register AQ handler");
		goto fail;
	}
	bus_describe_intr(dev, sc->res, sc->tag, "adminq");

	return (vectors);

fail:
	/* The VF driver MUST use MSIX */
	return (0);
}
1271
1272 static int
1273 ixlv_allocate_pci_resources(struct ixlv_sc *sc)
1274 {
1275         int             rid;
1276         device_t        dev = sc->dev;
1277
1278         rid = PCIR_BAR(0);
1279         sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1280             &rid, RF_ACTIVE);
1281
1282         if (!(sc->pci_mem)) {
1283                 device_printf(dev,"Unable to allocate bus resource: memory\n");
1284                 return (ENXIO);
1285         }
1286
1287         sc->osdep.mem_bus_space_tag =
1288                 rman_get_bustag(sc->pci_mem);
1289         sc->osdep.mem_bus_space_handle =
1290                 rman_get_bushandle(sc->pci_mem);
1291         sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
1292         sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
1293         sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
1294
1295         sc->hw.back = &sc->osdep;
1296
1297         /* Disable adminq interrupts */
1298         ixlv_disable_adminq_irq(&sc->hw);
1299
1300         /*
1301         ** Now setup MSI/X, it will return
1302         ** us the number of supported vectors
1303         */
1304         sc->msix = ixlv_init_msix(sc);
1305
1306         /* We fail without MSIX support */
1307         if (sc->msix == 0)
1308                 return (ENXIO);
1309
1310         return (0);
1311 }
1312
1313 static void
1314 ixlv_free_pci_resources(struct ixlv_sc *sc)
1315 {
1316         struct ixl_vsi         *vsi = &sc->vsi;
1317         struct ixl_queue       *que = vsi->queues;
1318         device_t                dev = sc->dev;
1319
1320         /* We may get here before stations are setup */
1321         if (que == NULL)
1322                 goto early;
1323
1324         /*
1325         **  Release all msix queue resources:
1326         */
1327         for (int i = 0; i < vsi->num_queues; i++, que++) {
1328                 int rid = que->msix + 1;
1329                 if (que->tag != NULL) {
1330                         bus_teardown_intr(dev, que->res, que->tag);
1331                         que->tag = NULL;
1332                 }
1333                 if (que->res != NULL)
1334                         bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1335         }
1336         
1337 early:
1338         /* Clean the AdminQ interrupt */
1339         if (sc->tag != NULL) {
1340                 bus_teardown_intr(dev, sc->res, sc->tag);
1341                 sc->tag = NULL;
1342         }
1343         if (sc->res != NULL)
1344                 bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
1345
1346         pci_release_msi(dev);
1347
1348         if (sc->msix_mem != NULL)
1349                 bus_release_resource(dev, SYS_RES_MEMORY,
1350                     PCIR_BAR(IXL_BAR), sc->msix_mem);
1351
1352         if (sc->pci_mem != NULL)
1353                 bus_release_resource(dev, SYS_RES_MEMORY,
1354                     PCIR_BAR(0), sc->pci_mem);
1355
1356         return;
1357 }
1358
1359 /*
1360  * Create taskqueue and tasklet for Admin Queue interrupts.
1361  */
1362 static int
1363 ixlv_init_taskqueue(struct ixlv_sc *sc)
1364 {
1365         int error = 0;
1366
1367         TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);
1368
1369         sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1370             taskqueue_thread_enqueue, &sc->tq);
1371         taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
1372             device_get_nameunit(sc->dev));
1373
1374         return (error);
1375 }
1376
1377 /*********************************************************************
1378  *
1379  *  Setup MSIX Interrupt resources and handlers for the VSI queues
1380  *
1381  **********************************************************************/
1382 static int
1383 ixlv_assign_msix(struct ixlv_sc *sc)
1384 {
1385         device_t        dev = sc->dev;
1386         struct          ixl_vsi *vsi = &sc->vsi;
1387         struct          ixl_queue *que = vsi->queues;
1388         struct          tx_ring  *txr;
1389         int             error, rid, vector = 1;
1390 #ifdef  RSS
1391         cpuset_t        cpu_mask;
1392 #endif
1393
1394         for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1395                 int cpu_id = i;
1396                 rid = vector + 1;
1397                 txr = &que->txr;
1398                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1399                     RF_SHAREABLE | RF_ACTIVE);
1400                 if (que->res == NULL) {
1401                         device_printf(dev,"Unable to allocate"
1402                             " bus resource: que interrupt [%d]\n", vector);
1403                         return (ENXIO);
1404                 }
1405                 /* Set the handler function */
1406                 error = bus_setup_intr(dev, que->res,
1407                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
1408                     ixlv_msix_que, que, &que->tag);
1409                 if (error) {
1410                         que->res = NULL;
1411                         device_printf(dev, "Failed to register que handler");
1412                         return (error);
1413                 }
1414                 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1415                 /* Bind the vector to a CPU */
1416 #ifdef RSS
1417                 cpu_id = rss_getcpu(i % rss_getnumbuckets());
1418 #endif
1419                 bus_bind_intr(dev, que->res, cpu_id);
1420                 que->msix = vector;
1421                 vsi->que_mask |= (u64)(1 << que->msix);
1422                 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1423                 TASK_INIT(&que->task, 0, ixlv_handle_que, que);
1424                 que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
1425                     taskqueue_thread_enqueue, &que->tq);
1426 #ifdef RSS
1427                 CPU_SETOF(cpu_id, &cpu_mask);
1428                 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
1429                     &cpu_mask, "%s (bucket %d)",
1430                     device_get_nameunit(dev), cpu_id);
1431 #else
1432                 taskqueue_start_threads(&que->tq, 1, PI_NET,
1433                     "%s que", device_get_nameunit(dev));
1434 #endif
1435
1436         }
1437
1438         return (0);
1439 }
1440
/*
** Requests a VF reset from the PF.
**
** Requires the VF's Admin Queue to be initialized.
**
** After the PF acknowledges the reset, the admin queue is torn
** down and re-initialized, since reset invalidates its state.
** Returns 0 on success or the first failing step's error code.
*/
static int
ixlv_reset(struct ixlv_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	int		error = 0;

	/* Ask the PF to reset us if we are initiating */
	if (sc->init_state != IXLV_RESET_PENDING)
		ixlv_request_reset(sc);

	/* Give the PF time to act, then poll for completion */
	i40e_msec_delay(100);
	error = ixlv_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: VF reset failed\n",
		    __func__);
		return (error);
	}

	/* The AQ must be recycled across a VF reset */
	error = i40e_shutdown_adminq(hw);
	if (error) {
		device_printf(dev, "%s: shutdown_adminq failed: %d\n",
		    __func__, error);
		return (error);
	}

	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "%s: init_adminq failed: %d\n",
		    __func__, error);
		return(error);
	}

	return (0);
}
1481
1482 static int
1483 ixlv_reset_complete(struct i40e_hw *hw)
1484 {
1485         u32 reg;
1486
1487         for (int i = 0; i < 100; i++) {
1488                 reg = rd32(hw, I40E_VFGEN_RSTAT) &
1489                     I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1490
1491                 if ((reg == I40E_VFR_VFACTIVE) ||
1492                     (reg == I40E_VFR_COMPLETED))
1493                         return (0);
1494                 i40e_msec_delay(100);
1495         }
1496
1497         return (EBUSY);
1498 }
1499
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 *  Allocates the ifnet, wires up the driver's init/ioctl/transmit
 *  entry points, attaches the ethernet layer, and advertises the
 *  offload capabilities this VF supports. Returns 0 on success,
 *  -1 if the ifnet could not be allocated.
 *
 **********************************************************************/
static int
ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
{
	struct ifnet		*ifp;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;

	INIT_DBG_DEV(dev, "begin");

	ifp = vsi->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "%s: could not allocate ifnet"
		    " structure!\n", __func__);
		return (-1);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 4000000000;	// ??
	ifp->if_init = ixlv_init;
	ifp->if_softc = vsi;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixlv_ioctl;

#if __FreeBSD_version >= 1100000
	if_setgetcounterfn(ifp, ixl_get_counter);
#endif

	ifp->if_transmit = ixl_mq_start;

	ifp->if_qflush = ixl_qflush;
	/* Leave room: full descriptor rings can't be posted to */
	ifp->if_snd.ifq_maxlen = que->num_desc - 2;

	ether_ifattach(ifp, sc->hw.mac.addr);

	/* Frame size includes L2 header, CRC and one VLAN tag */
	vsi->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_TSO;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;

	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU
			     |  IFCAP_VLAN_HWCSUM
			     |  IFCAP_LRO;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	** Don't turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the ixl driver you can
	** enable this and get full hardware tag filtering.
	*/
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
		     ixlv_media_status);

	// JFV Add media types later?

	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	INIT_DBG_DEV(dev, "end");
	return (0);
}
1588
1589 /*
1590 ** Allocate and setup the interface queues
1591 */
1592 static int
1593 ixlv_setup_queues(struct ixlv_sc *sc)
1594 {
1595         device_t                dev = sc->dev;
1596         struct ixl_vsi          *vsi;
1597         struct ixl_queue        *que;
1598         struct tx_ring          *txr;
1599         struct rx_ring          *rxr;
1600         int                     rsize, tsize;
1601         int                     error = I40E_SUCCESS;
1602
1603         vsi = &sc->vsi;
1604         vsi->back = (void *)sc;
1605         vsi->hw = &sc->hw;
1606         vsi->num_vlans = 0;
1607
1608         /* Get memory for the station queues */
1609         if (!(vsi->queues =
1610                 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
1611                 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1612                         device_printf(dev, "Unable to allocate queue memory\n");
1613                         error = ENOMEM;
1614                         goto early;
1615         }
1616
1617         for (int i = 0; i < vsi->num_queues; i++) {
1618                 que = &vsi->queues[i];
1619                 que->num_desc = ixlv_ringsz;
1620                 que->me = i;
1621                 que->vsi = vsi;
1622                 /* mark the queue as active */
1623                 vsi->active_queues |= (u64)1 << que->me;
1624
1625                 txr = &que->txr;
1626                 txr->que = que;
1627                 txr->tail = I40E_QTX_TAIL1(que->me);
1628                 /* Initialize the TX lock */
1629                 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
1630                     device_get_nameunit(dev), que->me);
1631                 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
1632                 /*
1633                 ** Create the TX descriptor ring, the extra int is
1634                 ** added as the location for HEAD WB.
1635                 */
1636                 tsize = roundup2((que->num_desc *
1637                     sizeof(struct i40e_tx_desc)) +
1638                     sizeof(u32), DBA_ALIGN);
1639                 if (i40e_allocate_dma_mem(&sc->hw,
1640                     &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
1641                         device_printf(dev,
1642                             "Unable to allocate TX Descriptor memory\n");
1643                         error = ENOMEM;
1644                         goto fail;
1645                 }
1646                 txr->base = (struct i40e_tx_desc *)txr->dma.va;
1647                 bzero((void *)txr->base, tsize);
1648                 /* Now allocate transmit soft structs for the ring */
1649                 if (ixl_allocate_tx_data(que)) {
1650                         device_printf(dev,
1651                             "Critical Failure setting up TX structures\n");
1652                         error = ENOMEM;
1653                         goto fail;
1654                 }
1655                 /* Allocate a buf ring */
1656                 txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
1657                     M_WAITOK, &txr->mtx);
1658                 if (txr->br == NULL) {
1659                         device_printf(dev,
1660                             "Critical Failure setting up TX buf ring\n");
1661                         error = ENOMEM;
1662                         goto fail;
1663                 }
1664
1665                 /*
1666                  * Next the RX queues...
1667                  */ 
1668                 rsize = roundup2(que->num_desc *
1669                     sizeof(union i40e_rx_desc), DBA_ALIGN);
1670                 rxr = &que->rxr;
1671                 rxr->que = que;
1672                 rxr->tail = I40E_QRX_TAIL1(que->me);
1673
1674                 /* Initialize the RX side lock */
1675                 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
1676                     device_get_nameunit(dev), que->me);
1677                 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
1678
1679                 if (i40e_allocate_dma_mem(&sc->hw,
1680                     &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
1681                         device_printf(dev,
1682                             "Unable to allocate RX Descriptor memory\n");
1683                         error = ENOMEM;
1684                         goto fail;
1685                 }
1686                 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
1687                 bzero((void *)rxr->base, rsize);
1688
1689                 /* Allocate receive soft structs for the ring*/
1690                 if (ixl_allocate_rx_data(que)) {
1691                         device_printf(dev,
1692                             "Critical Failure setting up receive structs\n");
1693                         error = ENOMEM;
1694                         goto fail;
1695                 }
1696         }
1697
1698         return (0);
1699
1700 fail:
1701         for (int i = 0; i < vsi->num_queues; i++) {
1702                 que = &vsi->queues[i];
1703                 rxr = &que->rxr;
1704                 txr = &que->txr;
1705                 if (rxr->base)
1706                         i40e_free_dma_mem(&sc->hw, &rxr->dma);
1707                 if (txr->base)
1708                         i40e_free_dma_mem(&sc->hw, &txr->dma);
1709         }
1710         free(vsi->queues, M_DEVBUF);
1711
1712 early:
1713         return (error);
1714 }
1715
/*
** This routine is run via a vlan config EVENT, which
** enables us to use the HW Filter table since
** we can get the vlan id.  It just creates the
** entry in the soft version of the VFTA; init will
** repopulate the real table.
*/
1723 static void
1724 ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1725 {
1726         struct ixl_vsi          *vsi = arg;
1727         struct ixlv_sc          *sc = vsi->back;
1728         struct ixlv_vlan_filter *v;
1729
1730
1731         if (ifp->if_softc != arg)   /* Not our event */
1732                 return;
1733
1734         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1735                 return;
1736
1737         /* Sanity check - make sure it doesn't already exist */
1738         SLIST_FOREACH(v, sc->vlan_filters, next) {
1739                 if (v->vlan == vtag)
1740                         return;
1741         }
1742
1743         mtx_lock(&sc->mtx);
1744         ++vsi->num_vlans;
1745         v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
1746         SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
1747         v->vlan = vtag;
1748         v->flags = IXL_FILTER_ADD;
1749         ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
1750             IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
1751         mtx_unlock(&sc->mtx);
1752         return;
1753 }
1754
/*
** This routine is run via a vlan
** unconfig EVENT; it removes our
** entry from the soft vfta.
*/
1760 static void
1761 ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1762 {
1763         struct ixl_vsi          *vsi = arg;
1764         struct ixlv_sc          *sc = vsi->back;
1765         struct ixlv_vlan_filter *v;
1766         int                     i = 0;
1767         
1768         if (ifp->if_softc != arg)
1769                 return;
1770
1771         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1772                 return;
1773
1774         mtx_lock(&sc->mtx);
1775         SLIST_FOREACH(v, sc->vlan_filters, next) {
1776                 if (v->vlan == vtag) {
1777                         v->flags = IXL_FILTER_DEL;
1778                         ++i;
1779                         --vsi->num_vlans;
1780                 }
1781         }
1782         if (i)
1783                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
1784                     IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
1785         mtx_unlock(&sc->mtx);
1786         return;
1787 }
1788
1789 /*
1790 ** Get a new filter and add it to the mac filter list.
1791 */
1792 static struct ixlv_mac_filter *
1793 ixlv_get_mac_filter(struct ixlv_sc *sc)
1794 {
1795         struct ixlv_mac_filter  *f;
1796
1797         f = malloc(sizeof(struct ixlv_mac_filter),
1798             M_DEVBUF, M_NOWAIT | M_ZERO);
1799         if (f)
1800                 SLIST_INSERT_HEAD(sc->mac_filters, f, next);
1801
1802         return (f);
1803 }
1804
1805 /*
1806 ** Find the filter with matching MAC address
1807 */
1808 static struct ixlv_mac_filter *
1809 ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
1810 {
1811         struct ixlv_mac_filter  *f;
1812         bool                            match = FALSE;
1813
1814         SLIST_FOREACH(f, sc->mac_filters, next) {
1815                 if (cmp_etheraddr(f->macaddr, macaddr)) {
1816                         match = TRUE;
1817                         break;
1818                 }
1819         }       
1820
1821         if (!match)
1822                 f = NULL;
1823         return (f);
1824 }
1825
1826 /*
1827 ** Admin Queue interrupt handler
1828 */
static void
ixlv_msix_adminq(void *arg)
{
        struct ixlv_sc  *sc = arg;
        struct i40e_hw  *hw = &sc->hw;
        u32             reg, mask;

        /*
         * NOTE(review): the values read from ICR01/ICR0_ENA1 are never
         * used; presumably the reads acknowledge/clear the interrupt
         * cause bits — confirm against the XL710 datasheet.
         */
        reg = rd32(hw, I40E_VFINT_ICR01);
        mask = rd32(hw, I40E_VFINT_ICR0_ENA1);

        /* Clear the admin queue vector's pending-bit-array bit. */
        reg = rd32(hw, I40E_VFINT_DYN_CTL01);
        reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
        wr32(hw, I40E_VFINT_DYN_CTL01, reg);

        /* schedule task: actual admin queue processing happens there */
        taskqueue_enqueue(sc->tq, &sc->aq_irq);
        return;
}
1847
1848 void
1849 ixlv_enable_intr(struct ixl_vsi *vsi)
1850 {
1851         struct i40e_hw          *hw = vsi->hw;
1852         struct ixl_queue        *que = vsi->queues;
1853
1854         ixlv_enable_adminq_irq(hw);
1855         for (int i = 0; i < vsi->num_queues; i++, que++)
1856                 ixlv_enable_queue_irq(hw, que->me);
1857 }
1858
1859 void
1860 ixlv_disable_intr(struct ixl_vsi *vsi)
1861 {
1862         struct i40e_hw          *hw = vsi->hw;
1863         struct ixl_queue       *que = vsi->queues;
1864
1865         ixlv_disable_adminq_irq(hw);
1866         for (int i = 0; i < vsi->num_queues; i++, que++)
1867                 ixlv_disable_queue_irq(hw, que->me);
1868 }
1869
1870
static void
ixlv_disable_adminq_irq(struct i40e_hw *hw)
{
        /* Mask the admin queue vector and clear all its enable causes. */
        wr32(hw, I40E_VFINT_DYN_CTL01, 0);
        wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
        /* flush */
        rd32(hw, I40E_VFGEN_RSTAT);
        return;
}
1880
static void
ixlv_enable_adminq_irq(struct i40e_hw *hw)
{
        /* Unmask the admin queue vector (INTENA + no-ITR index). */
        wr32(hw, I40E_VFINT_DYN_CTL01,
            I40E_VFINT_DYN_CTL01_INTENA_MASK |
            I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
        /* Enable only the admin queue interrupt cause. */
        wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
        /* flush */
        rd32(hw, I40E_VFGEN_RSTAT);
        return;
}
1892
1893 static void
1894 ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
1895 {
1896         u32             reg;
1897
1898         reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1899             I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK; 
1900         wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
1901 }
1902
static void
ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
{
        /* Writing zero clears INTENA, masking this queue's MSIX vector. */
        wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
        /* Read back to flush the posted write. */
        rd32(hw, I40E_VFGEN_RSTAT);
        return;
}
1910
1911
1912 /*
1913 ** Provide a update to the queue RX
1914 ** interrupt moderation value.
1915 */
static void
ixlv_set_queue_rx_itr(struct ixl_queue *que)
{
        struct ixl_vsi  *vsi = que->vsi;
        struct i40e_hw  *hw = vsi->hw;
        struct rx_ring  *rxr = &que->rxr;
        u16             rx_itr;
        u16             rx_latency = 0;
        int             rx_bytes;


        /* Idle, do nothing */
        if (rxr->bytes == 0)
                return;

        if (ixlv_dynamic_rx_itr) {
                /* Bytes observed per ITR unit drive the latency class. */
                rx_bytes = rxr->bytes/rxr->itr;
                rx_itr = rxr->itr;

                /* Adjust latency range */
                switch (rxr->latency) {
                case IXL_LOW_LATENCY:
                        if (rx_bytes > 10) {
                                rx_latency = IXL_AVE_LATENCY;
                                rx_itr = IXL_ITR_20K;
                        }
                        break;
                case IXL_AVE_LATENCY:
                        if (rx_bytes > 20) {
                                rx_latency = IXL_BULK_LATENCY;
                                rx_itr = IXL_ITR_8K;
                        } else if (rx_bytes <= 10) {
                                rx_latency = IXL_LOW_LATENCY;
                                rx_itr = IXL_ITR_100K;
                        }
                        break;
                case IXL_BULK_LATENCY:
                        if (rx_bytes <= 20) {
                                rx_latency = IXL_AVE_LATENCY;
                                rx_itr = IXL_ITR_20K;
                        }
                        break;
                 }

                /*
                 * NOTE(review): rx_latency stays 0 when no branch above
                 * fires, so latency is unconditionally overwritten here;
                 * presumably 0 == IXL_LOW_LATENCY — confirm.
                 */
                rxr->latency = rx_latency;

                if (rx_itr != rxr->itr) {
                        /* do an exponential smoothing toward the target */
                        rx_itr = (10 * rx_itr * rxr->itr) /
                            ((9 * rx_itr) + rxr->itr);
                        rxr->itr = rx_itr & IXL_MAX_ITR;
                        wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
                            que->me), rxr->itr);
                }
        } else { /* We may have toggled to non-dynamic */
                if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
                        vsi->rx_itr_setting = ixlv_rx_itr;
                /* Update the hardware if needed */
                if (rxr->itr != vsi->rx_itr_setting) {
                        rxr->itr = vsi->rx_itr_setting;
                        wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
                            que->me), rxr->itr);
                }
        }
        /* Reset counters for the next measurement interval. */
        rxr->bytes = 0;
        rxr->packets = 0;
        return;
}
1984
1985
1986 /*
1987 ** Provide a update to the queue TX
1988 ** interrupt moderation value.
1989 */
1990 static void
1991 ixlv_set_queue_tx_itr(struct ixl_queue *que)
1992 {
1993         struct ixl_vsi  *vsi = que->vsi;
1994         struct i40e_hw  *hw = vsi->hw;
1995         struct tx_ring  *txr = &que->txr;
1996         u16             tx_itr;
1997         u16             tx_latency = 0;
1998         int             tx_bytes;
1999
2000
2001         /* Idle, do nothing */
2002         if (txr->bytes == 0)
2003                 return;
2004
2005         if (ixlv_dynamic_tx_itr) {
2006                 tx_bytes = txr->bytes/txr->itr;
2007                 tx_itr = txr->itr;
2008
2009                 switch (txr->latency) {
2010                 case IXL_LOW_LATENCY:
2011                         if (tx_bytes > 10) {
2012                                 tx_latency = IXL_AVE_LATENCY;
2013                                 tx_itr = IXL_ITR_20K;
2014                         }
2015                         break;
2016                 case IXL_AVE_LATENCY:
2017                         if (tx_bytes > 20) {
2018                                 tx_latency = IXL_BULK_LATENCY;
2019                                 tx_itr = IXL_ITR_8K;
2020                         } else if (tx_bytes <= 10) {
2021                                 tx_latency = IXL_LOW_LATENCY;
2022                                 tx_itr = IXL_ITR_100K;
2023                         }
2024                         break;
2025                 case IXL_BULK_LATENCY:
2026                         if (tx_bytes <= 20) {
2027                                 tx_latency = IXL_AVE_LATENCY;
2028                                 tx_itr = IXL_ITR_20K;
2029                         }
2030                         break;
2031                 }
2032
2033                 txr->latency = tx_latency;
2034
2035                 if (tx_itr != txr->itr) {
2036                  /* do an exponential smoothing */
2037                         tx_itr = (10 * tx_itr * txr->itr) /
2038                             ((9 * tx_itr) + txr->itr);
2039                         txr->itr = tx_itr & IXL_MAX_ITR;
2040                         wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2041                             que->me), txr->itr);
2042                 }
2043
2044         } else { /* We may have have toggled to non-dynamic */
2045                 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2046                         vsi->tx_itr_setting = ixlv_tx_itr;
2047                 /* Update the hardware if needed */
2048                 if (txr->itr != vsi->tx_itr_setting) {
2049                         txr->itr = vsi->tx_itr_setting;
2050                         wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2051                             que->me), txr->itr);
2052                 }
2053         }
2054         txr->bytes = 0;
2055         txr->packets = 0;
2056         return;
2057 }
2058
2059
2060 /*
2061 **
2062 ** MSIX Interrupt Handlers and Tasklets
2063 **
2064 */
static void
ixlv_handle_que(void *context, int pending)
{
        struct ixl_queue *que = context;
        struct ixl_vsi *vsi = que->vsi;
        struct i40e_hw  *hw = vsi->hw;
        struct tx_ring  *txr = &que->txr;
        struct ifnet    *ifp = vsi->ifp;
        bool            more;

        /* Only process traffic while the interface is up. */
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                more = ixl_rxeof(que, IXL_RX_LIMIT);
                mtx_lock(&txr->mtx);
                ixl_txeof(que);
                /* Push out anything the stack queued while we cleaned. */
                if (!drbr_empty(ifp, txr->br))
                        ixl_mq_start_locked(ifp, txr);
                mtx_unlock(&txr->mtx);
                /*
                 * RX backlog remains: requeue ourselves and leave the
                 * queue interrupt masked until it is drained.
                 */
                if (more) {
                        taskqueue_enqueue(que->tq, &que->task);
                        return;
                }
        }

        /* Reenable this interrupt - hmmm */
        ixlv_enable_queue_irq(hw, que->me);
        return;
}
2092
2093
2094 /*********************************************************************
2095  *
2096  *  MSIX Queue Interrupt Service routine
2097  *
2098  **********************************************************************/
static void
ixlv_msix_que(void *arg)
{
        struct ixl_queue        *que = arg;
        struct ixl_vsi  *vsi = que->vsi;
        struct i40e_hw  *hw = vsi->hw;
        struct tx_ring  *txr = &que->txr;
        bool            more_tx, more_rx;

        /* Spurious interrupts are ignored */
        if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
                return;

        ++que->irqs;

        /* Clean a bounded batch of RX descriptors. */
        more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

        mtx_lock(&txr->mtx);
        more_tx = ixl_txeof(que);
        /*
        ** Make certain that if the stack 
        ** has anything queued the task gets
        ** scheduled to handle it.
        */
        if (!drbr_empty(vsi->ifp, txr->br))
                more_tx = 1;
        mtx_unlock(&txr->mtx);

        /* Re-tune interrupt moderation from the traffic just seen. */
        ixlv_set_queue_rx_itr(que);
        ixlv_set_queue_tx_itr(que);

        /*
         * If work remains, defer to the taskqueue (vector stays masked);
         * otherwise re-enable this queue's interrupt.
         */
        if (more_tx || more_rx)
                taskqueue_enqueue(que->tq, &que->task);
        else
                ixlv_enable_queue_irq(hw, que->me);

        return;
}
2137
2138
2139 /*********************************************************************
2140  *
2141  *  Media Ioctl callback
2142  *
2143  *  This routine is called whenever the user queries the status of
2144  *  the interface using ifconfig.
2145  *
2146  **********************************************************************/
2147 static void
2148 ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
2149 {
2150         struct ixl_vsi          *vsi = ifp->if_softc;
2151         struct ixlv_sc  *sc = vsi->back;
2152
2153         INIT_DBG_IF(ifp, "begin");
2154
2155         mtx_lock(&sc->mtx);
2156
2157         ixlv_update_link_status(sc);
2158
2159         ifmr->ifm_status = IFM_AVALID;
2160         ifmr->ifm_active = IFM_ETHER;
2161
2162         if (!sc->link_up) {
2163                 mtx_unlock(&sc->mtx);
2164                 INIT_DBG_IF(ifp, "end: link not up");
2165                 return;
2166         }
2167
2168         ifmr->ifm_status |= IFM_ACTIVE;
2169         /* Hardware is always full-duplex */
2170         ifmr->ifm_active |= IFM_FDX;
2171         mtx_unlock(&sc->mtx);
2172         INIT_DBG_IF(ifp, "end");
2173         return;
2174 }
2175
2176 /*********************************************************************
2177  *
2178  *  Media Ioctl callback
2179  *
2180  *  This routine is called when the user changes speed/duplex using
2181  *  media/mediopt option with ifconfig.
2182  *
2183  **********************************************************************/
2184 static int
2185 ixlv_media_change(struct ifnet * ifp)
2186 {
2187         struct ixl_vsi *vsi = ifp->if_softc;
2188         struct ifmedia *ifm = &vsi->media;
2189
2190         INIT_DBG_IF(ifp, "begin");
2191
2192         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2193                 return (EINVAL);
2194
2195         INIT_DBG_IF(ifp, "end");
2196         return (0);
2197 }
2198
2199
2200 /*********************************************************************
2201  *  Multicast Initialization
2202  *
2203  *  This routine is called by init to reset a fresh state.
2204  *
2205  **********************************************************************/
2206
2207 static void
2208 ixlv_init_multi(struct ixl_vsi *vsi)
2209 {
2210         struct ixlv_mac_filter *f;
2211         struct ixlv_sc  *sc = vsi->back;
2212         int                     mcnt = 0;
2213
2214         IOCTL_DBG_IF(vsi->ifp, "begin");
2215
2216         /* First clear any multicast filters */
2217         SLIST_FOREACH(f, sc->mac_filters, next) {
2218                 if ((f->flags & IXL_FILTER_USED)
2219                     && (f->flags & IXL_FILTER_MC)) {
2220                         f->flags |= IXL_FILTER_DEL;
2221                         mcnt++;
2222                 }
2223         }
2224         if (mcnt > 0)
2225                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2226                     IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2227                     sc);
2228
2229         IOCTL_DBG_IF(vsi->ifp, "end");
2230 }
2231
static void
ixlv_add_multi(struct ixl_vsi *vsi)
{
        struct ifmultiaddr      *ifma;
        struct ifnet            *ifp = vsi->ifp;
        struct ixlv_sc  *sc = vsi->back;
        int                     mcnt = 0;

        IOCTL_DBG_IF(ifp, "begin");

        if_maddr_rlock(ifp);
        /*
        ** Get a count, to decide if we
        ** simply use multicast promiscuous.
        */
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                mcnt++;
        }
        if_maddr_runlock(ifp);

        // TODO: Remove -- cannot set promiscuous mode in a VF
        if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
                /* delete all multicast filters */
                ixlv_init_multi(vsi);
                sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
                ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
                    IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
                    sc);
                IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
                return;
        }

        /*
         * Second pass: create a soft filter for each link-level address.
         * mcnt counts zero returns from ixlv_add_mac_filter (presumably
         * success — confirm against its definition).
         */
        mcnt = 0;
        if_maddr_rlock(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                if (!ixlv_add_mac_filter(sc,
                    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
                    IXL_FILTER_MC))
                        mcnt++;
        }
        if_maddr_runlock(ifp);
        /*
        ** Notify AQ task that sw filters need to be
        ** added to hw list
        */
        if (mcnt > 0)
                ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
                    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
                    sc);

        IOCTL_DBG_IF(ifp, "end");
}
2288
static void
ixlv_del_multi(struct ixl_vsi *vsi)
{
        struct ixlv_mac_filter *f;
        struct ifmultiaddr      *ifma;
        struct ifnet            *ifp = vsi->ifp;
        struct ixlv_sc  *sc = vsi->back;
        int                     mcnt = 0;
        bool            match = FALSE;

        IOCTL_DBG_IF(ifp, "begin");

        /* Search for removed multicast addresses */
        if_maddr_rlock(ifp);
        SLIST_FOREACH(f, sc->mac_filters, next) {
                if ((f->flags & IXL_FILTER_USED)
                    && (f->flags & IXL_FILTER_MC)) {
                        /* check if mac address in filter is in sc's list */
                        match = FALSE;
                        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                                if (ifma->ifma_addr->sa_family != AF_LINK)
                                        continue;
                                u8 *mc_addr =
                                    (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
                                if (cmp_etheraddr(f->macaddr, mc_addr)) {
                                        match = TRUE;
                                        break;
                                }
                        }
                        /* if this filter is not in the sc's list, remove it */
                        if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
                                f->flags |= IXL_FILTER_DEL;
                                mcnt++;
                                IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
                                    MAC_FORMAT_ARGS(f->macaddr));
                        }
                        else if (match == FALSE)
                                /* already flagged for deletion earlier */
                                IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
                                    MAC_FORMAT_ARGS(f->macaddr));
                }
        }
        if_maddr_runlock(ifp);

        /* Push the deletions to the PF via the admin queue task. */
        if (mcnt > 0)
                ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
                    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
                    sc);

        IOCTL_DBG_IF(ifp, "end");
}
2339
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/
2347
static void
ixlv_local_timer(void *arg)
{
        struct ixlv_sc  *sc = arg;
        struct i40e_hw          *hw = &sc->hw;
        struct ixl_vsi          *vsi = &sc->vsi;
        struct ixl_queue        *que = vsi->queues;
        device_t                dev = sc->dev;
        int                     hung = 0;
        u32                     mask, val;

        IXLV_CORE_LOCK_ASSERT(sc);

        /* If Reset is in progress just bail */
        if (sc->init_state == IXLV_RESET_PENDING)
                return;

        /* Check for when PF triggers a VF reset */
        val = rd32(hw, I40E_VFGEN_RSTAT) &
            I40E_VFGEN_RSTAT_VFR_STATE_MASK;

        /* Not active or completed: a reset is underway; skip this tick. */
        if (val != I40E_VFR_VFACTIVE
            && val != I40E_VFR_COMPLETED) {
                DDPRINTF(dev, "reset in progress! (%d)", val);
                return;
        }

        ixlv_request_stats(sc);

        /* clean and process any events */
        taskqueue_enqueue(sc->tq, &sc->aq_irq);

        /*
        ** Check status on the queues for a hang
        */
        mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
            I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK);

        for (int i = 0; i < vsi->num_queues; i++,que++) {
                /* Any queues with outstanding work get a sw irq */
                if (que->busy)
                        wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
                /*
                ** Each time txeof runs without cleaning, but there
                ** are uncleaned descriptors it increments busy. If
                ** we get to 5 we declare it hung.
                */
                if (que->busy == IXL_QUEUE_HUNG) {
                        ++hung;
                        /* Mark the queue as inactive */
                        vsi->active_queues &= ~((u64)1 << que->me);
                        continue;
                } else {
                        /* Check if we've come back from hung */
                        if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
                                vsi->active_queues |= ((u64)1 << que->me);
                }
                if (que->busy >= IXL_MAX_TX_BUSY) {
                        device_printf(dev,"Warning queue %d "
                            "appears to be hung!\n", i);
                        que->busy = IXL_QUEUE_HUNG;
                        ++hung;
                }
        }
        /* Only reset when all queues show hung */
        if (hung == vsi->num_queues)
                goto hung;
        /* Rearm: runs once per second while the interface is up. */
        callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
        return;

hung:
        /*
         * All queues hung: request a reset and re-init.  The callout is
         * not rearmed here; presumably the init path restarts it — TODO
         * confirm in ixlv_init_locked.
         */
        device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
        sc->init_state = IXLV_RESET_REQUIRED;
        ixlv_init_locked(sc);
}
2423
2424 /*
2425 ** Note: this routine updates the OS on the link state
2426 **      the real check of the hardware only happens with
2427 **      a link interrupt.
2428 */
2429 void
2430 ixlv_update_link_status(struct ixlv_sc *sc)
2431 {
2432         struct ixl_vsi          *vsi = &sc->vsi;
2433         struct ifnet            *ifp = vsi->ifp;
2434
2435         if (sc->link_up){ 
2436                 if (vsi->link_active == FALSE) {
2437                         if (bootverbose)
2438                                 if_printf(ifp,"Link is Up, %d Gbps\n",
2439                                     (sc->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
2440                         vsi->link_active = TRUE;
2441                         if_link_state_change(ifp, LINK_STATE_UP);
2442                 }
2443         } else { /* Link down */
2444                 if (vsi->link_active == TRUE) {
2445                         if (bootverbose)
2446                                 if_printf(ifp,"Link is Down\n");
2447                         if_link_state_change(ifp, LINK_STATE_DOWN);
2448                         vsi->link_active = FALSE;
2449                 }
2450         }
2451
2452         return;
2453 }
2454
2455 /*********************************************************************
2456  *
2457  *  This routine disables all traffic on the adapter by issuing a
2458  *  global reset on the MAC and deallocates TX/RX buffers.
2459  *
2460  **********************************************************************/
2461
static void
ixlv_stop(struct ixlv_sc *sc)
{
        struct ifnet *ifp;
        int start;

        ifp = sc->vsi.ifp;
        INIT_DBG_IF(ifp, "begin");

        IXLV_CORE_LOCK_ASSERT(sc);

        /* Drop any pending virtchnl commands, then ask the PF to
         * disable our queues. */
        ixl_vc_flush(&sc->vc_mgr);
        ixlv_disable_queues(sc);

        /*
         * Poll the admin queue for up to hz/10 ticks (~100ms) waiting
         * for the disable to complete and IFF_DRV_RUNNING to clear.
         */
        start = ticks;
        while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
            ((ticks - start) < hz/10))
                ixlv_do_adminq_locked(sc);

        /* Stop the local timer */
        callout_stop(&sc->timer);

        INIT_DBG_IF(ifp, "end");
}
2486
2487
2488 /*********************************************************************
2489  *
2490  *  Free all station queue structs.
2491  *
2492  **********************************************************************/
static void
ixlv_free_queues(struct ixl_vsi *vsi)
{
        struct ixlv_sc  *sc = (struct ixlv_sc *)vsi->back;
        struct ixl_queue        *que = vsi->queues;

        for (int i = 0; i < vsi->num_queues; i++, que++) {
                struct tx_ring *txr = &que->txr;
                struct rx_ring *rxr = &que->rxr;
        
                if (!mtx_initialized(&txr->mtx)) /* uninitialized */
                        continue;
                /* Tear down TX under its lock, then destroy the lock. */
                IXL_TX_LOCK(txr);
                ixl_free_que_tx(que);
                if (txr->base)
                        i40e_free_dma_mem(&sc->hw, &txr->dma);
                IXL_TX_UNLOCK(txr);
                IXL_TX_LOCK_DESTROY(txr);

                /*
                 * NOTE(review): this `continue` skips RX teardown when
                 * only the TX mutex was initialized; setup initializes
                 * both before either can fail, so in practice the pair
                 * is all-or-nothing — confirm against ixlv_setup_queues.
                 */
                if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
                        continue;
                IXL_RX_LOCK(rxr);
                ixl_free_que_rx(que);
                if (rxr->base)
                        i40e_free_dma_mem(&sc->hw, &rxr->dma);
                IXL_RX_UNLOCK(rxr);
                IXL_RX_LOCK_DESTROY(rxr);
                
        }
        free(vsi->queues, M_DEVBUF);
}
2524
2525
2526 /*
2527 ** ixlv_config_rss - setup RSS 
2528 **
2529 ** RSS keys and table are cleared on VF reset.
2530 */
2531 static void
2532 ixlv_config_rss(struct ixlv_sc *sc)
2533 {
2534         struct i40e_hw  *hw = &sc->hw;
2535         struct ixl_vsi  *vsi = &sc->vsi;
2536         u32             lut = 0;
2537         u64             set_hena = 0, hena;
2538         int             i, j, que_id;
2539 #ifdef RSS
2540         u32             rss_hash_config;
2541         u32             rss_seed[IXL_KEYSZ];
2542 #else
2543         u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
2544                             0x183cfd8c, 0xce880440, 0x580cbc3c,
2545                             0x35897377, 0x328b25e1, 0x4fa98922,
2546                             0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
2547 #endif
2548         
2549         /* Don't set up RSS if using a single queue */
2550         if (vsi->num_queues == 1) {
2551                 wr32(hw, I40E_VFQF_HENA(0), 0);
2552                 wr32(hw, I40E_VFQF_HENA(1), 0);
2553                 ixl_flush(hw);
2554                 return;
2555         }
2556
2557 #ifdef RSS
2558         /* Fetch the configured RSS key */
2559         rss_getkey((uint8_t *) &rss_seed);
2560 #endif
2561         /* Fill out hash function seed */
2562         for (i = 0; i <= IXL_KEYSZ; i++)
2563                 wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
2564
2565         /* Enable PCTYPES for RSS: */
2566 #ifdef RSS
2567         rss_hash_config = rss_gethashconfig();
2568         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2569                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2570         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2571                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2572         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2573                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2574         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2575                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2576         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2577                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2578         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2579                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2580         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2581                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
2582 #else
2583         set_hena =
2584                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
2585                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
2586                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
2587                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
2588                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
2589                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
2590                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
2591                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
2592                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
2593                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
2594                 ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
2595 #endif
2596         hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
2597             ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
2598         hena |= set_hena;
2599         wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
2600         wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
2601
2602         // TODO: Fix -- only 3,7,11,15 are filled out, instead of all 16 registers
2603         /* Populate the LUT with max no. of queues in round robin fashion */
2604         for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
2605                 if (j == vsi->num_queues)
2606                         j = 0;
2607 #ifdef RSS
2608                 /*
2609                  * Fetch the RSS bucket id for the given indirection entry.
2610                  * Cap it at the number of configured buckets (which is
2611                  * num_queues.)
2612                  */
2613                 que_id = rss_get_indirection_to_bucket(i);
2614                 que_id = que_id % vsi->num_queues;
2615 #else
2616                 que_id = j;
2617 #endif
2618                 /* lut = 4-byte sliding window of 4 lut entries */
2619                 lut = (lut << 8) | (que_id & 0xF);
2620                 /* On i = 3, we have 4 entries in lut; write to the register */
2621                 if ((i & 3) == 3) {
2622                         wr32(hw, I40E_VFQF_HLUT(i), lut);
2623                         DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
2624                 }
2625         }
2626         ixl_flush(hw);
2627 }
2628
2629
2630 /*
2631 ** This routine refreshes vlan filters, called by init
2632 ** it scans the filter table and then updates the AQ
2633 */
2634 static void
2635 ixlv_setup_vlan_filters(struct ixlv_sc *sc)
2636 {
2637         struct ixl_vsi                  *vsi = &sc->vsi;
2638         struct ixlv_vlan_filter *f;
2639         int                             cnt = 0;
2640
2641         if (vsi->num_vlans == 0)
2642                 return;
2643         /*
2644         ** Scan the filter table for vlan entries,
2645         ** and if found call for the AQ update.
2646         */
2647         SLIST_FOREACH(f, sc->vlan_filters, next)
2648                 if (f->flags & IXL_FILTER_ADD)
2649                         cnt++;
2650         if (cnt > 0)
2651                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
2652                     IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
2653 }
2654
2655
2656 /*
2657 ** This routine adds new MAC filters to the sc's list;
2658 ** these are later added in hardware by sending a virtual
2659 ** channel message.
2660 */
2661 static int
2662 ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
2663 {
2664         struct ixlv_mac_filter  *f;
2665
2666         /* Does one already exist? */
2667         f = ixlv_find_mac_filter(sc, macaddr);
2668         if (f != NULL) {
2669                 IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
2670                     MAC_FORMAT_ARGS(macaddr));
2671                 return (EEXIST);
2672         }
2673
2674         /* If not, get a new empty filter */
2675         f = ixlv_get_mac_filter(sc);
2676         if (f == NULL) {
2677                 if_printf(sc->vsi.ifp, "%s: no filters available!!\n",
2678                     __func__);
2679                 return (ENOMEM);
2680         }
2681
2682         IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
2683             MAC_FORMAT_ARGS(macaddr));
2684
2685         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2686         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
2687         f->flags |= flags;
2688         return (0);
2689 }
2690
2691 /*
2692 ** Marks a MAC filter for deletion.
2693 */
2694 static int
2695 ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
2696 {
2697         struct ixlv_mac_filter  *f;
2698
2699         f = ixlv_find_mac_filter(sc, macaddr);
2700         if (f == NULL)
2701                 return (ENOENT);
2702
2703         f->flags |= IXL_FILTER_DEL;
2704         return (0);
2705 }
2706
2707 /*
2708 ** Tasklet handler for MSIX Adminq interrupts
2709 **  - done outside interrupt context since it might sleep
2710 */
2711 static void
2712 ixlv_do_adminq(void *context, int pending)
2713 {
2714         struct ixlv_sc          *sc = context;
2715
2716         mtx_lock(&sc->mtx);
2717         ixlv_do_adminq_locked(sc);
2718         mtx_unlock(&sc->mtx);
2719         return;
2720 }
2721
/*
 * Service the admin queue with the core lock held: drain all pending
 * admin queue events, dispatching each virtual channel message to
 * ixlv_vc_completion(), then check for and clear any latched ARQ/ASQ
 * error bits before re-enabling the admin queue interrupt.
 */
static void
ixlv_do_adminq_locked(struct ixlv_sc *sc)
{
	struct i40e_hw			*hw = &sc->hw;
	struct i40e_arq_event_info	event;
	struct i40e_virtchnl_msg	*v_msg;
	device_t			dev = sc->dev;
	u16				result = 0;
	u32				reg, oldreg;
	i40e_status			ret;

	IXLV_CORE_LOCK_ASSERT(sc);

	/* Reuse the softc's preallocated buffer for event payloads */
	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = sc->aq_buffer;
	/* The AQ descriptor doubles as the virtual channel message header */
	v_msg = (struct i40e_virtchnl_msg *)&event.desc;

	do {
		/* result is the count of events still pending after this one */
		ret = i40e_clean_arq_element(hw, &event, &result);
		if (ret)
			break;
		ixlv_vc_completion(sc, v_msg->v_opcode,
		    v_msg->v_retval, event.msg_buf, event.msg_len);
		/* Scrub the shared buffer before it is reused for the next event */
		if (result != 0)
			bzero(event.msg_buf, IXL_AQ_BUF_SZ);
	} while (result);

	/* check for Admin queue errors */
	oldreg = reg = rd32(hw, hw->aq.arq.len);
	if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
		device_printf(dev, "ARQ VF Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
	}
	if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
		device_printf(dev, "ARQ Overflow Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
	}
	if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
		device_printf(dev, "ARQ Critical Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
	}
	/* Only write back if at least one error bit was cleared */
	if (oldreg != reg)
		wr32(hw, hw->aq.arq.len, reg);

	/* Same read-clear-writeback sequence for the send (ASQ) side */
	oldreg = reg = rd32(hw, hw->aq.asq.len);
	if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
		device_printf(dev, "ASQ VF Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
	}
	if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
		device_printf(dev, "ASQ Overflow Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
	}
	if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
		device_printf(dev, "ASQ Critical Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.asq.len, reg);

	ixlv_enable_adminq_irq(hw);
}
2784
/*
 * Register all sysctl nodes for this device: driver-level counters,
 * per-VSI ethernet statistics, and one subtree per queue with TX/RX
 * counters plus live descriptor-tail readouts.  Registration order
 * determines the sysctl tree layout, so it is preserved exactly.
 */
static void
ixlv_add_sysctls(struct ixlv_sc *sc)
{
	device_t dev = sc->dev;
	struct ixl_vsi *vsi = &sc->vsi;
	struct i40e_eth_stats *es = &vsi->eth_stats;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);

	struct sysctl_oid *vsi_node, *queue_node;
	struct sysctl_oid_list *vsi_list, *queue_list;

#define QUEUE_NAME_LEN 32
	char queue_namebuf[QUEUE_NAME_LEN];

	struct ixl_queue *queues = vsi->queues;
	struct tx_ring *txr;
	struct rx_ring *rxr;

	/* Driver statistics sysctls */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
			CTLFLAG_RD, &sc->watchdog_events,
			"Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
			CTLFLAG_RD, &sc->admin_irq,
			"Admin Queue IRQ Handled");

	/* VSI statistics sysctls */
	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
				   CTLFLAG_RD, NULL, "VSI-specific statistics");
	vsi_list = SYSCTL_CHILDREN(vsi_node);

	/* Table-driven registration of the VSI ethernet stat counters.
	 * NOTE(review): these are added under `child` (device root), not
	 * `vsi_list`, despite the "vsi" node created above -- confirm
	 * whether that placement is intentional. */
	struct ixl_sysctl_info ctls[] =
	{
		{&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
		{&es->rx_unicast, "ucast_pkts_rcvd",
			"Unicast Packets Received"},
		{&es->rx_multicast, "mcast_pkts_rcvd",
			"Multicast Packets Received"},
		{&es->rx_broadcast, "bcast_pkts_rcvd",
			"Broadcast Packets Received"},
		{&es->rx_discards, "rx_discards", "Discarded RX packets"},
		{&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"},
		{&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
		{&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
		{&es->tx_multicast, "mcast_pkts_txd",
			"Multicast Packets Transmitted"},
		{&es->tx_broadcast, "bcast_pkts_txd",
			"Broadcast Packets Transmitted"},
		{&es->tx_errors, "tx_errors", "TX packet errors"},
		// end
		{0,0,0}
	};
	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != NULL)
	{
		SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name,
				CTLFLAG_RD, entry->stat,
				entry->description);
		entry++;
	}

	/* Queue sysctls: one "queN" node per queue under the vsi node */
	for (int q = 0; q < vsi->num_queues; q++) {
		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
					     CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		txr = &(queues[q].txr);
		rxr = &(queues[q].rxr);

		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
				"m_defrag() failed");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
				CTLFLAG_RD, &(queues[q].dropped_pkts),
				"Driver dropped packets");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
				CTLFLAG_RD, &(queues[q].irqs),
				"irqs on this queue");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
				CTLFLAG_RD, &(queues[q].tso),
				"TSO");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
				CTLFLAG_RD, &(queues[q].tx_dma_setup),
				"Driver tx dma failure in xmit");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
				CTLFLAG_RD, &(txr->no_desc),
				"Queue No Descriptor Available");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
				CTLFLAG_RD, &(txr->total_packets),
				"Queue Packets Transmitted");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
				CTLFLAG_RD, &(txr->tx_bytes),
				"Queue Bytes Transmitted");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
				CTLFLAG_RD, &(rxr->rx_packets),
				"Queue Packets Received");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
				CTLFLAG_RD, &(rxr->rx_bytes),
				"Queue Bytes Received");

		/* Examine queue state: live reads from hardware via
		 * the tail-register sysctl handlers */
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head", 
				CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
				sizeof(struct ixl_queue),
				ixlv_sysctl_qtx_tail_handler, "IU",
				"Queue Transmit Descriptor Tail");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_head", 
				CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
				sizeof(struct ixl_queue),
				ixlv_sysctl_qrx_tail_handler, "IU",
				"Queue Receive Descriptor Tail");
	}
}
2903
2904 static void
2905 ixlv_init_filters(struct ixlv_sc *sc)
2906 {
2907         sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
2908             M_DEVBUF, M_NOWAIT | M_ZERO);
2909         SLIST_INIT(sc->mac_filters);
2910         sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
2911             M_DEVBUF, M_NOWAIT | M_ZERO);
2912         SLIST_INIT(sc->vlan_filters);
2913         return;
2914 }
2915
2916 static void
2917 ixlv_free_filters(struct ixlv_sc *sc)
2918 {
2919         struct ixlv_mac_filter *f;
2920         struct ixlv_vlan_filter *v;
2921
2922         while (!SLIST_EMPTY(sc->mac_filters)) {
2923                 f = SLIST_FIRST(sc->mac_filters);
2924                 SLIST_REMOVE_HEAD(sc->mac_filters, next);
2925                 free(f, M_DEVBUF);
2926         }
2927         while (!SLIST_EMPTY(sc->vlan_filters)) {
2928                 v = SLIST_FIRST(sc->vlan_filters);
2929                 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
2930                 free(v, M_DEVBUF);
2931         }
2932         return;
2933 }
2934
2935 /**
2936  * ixlv_sysctl_qtx_tail_handler
2937  * Retrieves I40E_QTX_TAIL1 value from hardware
2938  * for a sysctl.
2939  */
2940 static int 
2941 ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
2942 {
2943         struct ixl_queue *que;
2944         int error;
2945         u32 val;
2946
2947         que = ((struct ixl_queue *)oidp->oid_arg1);
2948         if (!que) return 0;
2949
2950         val = rd32(que->vsi->hw, que->txr.tail);
2951         error = sysctl_handle_int(oidp, &val, 0, req);
2952         if (error || !req->newptr)
2953                 return error;
2954         return (0);
2955 }
2956
2957 /**
2958  * ixlv_sysctl_qrx_tail_handler
2959  * Retrieves I40E_QRX_TAIL1 value from hardware
2960  * for a sysctl.
2961  */
2962 static int 
2963 ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
2964 {
2965         struct ixl_queue *que;
2966         int error;
2967         u32 val;
2968
2969         que = ((struct ixl_queue *)oidp->oid_arg1);
2970         if (!que) return 0;
2971
2972         val = rd32(que->vsi->hw, que->rxr.tail);
2973         error = sysctl_handle_int(oidp, &val, 0, req);
2974         if (error || !req->newptr)
2975                 return error;
2976         return (0);
2977 }
2978