]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/ixl/if_ixlv.c
Update compiler-rt to 3.7.0 release. This also includes the sanitizer
[FreeBSD/FreeBSD.git] / sys / dev / ixl / if_ixlv.c
1 /******************************************************************************
2
3   Copyright (c) 2013-2015, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 #ifndef IXL_STANDALONE_BUILD
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 #endif
40
41 #include "ixl.h"
42 #include "ixlv.h"
43
44 #ifdef RSS
45 #include <net/rss_config.h>
46 #endif
47
48 /*********************************************************************
49  *  Driver version
50  *********************************************************************/
51 char ixlv_driver_version[] = "1.2.6";
52
53 /*********************************************************************
54  *  PCI Device ID Table
55  *
56  *  Used by probe to select devices to load on
57  *  Last field stores an index into ixlv_strings
58  *  Last entry must be all 0s
59  *
60  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61  *********************************************************************/
62
63 static ixl_vendor_info_t ixlv_vendor_info_array[] =
64 {
65         {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
66         {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
67         /* required last entry */
68         {0, 0, 0, 0, 0}
69 };
70
71 /*********************************************************************
72  *  Table of branding strings
73  *********************************************************************/
74
75 static char    *ixlv_strings[] = {
76         "Intel(R) Ethernet Connection XL710 VF Driver"
77 };
78
79
80 /*********************************************************************
81  *  Function prototypes
82  *********************************************************************/
83 static int      ixlv_probe(device_t);
84 static int      ixlv_attach(device_t);
85 static int      ixlv_detach(device_t);
86 static int      ixlv_shutdown(device_t);
87 static void     ixlv_init_locked(struct ixlv_sc *);
88 static int      ixlv_allocate_pci_resources(struct ixlv_sc *);
89 static void     ixlv_free_pci_resources(struct ixlv_sc *);
90 static int      ixlv_assign_msix(struct ixlv_sc *);
91 static int      ixlv_init_msix(struct ixlv_sc *);
92 static int      ixlv_init_taskqueue(struct ixlv_sc *);
93 static int      ixlv_setup_queues(struct ixlv_sc *);
94 static void     ixlv_config_rss(struct ixlv_sc *);
95 static void     ixlv_stop(struct ixlv_sc *);
96 static void     ixlv_add_multi(struct ixl_vsi *);
97 static void     ixlv_del_multi(struct ixl_vsi *);
98 static void     ixlv_free_queues(struct ixl_vsi *);
99 static int      ixlv_setup_interface(device_t, struct ixlv_sc *);
100
101 static int      ixlv_media_change(struct ifnet *);
102 static void     ixlv_media_status(struct ifnet *, struct ifmediareq *);
103
104 static void     ixlv_local_timer(void *);
105
106 static int      ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
107 static int      ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
108 static void     ixlv_init_filters(struct ixlv_sc *);
109 static void     ixlv_free_filters(struct ixlv_sc *);
110
111 static void     ixlv_msix_que(void *);
112 static void     ixlv_msix_adminq(void *);
113 static void     ixlv_do_adminq(void *, int);
114 static void     ixlv_do_adminq_locked(struct ixlv_sc *sc);
115 static void     ixlv_handle_que(void *, int);
116 static int      ixlv_reset(struct ixlv_sc *);
117 static int      ixlv_reset_complete(struct i40e_hw *);
118 static void     ixlv_set_queue_rx_itr(struct ixl_queue *);
119 static void     ixlv_set_queue_tx_itr(struct ixl_queue *);
120 static void     ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
121                     enum i40e_status_code);
122
123 static void     ixlv_enable_adminq_irq(struct i40e_hw *);
124 static void     ixlv_disable_adminq_irq(struct i40e_hw *);
125 static void     ixlv_enable_queue_irq(struct i40e_hw *, int);
126 static void     ixlv_disable_queue_irq(struct i40e_hw *, int);
127
128 static void     ixlv_setup_vlan_filters(struct ixlv_sc *);
129 static void     ixlv_register_vlan(void *, struct ifnet *, u16);
130 static void     ixlv_unregister_vlan(void *, struct ifnet *, u16);
131
132 static void     ixlv_init_hw(struct ixlv_sc *);
133 static int      ixlv_setup_vc(struct ixlv_sc *);
134 static int      ixlv_vf_config(struct ixlv_sc *);
135
136 static void     ixlv_cap_txcsum_tso(struct ixl_vsi *,
137                     struct ifnet *, int);
138
139 static void     ixlv_add_sysctls(struct ixlv_sc *);
140 static int      ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
141 static int      ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
142
143 /*********************************************************************
144  *  FreeBSD Device Interface Entry Points
145  *********************************************************************/
146
147 static device_method_t ixlv_methods[] = {
148         /* Device interface */
149         DEVMETHOD(device_probe, ixlv_probe),
150         DEVMETHOD(device_attach, ixlv_attach),
151         DEVMETHOD(device_detach, ixlv_detach),
152         DEVMETHOD(device_shutdown, ixlv_shutdown),
153         {0, 0}
154 };
155
156 static driver_t ixlv_driver = {
157         "ixlv", ixlv_methods, sizeof(struct ixlv_sc),
158 };
159
160 devclass_t ixlv_devclass;
161 DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);
162
163 MODULE_DEPEND(ixlv, pci, 1, 1, 1);
164 MODULE_DEPEND(ixlv, ether, 1, 1, 1);
165
166 /*
167 ** TUNEABLE PARAMETERS:
168 */
169
170 static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
171                    "IXLV driver parameters");
172
173 /*
174 ** Number of descriptors per ring:
175 **   - TX and RX are the same size
176 */
177 static int ixlv_ringsz = DEFAULT_RING;
178 TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
179 SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
180     &ixlv_ringsz, 0, "Descriptor Ring Size");
181
182 /* Set to zero to auto calculate  */
183 int ixlv_max_queues = 0;
184 TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
185 SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
186     &ixlv_max_queues, 0, "Number of Queues");
187
188 /*
189 ** Number of entries in Tx queue buf_ring.
190 ** Increasing this will reduce the number of
191 ** errors when transmitting fragmented UDP
192 ** packets.
193 */
194 static int ixlv_txbrsz = DEFAULT_TXBRSZ;
195 TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
196 SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
197     &ixlv_txbrsz, 0, "TX Buf Ring Size");
198
199 /*
200 ** Controls for Interrupt Throttling
201 **      - true/false for dynamic adjustment
202 **      - default values for static ITR
203 */
204 int ixlv_dynamic_rx_itr = 0;
205 TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
206 SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
207     &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
208
209 int ixlv_dynamic_tx_itr = 0;
210 TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
211 SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
212     &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
213
214 int ixlv_rx_itr = IXL_ITR_8K;
215 TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
216 SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
217     &ixlv_rx_itr, 0, "RX Interrupt Rate");
218
219 int ixlv_tx_itr = IXL_ITR_4K;
220 TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
221 SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
222     &ixlv_tx_itr, 0, "TX Interrupt Rate");
223
224         
225 /*********************************************************************
226  *  Device identification routine
227  *
228  *  ixlv_probe determines if the driver should be loaded on
229  *  the hardware based on PCI vendor/device id of the device.
230  *
231  *  return BUS_PROBE_DEFAULT on success, positive on failure
232  *********************************************************************/
233
234 static int
235 ixlv_probe(device_t dev)
236 {
237         ixl_vendor_info_t *ent;
238
239         u16     pci_vendor_id, pci_device_id;
240         u16     pci_subvendor_id, pci_subdevice_id;
241         char    device_name[256];
242
243         INIT_DEBUGOUT("ixlv_probe: begin");
244
245         pci_vendor_id = pci_get_vendor(dev);
246         if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
247                 return (ENXIO);
248
249         pci_device_id = pci_get_device(dev);
250         pci_subvendor_id = pci_get_subvendor(dev);
251         pci_subdevice_id = pci_get_subdevice(dev);
252
253         ent = ixlv_vendor_info_array;
254         while (ent->vendor_id != 0) {
255                 if ((pci_vendor_id == ent->vendor_id) &&
256                     (pci_device_id == ent->device_id) &&
257
258                     ((pci_subvendor_id == ent->subvendor_id) ||
259                      (ent->subvendor_id == 0)) &&
260
261                     ((pci_subdevice_id == ent->subdevice_id) ||
262                      (ent->subdevice_id == 0))) {
263                         sprintf(device_name, "%s, Version - %s",
264                                 ixlv_strings[ent->index],
265                                 ixlv_driver_version);
266                         device_set_desc_copy(dev, device_name);
267                         return (BUS_PROBE_DEFAULT);
268                 }
269                 ent++;
270         }
271         return (ENXIO);
272 }
273
274 /*********************************************************************
275  *  Device initialization routine
276  *
277  *  The attach entry point is called when the driver is being loaded.
278  *  This routine identifies the type of hardware, allocates all resources
279  *  and initializes the hardware.
280  *
281  *  return 0 on success, positive on failure
282  *********************************************************************/
283
284 static int
285 ixlv_attach(device_t dev)
286 {
287         struct ixlv_sc  *sc;
288         struct i40e_hw  *hw;
289         struct ixl_vsi  *vsi;
290         int             error = 0;
291
292         INIT_DBG_DEV(dev, "begin");
293
294         /* Allocate, clear, and link in our primary soft structure */
295         sc = device_get_softc(dev);
296         sc->dev = sc->osdep.dev = dev;
297         hw = &sc->hw;
298         vsi = &sc->vsi;
299         vsi->dev = dev;
300
301         /* Initialize hw struct */
302         ixlv_init_hw(sc);
303
304         /* Allocate filter lists */
305         ixlv_init_filters(sc);
306
307         /* Core Lock Init*/
308         mtx_init(&sc->mtx, device_get_nameunit(dev),
309             "IXL SC Lock", MTX_DEF);
310
311         /* Set up the timer callout */
312         callout_init_mtx(&sc->timer, &sc->mtx, 0);
313
314         /* Do PCI setup - map BAR0, etc */
315         if (ixlv_allocate_pci_resources(sc)) {
316                 device_printf(dev, "%s: Allocation of PCI resources failed\n",
317                     __func__);
318                 error = ENXIO;
319                 goto err_early;
320         }
321
322         INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");
323
324         error = i40e_set_mac_type(hw);
325         if (error) {
326                 device_printf(dev, "%s: set_mac_type failed: %d\n",
327                     __func__, error);
328                 goto err_pci_res;
329         }
330
331         error = ixlv_reset_complete(hw);
332         if (error) {
333                 device_printf(dev, "%s: Device is still being reset\n",
334                     __func__);
335                 goto err_pci_res;
336         }
337
338         INIT_DBG_DEV(dev, "VF Device is ready for configuration");
339
340         error = ixlv_setup_vc(sc);
341         if (error) {
342                 device_printf(dev, "%s: Error setting up PF comms, %d\n",
343                     __func__, error);
344                 goto err_pci_res;
345         }
346
347         INIT_DBG_DEV(dev, "PF API version verified");
348
349         /* TODO: Figure out why MDD events occur when this reset is removed. */
350         /* Need API version before sending reset message */
351         error = ixlv_reset(sc);
352         if (error) {
353                 device_printf(dev, "VF reset failed; reload the driver\n");
354                 goto err_aq;
355         }
356
357         INIT_DBG_DEV(dev, "VF reset complete");
358
359         /* Ask for VF config from PF */
360         error = ixlv_vf_config(sc);
361         if (error) {
362                 device_printf(dev, "Error getting configuration from PF: %d\n",
363                     error);
364                 goto err_aq;
365         }
366
367         INIT_DBG_DEV(dev, "VF config from PF:");
368         INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
369             sc->vf_res->num_vsis,
370             sc->vf_res->num_queue_pairs,
371             sc->vf_res->max_vectors,
372             sc->vf_res->max_mtu);
373         INIT_DBG_DEV(dev, "Offload flags: %#010x",
374             sc->vf_res->vf_offload_flags);
375
376         // TODO: Move this into ixlv_vf_config?
377         /* got VF config message back from PF, now we can parse it */
378         for (int i = 0; i < sc->vf_res->num_vsis; i++) {
379                 if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
380                         sc->vsi_res = &sc->vf_res->vsi_res[i];
381         }
382         if (!sc->vsi_res) {
383                 device_printf(dev, "%s: no LAN VSI found\n", __func__);
384                 error = EIO;
385                 goto err_res_buf;
386         }
387
388         INIT_DBG_DEV(dev, "Resource Acquisition complete");
389
390         /* If no mac address was assigned just make a random one */
391         if (!ixlv_check_ether_addr(hw->mac.addr)) {
392                 u8 addr[ETHER_ADDR_LEN];
393                 arc4rand(&addr, sizeof(addr), 0);
394                 addr[0] &= 0xFE;
395                 addr[0] |= 0x02;
396                 bcopy(addr, hw->mac.addr, sizeof(addr));
397         }
398
399         vsi->id = sc->vsi_res->vsi_id;
400         vsi->back = (void *)sc;
401         sc->link_up = TRUE;
402
403         /* This allocates the memory and early settings */
404         if (ixlv_setup_queues(sc) != 0) {
405                 device_printf(dev, "%s: setup queues failed!\n",
406                     __func__);
407                 error = EIO;
408                 goto out;
409         }
410
411         /* Setup the stack interface */
412         if (ixlv_setup_interface(dev, sc) != 0) {
413                 device_printf(dev, "%s: setup interface failed!\n",
414                     __func__);
415                 error = EIO;
416                 goto out;
417         }
418
419         INIT_DBG_DEV(dev, "Queue memory and interface setup");
420
421         /* Do queue interrupt setup */
422         ixlv_assign_msix(sc);
423
424         /* Start AdminQ taskqueue */
425         ixlv_init_taskqueue(sc);
426
427         /* Initialize stats */
428         bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
429         ixlv_add_sysctls(sc);
430
431         /* Register for VLAN events */
432         vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
433             ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
434         vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
435             ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
436
437         /* We want AQ enabled early */
438         ixlv_enable_adminq_irq(hw);
439
440         /* Set things up to run init */
441         sc->init_state = IXLV_INIT_READY;
442
443         ixl_vc_init_mgr(sc, &sc->vc_mgr);
444
445         INIT_DBG_DEV(dev, "end");
446         return (error);
447
448 out:
449         ixlv_free_queues(vsi);
450 err_res_buf:
451         free(sc->vf_res, M_DEVBUF);
452 err_aq:
453         i40e_shutdown_adminq(hw);
454 err_pci_res:
455         ixlv_free_pci_resources(sc);
456 err_early:
457         mtx_destroy(&sc->mtx);
458         ixlv_free_filters(sc);
459         INIT_DBG_DEV(dev, "end: error %d", error);
460         return (error);
461 }
462
463 /*********************************************************************
464  *  Device removal routine
465  *
466  *  The detach entry point is called when the driver is being removed.
467  *  This routine stops the adapter and deallocates all the resources
468  *  that were allocated for driver operation.
469  *
470  *  return 0 on success, positive on failure
471  *********************************************************************/
472
473 static int
474 ixlv_detach(device_t dev)
475 {
476         struct ixlv_sc  *sc = device_get_softc(dev);
477         struct ixl_vsi  *vsi = &sc->vsi;
478
479         INIT_DBG_DEV(dev, "begin");
480
481         /* Make sure VLANS are not using driver */
482         if (vsi->ifp->if_vlantrunk != NULL) {
483                 if_printf(vsi->ifp, "Vlan in use, detach first\n");
484                 INIT_DBG_DEV(dev, "end");
485                 return (EBUSY);
486         }
487
488         /* Stop driver */
489         ether_ifdetach(vsi->ifp);
490         if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
491                 mtx_lock(&sc->mtx);     
492                 ixlv_stop(sc);
493                 mtx_unlock(&sc->mtx);   
494         }
495
496         /* Unregister VLAN events */
497         if (vsi->vlan_attach != NULL)
498                 EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
499         if (vsi->vlan_detach != NULL)
500                 EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
501
502         /* Drain VC mgr */
503         callout_drain(&sc->vc_mgr.callout);
504
505         i40e_shutdown_adminq(&sc->hw);
506         taskqueue_free(sc->tq);
507         if_free(vsi->ifp);
508         free(sc->vf_res, M_DEVBUF);
509         ixlv_free_pci_resources(sc);
510         ixlv_free_queues(vsi);
511         mtx_destroy(&sc->mtx);
512         ixlv_free_filters(sc);
513
514         bus_generic_detach(dev);
515         INIT_DBG_DEV(dev, "end");
516         return (0);
517 }
518
519 /*********************************************************************
520  *
521  *  Shutdown entry point
522  *
523  **********************************************************************/
524
525 static int
526 ixlv_shutdown(device_t dev)
527 {
528         struct ixlv_sc  *sc = device_get_softc(dev);
529
530         INIT_DBG_DEV(dev, "begin");
531
532         mtx_lock(&sc->mtx);     
533         ixlv_stop(sc);
534         mtx_unlock(&sc->mtx);   
535
536         INIT_DBG_DEV(dev, "end");
537         return (0);
538 }
539
540 /*
541  * Configure TXCSUM(IPV6) and TSO(4/6)
542  *      - the hardware handles these together so we
543  *        need to tweak them 
544  */
545 static void
546 ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
547 {
548         /* Enable/disable TXCSUM/TSO4 */
549         if (!(ifp->if_capenable & IFCAP_TXCSUM)
550             && !(ifp->if_capenable & IFCAP_TSO4)) {
551                 if (mask & IFCAP_TXCSUM) {
552                         ifp->if_capenable |= IFCAP_TXCSUM;
553                         /* enable TXCSUM, restore TSO if previously enabled */
554                         if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
555                                 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
556                                 ifp->if_capenable |= IFCAP_TSO4;
557                         }
558                 }
559                 else if (mask & IFCAP_TSO4) {
560                         ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
561                         vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
562                         if_printf(ifp,
563                             "TSO4 requires txcsum, enabling both...\n");
564                 }
565         } else if((ifp->if_capenable & IFCAP_TXCSUM)
566             && !(ifp->if_capenable & IFCAP_TSO4)) {
567                 if (mask & IFCAP_TXCSUM)
568                         ifp->if_capenable &= ~IFCAP_TXCSUM;
569                 else if (mask & IFCAP_TSO4)
570                         ifp->if_capenable |= IFCAP_TSO4;
571         } else if((ifp->if_capenable & IFCAP_TXCSUM)
572             && (ifp->if_capenable & IFCAP_TSO4)) {
573                 if (mask & IFCAP_TXCSUM) {
574                         vsi->flags |= IXL_FLAGS_KEEP_TSO4;
575                         ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
576                         if_printf(ifp, 
577                             "TSO4 requires txcsum, disabling both...\n");
578                 } else if (mask & IFCAP_TSO4)
579                         ifp->if_capenable &= ~IFCAP_TSO4;
580         }
581
582         /* Enable/disable TXCSUM_IPV6/TSO6 */
583         if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
584             && !(ifp->if_capenable & IFCAP_TSO6)) {
585                 if (mask & IFCAP_TXCSUM_IPV6) {
586                         ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
587                         if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
588                                 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
589                                 ifp->if_capenable |= IFCAP_TSO6;
590                         }
591                 } else if (mask & IFCAP_TSO6) {
592                         ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
593                         vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
594                         if_printf(ifp,
595                             "TSO6 requires txcsum6, enabling both...\n");
596                 }
597         } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
598             && !(ifp->if_capenable & IFCAP_TSO6)) {
599                 if (mask & IFCAP_TXCSUM_IPV6)
600                         ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
601                 else if (mask & IFCAP_TSO6)
602                         ifp->if_capenable |= IFCAP_TSO6;
603         } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
604             && (ifp->if_capenable & IFCAP_TSO6)) {
605                 if (mask & IFCAP_TXCSUM_IPV6) {
606                         vsi->flags |= IXL_FLAGS_KEEP_TSO6;
607                         ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
608                         if_printf(ifp,
609                             "TSO6 requires txcsum6, disabling both...\n");
610                 } else if (mask & IFCAP_TSO6)
611                         ifp->if_capenable &= ~IFCAP_TSO6;
612         }
613 }
614
615 /*********************************************************************
616  *  Ioctl entry point
617  *
618  *  ixlv_ioctl is called when the user wants to configure the
619  *  interface.
620  *
621  *  return 0 on success, positive on failure
622  **********************************************************************/
623
624 static int
625 ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
626 {
627         struct ixl_vsi          *vsi = ifp->if_softc;
628         struct ixlv_sc  *sc = vsi->back;
629         struct ifreq            *ifr = (struct ifreq *)data;
630 #if defined(INET) || defined(INET6)
631         struct ifaddr           *ifa = (struct ifaddr *)data;
632         bool                    avoid_reset = FALSE;
633 #endif
634         int                     error = 0;
635
636
637         switch (command) {
638
639         case SIOCSIFADDR:
640 #ifdef INET
641                 if (ifa->ifa_addr->sa_family == AF_INET)
642                         avoid_reset = TRUE;
643 #endif
644 #ifdef INET6
645                 if (ifa->ifa_addr->sa_family == AF_INET6)
646                         avoid_reset = TRUE;
647 #endif
648 #if defined(INET) || defined(INET6)
649                 /*
650                 ** Calling init results in link renegotiation,
651                 ** so we avoid doing it when possible.
652                 */
653                 if (avoid_reset) {
654                         ifp->if_flags |= IFF_UP;
655                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
656                                 ixlv_init(vsi);
657 #ifdef INET
658                         if (!(ifp->if_flags & IFF_NOARP))
659                                 arp_ifinit(ifp, ifa);
660 #endif
661                 } else
662                         error = ether_ioctl(ifp, command, data);
663                 break;
664 #endif
665         case SIOCSIFMTU:
666                 IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
667                 mtx_lock(&sc->mtx);
668                 if (ifr->ifr_mtu > IXL_MAX_FRAME -
669                     ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
670                         error = EINVAL;
671                         IOCTL_DBG_IF(ifp, "mtu too large");
672                 } else {
673                         IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", ifp->if_mtu, ifr->ifr_mtu);
674                         // ERJ: Interestingly enough, these types don't match
675                         ifp->if_mtu = (u_long)ifr->ifr_mtu;
676                         vsi->max_frame_size =
677                             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
678                             + ETHER_VLAN_ENCAP_LEN;
679                         ixlv_init_locked(sc);
680                 }
681                 mtx_unlock(&sc->mtx);
682                 break;
683         case SIOCSIFFLAGS:
684                 IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
685                 mtx_lock(&sc->mtx);
686                 if (ifp->if_flags & IFF_UP) {
687                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
688                                 ixlv_init_locked(sc);
689                 } else
690                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
691                                 ixlv_stop(sc);
692                 sc->if_flags = ifp->if_flags;
693                 mtx_unlock(&sc->mtx);
694                 break;
695         case SIOCADDMULTI:
696                 IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
697                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
698                         mtx_lock(&sc->mtx);
699                         ixlv_disable_intr(vsi);
700                         ixlv_add_multi(vsi);
701                         ixlv_enable_intr(vsi);
702                         mtx_unlock(&sc->mtx);
703                 }
704                 break;
705         case SIOCDELMULTI:
706                 IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
707                 if (sc->init_state == IXLV_RUNNING) {
708                         mtx_lock(&sc->mtx);
709                         ixlv_disable_intr(vsi);
710                         ixlv_del_multi(vsi);
711                         ixlv_enable_intr(vsi);
712                         mtx_unlock(&sc->mtx);
713                 }
714                 break;
715         case SIOCSIFMEDIA:
716         case SIOCGIFMEDIA:
717                 IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
718                 error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
719                 break;
720         case SIOCSIFCAP:
721         {
722                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
723                 IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");
724
725                 ixlv_cap_txcsum_tso(vsi, ifp, mask);
726
727                 if (mask & IFCAP_RXCSUM)
728                         ifp->if_capenable ^= IFCAP_RXCSUM;
729                 if (mask & IFCAP_RXCSUM_IPV6)
730                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
731                 if (mask & IFCAP_LRO)
732                         ifp->if_capenable ^= IFCAP_LRO;
733                 if (mask & IFCAP_VLAN_HWTAGGING)
734                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
735                 if (mask & IFCAP_VLAN_HWFILTER)
736                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
737                 if (mask & IFCAP_VLAN_HWTSO)
738                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
739                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
740                         ixlv_init(vsi);
741                 }
742                 VLAN_CAPABILITIES(ifp);
743
744                 break;
745         }
746
747         default:
748                 IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
749                 error = ether_ioctl(ifp, command, data);
750                 break;
751         }
752
753         return (error);
754 }
755
756 /*
757 ** To do a reinit on the VF is unfortunately more complicated
758 ** than a physical device, we must have the PF more or less
759 ** completely recreate our memory, so many things that were
760 ** done only once at attach in traditional drivers now must be
761 ** redone at each reinitialization. This function does that
762 ** 'prelude' so we can then call the normal locked init code.
763 */
764 int
765 ixlv_reinit_locked(struct ixlv_sc *sc)
766 {
767         struct i40e_hw          *hw = &sc->hw;
768         struct ixl_vsi          *vsi = &sc->vsi;
769         struct ifnet            *ifp = vsi->ifp;
770         struct ixlv_mac_filter  *mf, *mf_temp;
771         struct ixlv_vlan_filter *vf;
772         int                     error = 0;
773
774         INIT_DBG_IF(ifp, "begin");
775
776         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
777                 ixlv_stop(sc);
778
779         error = ixlv_reset(sc);
780
781         INIT_DBG_IF(ifp, "VF was reset");
782
783         /* set the state in case we went thru RESET */
784         sc->init_state = IXLV_RUNNING;
785
786         /*
787         ** Resetting the VF drops all filters from hardware;
788         ** we need to mark them to be re-added in init.
789         */
790         SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
791                 if (mf->flags & IXL_FILTER_DEL) {
792                         SLIST_REMOVE(sc->mac_filters, mf,
793                             ixlv_mac_filter, next);
794                         free(mf, M_DEVBUF);
795                 } else
796                         mf->flags |= IXL_FILTER_ADD;
797         }
798         if (vsi->num_vlans != 0)
799                 SLIST_FOREACH(vf, sc->vlan_filters, next)
800                         vf->flags = IXL_FILTER_ADD;
801         else { /* clean any stale filters */
802                 while (!SLIST_EMPTY(sc->vlan_filters)) {
803                         vf = SLIST_FIRST(sc->vlan_filters);
804                         SLIST_REMOVE_HEAD(sc->vlan_filters, next);
805                         free(vf, M_DEVBUF);
806                 }
807         }
808
809         ixlv_enable_adminq_irq(hw);
810         ixl_vc_flush(&sc->vc_mgr);
811
812         INIT_DBG_IF(ifp, "end");
813         return (error);
814 }
815
816 static void
817 ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
818         enum i40e_status_code code)
819 {
820         struct ixlv_sc *sc;
821
822         sc = arg;
823
824         /*
825          * Ignore "Adapter Stopped" message as that happens if an ifconfig down
826          * happens while a command is in progress, so we don't print an error
827          * in that case.
828          */
829         if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
830                 if_printf(sc->vsi.ifp,
831                     "Error %d waiting for PF to complete operation %d\n",
832                     code, cmd->request);
833         }
834 }
835
836 static void
837 ixlv_init_locked(struct ixlv_sc *sc)
838 {
839         struct i40e_hw          *hw = &sc->hw;
840         struct ixl_vsi          *vsi = &sc->vsi;
841         struct ixl_queue        *que = vsi->queues;
842         struct ifnet            *ifp = vsi->ifp;
843         int                      error = 0;
844
845         INIT_DBG_IF(ifp, "begin");
846
847         IXLV_CORE_LOCK_ASSERT(sc);
848
849         /* Do a reinit first if an init has already been done */
850         if ((sc->init_state == IXLV_RUNNING) ||
851             (sc->init_state == IXLV_RESET_REQUIRED) ||
852             (sc->init_state == IXLV_RESET_PENDING))
853                 error = ixlv_reinit_locked(sc);
854         /* Don't bother with init if we failed reinit */
855         if (error)
856                 goto init_done;
857
858         /* Remove existing MAC filter if new MAC addr is set */
859         if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
860                 error = ixlv_del_mac_filter(sc, hw->mac.addr);
861                 if (error == 0)
862                         ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd, 
863                             IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
864                             sc);
865         }
866
867         /* Check for an LAA mac address... */
868         bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);
869
870         ifp->if_hwassist = 0;
871         if (ifp->if_capenable & IFCAP_TSO)
872                 ifp->if_hwassist |= CSUM_TSO;
873         if (ifp->if_capenable & IFCAP_TXCSUM)
874                 ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
875         if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
876                 ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;
877
878         /* Add mac filter for this VF to PF */
879         if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
880                 error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
881                 if (!error || error == EEXIST)
882                         ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
883                             IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
884                             sc);
885         }
886
887         /* Setup vlan's if needed */
888         ixlv_setup_vlan_filters(sc);
889
890         /* Prepare the queues for operation */
891         for (int i = 0; i < vsi->num_queues; i++, que++) {
892                 struct  rx_ring *rxr = &que->rxr;
893
894                 ixl_init_tx_ring(que);
895
896                 if (vsi->max_frame_size <= MCLBYTES)
897                         rxr->mbuf_sz = MCLBYTES;
898                 else
899                         rxr->mbuf_sz = MJUMPAGESIZE;
900                 ixl_init_rx_ring(que);
901         }
902
903         /* Configure queues */
904         ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
905             IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);
906
907         /* Set up RSS */
908         ixlv_config_rss(sc);
909
910         /* Map vectors */
911         ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd, 
912             IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);
913
914         /* Enable queues */
915         ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
916             IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);
917
918         /* Start the local timer */
919         callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
920
921         sc->init_state = IXLV_RUNNING;
922
923 init_done:
924         INIT_DBG_IF(ifp, "end");
925         return;
926 }
927
928 /*
929 **  Init entry point for the stack
930 */
931 void
932 ixlv_init(void *arg)
933 {
934         struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
935         struct ixlv_sc *sc = vsi->back;
936         int retries = 0;
937
938         mtx_lock(&sc->mtx);
939         ixlv_init_locked(sc);
940         mtx_unlock(&sc->mtx);
941
942         /* Wait for init_locked to finish */
943         while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
944             && ++retries < 100) {
945                 i40e_msec_delay(10);
946         }
947         if (retries >= IXLV_AQ_MAX_ERR)
948                 if_printf(vsi->ifp,
949                     "Init failed to complete in alloted time!\n");
950 }
951
952 /*
953  * ixlv_attach() helper function; gathers information about
954  * the (virtual) hardware for use elsewhere in the driver.
955  */
956 static void
957 ixlv_init_hw(struct ixlv_sc *sc)
958 {
959         struct i40e_hw *hw = &sc->hw;
960         device_t dev = sc->dev;
961         
962         /* Save off the information about this board */
963         hw->vendor_id = pci_get_vendor(dev);
964         hw->device_id = pci_get_device(dev);
965         hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
966         hw->subsystem_vendor_id =
967             pci_read_config(dev, PCIR_SUBVEND_0, 2);
968         hw->subsystem_device_id =
969             pci_read_config(dev, PCIR_SUBDEV_0, 2);
970
971         hw->bus.device = pci_get_slot(dev);
972         hw->bus.func = pci_get_function(dev);
973 }
974
975 /*
976  * ixlv_attach() helper function; initalizes the admin queue
977  * and attempts to establish contact with the PF by
978  * retrying the initial "API version" message several times
979  * or until the PF responds.
980  */
981 static int
982 ixlv_setup_vc(struct ixlv_sc *sc)
983 {
984         struct i40e_hw *hw = &sc->hw;
985         device_t dev = sc->dev;
986         int error = 0, ret_error = 0, asq_retries = 0;
987         bool send_api_ver_retried = 0;
988
989         /* Need to set these AQ paramters before initializing AQ */
990         hw->aq.num_arq_entries = IXL_AQ_LEN;
991         hw->aq.num_asq_entries = IXL_AQ_LEN;
992         hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
993         hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
994
995         for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
996                 /* Initialize admin queue */
997                 error = i40e_init_adminq(hw);
998                 if (error) {
999                         device_printf(dev, "%s: init_adminq failed: %d\n",
1000                             __func__, error);
1001                         ret_error = 1;
1002                         continue;
1003                 }
1004
1005                 INIT_DBG_DEV(dev, "Initialized Admin Queue, attempt %d", i+1);
1006
1007 retry_send:
1008                 /* Send VF's API version */
1009                 error = ixlv_send_api_ver(sc);
1010                 if (error) {
1011                         i40e_shutdown_adminq(hw);
1012                         ret_error = 2;
1013                         device_printf(dev, "%s: unable to send api"
1014                             " version to PF on attempt %d, error %d\n",
1015                             __func__, i+1, error);
1016                 }
1017
1018                 asq_retries = 0;
1019                 while (!i40e_asq_done(hw)) {
1020                         if (++asq_retries > IXLV_AQ_MAX_ERR) {
1021                                 i40e_shutdown_adminq(hw);
1022                                 DDPRINTF(dev, "Admin Queue timeout "
1023                                     "(waiting for send_api_ver), %d more retries...",
1024                                     IXLV_AQ_MAX_ERR - (i + 1));
1025                                 ret_error = 3;
1026                                 break;
1027                         } 
1028                         i40e_msec_delay(10);
1029                 }
1030                 if (asq_retries > IXLV_AQ_MAX_ERR)
1031                         continue;
1032
1033                 INIT_DBG_DEV(dev, "Sent API version message to PF");
1034
1035                 /* Verify that the VF accepts the PF's API version */
1036                 error = ixlv_verify_api_ver(sc);
1037                 if (error == ETIMEDOUT) {
1038                         if (!send_api_ver_retried) {
1039                                 /* Resend message, one more time */
1040                                 send_api_ver_retried++;
1041                                 device_printf(dev,
1042                                     "%s: Timeout while verifying API version on first"
1043                                     " try!\n", __func__);
1044                                 goto retry_send;
1045                         } else {
1046                                 device_printf(dev,
1047                                     "%s: Timeout while verifying API version on second"
1048                                     " try!\n", __func__);
1049                                 ret_error = 4;
1050                                 break;
1051                         }
1052                 }
1053                 if (error) {
1054                         device_printf(dev,
1055                             "%s: Unable to verify API version,"
1056                             " error %d\n", __func__, error);
1057                         ret_error = 5;
1058                 }
1059                 break;
1060         }
1061
1062         if (ret_error >= 4)
1063                 i40e_shutdown_adminq(hw);
1064         return (ret_error);
1065 }
1066
1067 /*
1068  * ixlv_attach() helper function; asks the PF for this VF's
1069  * configuration, and saves the information if it receives it.
1070  */
1071 static int
1072 ixlv_vf_config(struct ixlv_sc *sc)
1073 {
1074         struct i40e_hw *hw = &sc->hw;
1075         device_t dev = sc->dev;
1076         int bufsz, error = 0, ret_error = 0;
1077         int asq_retries, retried = 0;
1078
1079 retry_config:
1080         error = ixlv_send_vf_config_msg(sc);
1081         if (error) {
1082                 device_printf(dev,
1083                     "%s: Unable to send VF config request, attempt %d,"
1084                     " error %d\n", __func__, retried + 1, error);
1085                 ret_error = 2;
1086         }
1087
1088         asq_retries = 0;
1089         while (!i40e_asq_done(hw)) {
1090                 if (++asq_retries > IXLV_AQ_MAX_ERR) {
1091                         device_printf(dev, "%s: Admin Queue timeout "
1092                             "(waiting for send_vf_config_msg), attempt %d\n",
1093                             __func__, retried + 1);
1094                         ret_error = 3;
1095                         goto fail;
1096                 }
1097                 i40e_msec_delay(10);
1098         }
1099
1100         INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
1101             retried + 1);
1102
1103         if (!sc->vf_res) {
1104                 bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
1105                     (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
1106                 sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
1107                 if (!sc->vf_res) {
1108                         device_printf(dev,
1109                             "%s: Unable to allocate memory for VF configuration"
1110                             " message from PF on attempt %d\n", __func__, retried + 1);
1111                         ret_error = 1;
1112                         goto fail;
1113                 }
1114         }
1115
1116         /* Check for VF config response */
1117         error = ixlv_get_vf_config(sc);
1118         if (error == ETIMEDOUT) {
1119                 /* The 1st time we timeout, send the configuration message again */
1120                 if (!retried) {
1121                         retried++;
1122                         goto retry_config;
1123                 }
1124         }
1125         if (error) {
1126                 device_printf(dev,
1127                     "%s: Unable to get VF configuration from PF after %d tries!\n",
1128                     __func__, retried + 1);
1129                 ret_error = 4;
1130         }
1131         goto done;
1132
1133 fail:
1134         free(sc->vf_res, M_DEVBUF);
1135 done:
1136         return (ret_error);
1137 }
1138
1139 /*
1140  * Allocate MSI/X vectors, setup the AQ vector early
1141  */
1142 static int
1143 ixlv_init_msix(struct ixlv_sc *sc)
1144 {
1145         device_t dev = sc->dev;
1146         int rid, want, vectors, queues, available;
1147
1148         rid = PCIR_BAR(IXL_BAR);
1149         sc->msix_mem = bus_alloc_resource_any(dev,
1150             SYS_RES_MEMORY, &rid, RF_ACTIVE);
1151         if (!sc->msix_mem) {
1152                 /* May not be enabled */
1153                 device_printf(sc->dev,
1154                     "Unable to map MSIX table \n");
1155                 goto fail;
1156         }
1157
1158         available = pci_msix_count(dev); 
1159         if (available == 0) { /* system has msix disabled */
1160                 bus_release_resource(dev, SYS_RES_MEMORY,
1161                     rid, sc->msix_mem);
1162                 sc->msix_mem = NULL;
1163                 goto fail;
1164         }
1165
1166         /* Figure out a reasonable auto config value */
1167         queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
1168
1169         /* Override with hardcoded value if sane */
1170         if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues)) 
1171                 queues = ixlv_max_queues;
1172 #ifdef  RSS
1173         /* If we're doing RSS, clamp at the number of RSS buckets */
1174         if (queues > rss_getnumbuckets())
1175                 queues = rss_getnumbuckets();
1176 #endif
1177         /* Enforce the VF max value */
1178         if (queues > IXLV_MAX_QUEUES)
1179                 queues = IXLV_MAX_QUEUES;
1180
1181         /*
1182         ** Want one vector (RX/TX pair) per queue
1183         ** plus an additional for the admin queue.
1184         */
1185         want = queues + 1;
1186         if (want <= available)  /* Have enough */
1187                 vectors = want;
1188         else {
1189                 device_printf(sc->dev,
1190                     "MSIX Configuration Problem, "
1191                     "%d vectors available but %d wanted!\n",
1192                     available, want);
1193                 goto fail;
1194         }
1195
1196 #ifdef RSS
1197         /*
1198         * If we're doing RSS, the number of queues needs to
1199         * match the number of RSS buckets that are configured.
1200         *
1201         * + If there's more queues than RSS buckets, we'll end
1202         *   up with queues that get no traffic.
1203         *
1204         * + If there's more RSS buckets than queues, we'll end
1205         *   up having multiple RSS buckets map to the same queue,
1206         *   so there'll be some contention.
1207         */
1208         if (queues != rss_getnumbuckets()) {
1209                 device_printf(dev,
1210                     "%s: queues (%d) != RSS buckets (%d)"
1211                     "; performance will be impacted.\n",
1212                      __func__, queues, rss_getnumbuckets());
1213         }
1214 #endif
1215
1216         if (pci_alloc_msix(dev, &vectors) == 0) {
1217                 device_printf(sc->dev,
1218                     "Using MSIX interrupts with %d vectors\n", vectors);
1219                 sc->msix = vectors;
1220                 sc->vsi.num_queues = queues;
1221         }
1222
1223         /*
1224         ** Explicitly set the guest PCI BUSMASTER capability
1225         ** and we must rewrite the ENABLE in the MSIX control
1226         ** register again at this point to cause the host to
1227         ** successfully initialize us.
1228         */
1229         {
1230                 u16 pci_cmd_word;
1231                 int msix_ctrl;
1232                 pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1233                 pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
1234                 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1235                 pci_find_cap(dev, PCIY_MSIX, &rid);
1236                 rid += PCIR_MSIX_CTRL;
1237                 msix_ctrl = pci_read_config(dev, rid, 2);
1238                 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1239                 pci_write_config(dev, rid, msix_ctrl, 2);
1240         }
1241
1242         /* Next we need to setup the vector for the Admin Queue */
1243         rid = 1;        // zero vector + 1
1244         sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1245             &rid, RF_SHAREABLE | RF_ACTIVE);
1246         if (sc->res == NULL) {
1247                 device_printf(dev,"Unable to allocate"
1248                     " bus resource: AQ interrupt \n");
1249                 goto fail;
1250         }
1251         if (bus_setup_intr(dev, sc->res,
1252             INTR_TYPE_NET | INTR_MPSAFE, NULL,
1253             ixlv_msix_adminq, sc, &sc->tag)) {
1254                 sc->res = NULL;
1255                 device_printf(dev, "Failed to register AQ handler");
1256                 goto fail;
1257         }
1258         bus_describe_intr(dev, sc->res, sc->tag, "adminq");
1259
1260         return (vectors);
1261
1262 fail:
1263         /* The VF driver MUST use MSIX */
1264         return (0);
1265 }
1266
1267 static int
1268 ixlv_allocate_pci_resources(struct ixlv_sc *sc)
1269 {
1270         int             rid;
1271         device_t        dev = sc->dev;
1272
1273         rid = PCIR_BAR(0);
1274         sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1275             &rid, RF_ACTIVE);
1276
1277         if (!(sc->pci_mem)) {
1278                 device_printf(dev,"Unable to allocate bus resource: memory\n");
1279                 return (ENXIO);
1280         }
1281
1282         sc->osdep.mem_bus_space_tag =
1283                 rman_get_bustag(sc->pci_mem);
1284         sc->osdep.mem_bus_space_handle =
1285                 rman_get_bushandle(sc->pci_mem);
1286         sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
1287         sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
1288         sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
1289
1290         sc->hw.back = &sc->osdep;
1291
1292         /* Disable adminq interrupts */
1293         ixlv_disable_adminq_irq(&sc->hw);
1294
1295         /*
1296         ** Now setup MSI/X, it will return
1297         ** us the number of supported vectors
1298         */
1299         sc->msix = ixlv_init_msix(sc);
1300
1301         /* We fail without MSIX support */
1302         if (sc->msix == 0)
1303                 return (ENXIO);
1304
1305         return (0);
1306 }
1307
1308 static void
1309 ixlv_free_pci_resources(struct ixlv_sc *sc)
1310 {
1311         struct ixl_vsi         *vsi = &sc->vsi;
1312         struct ixl_queue       *que = vsi->queues;
1313         device_t                dev = sc->dev;
1314
1315         /* We may get here before stations are setup */
1316         if (que == NULL)
1317                 goto early;
1318
1319         /*
1320         **  Release all msix queue resources:
1321         */
1322         for (int i = 0; i < vsi->num_queues; i++, que++) {
1323                 int rid = que->msix + 1;
1324                 if (que->tag != NULL) {
1325                         bus_teardown_intr(dev, que->res, que->tag);
1326                         que->tag = NULL;
1327                 }
1328                 if (que->res != NULL)
1329                         bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1330         }
1331         
1332 early:
1333         /* Clean the AdminQ interrupt */
1334         if (sc->tag != NULL) {
1335                 bus_teardown_intr(dev, sc->res, sc->tag);
1336                 sc->tag = NULL;
1337         }
1338         if (sc->res != NULL)
1339                 bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
1340
1341         pci_release_msi(dev);
1342
1343         if (sc->msix_mem != NULL)
1344                 bus_release_resource(dev, SYS_RES_MEMORY,
1345                     PCIR_BAR(IXL_BAR), sc->msix_mem);
1346
1347         if (sc->pci_mem != NULL)
1348                 bus_release_resource(dev, SYS_RES_MEMORY,
1349                     PCIR_BAR(0), sc->pci_mem);
1350
1351         return;
1352 }
1353
1354 /*
1355  * Create taskqueue and tasklet for Admin Queue interrupts.
1356  */
1357 static int
1358 ixlv_init_taskqueue(struct ixlv_sc *sc)
1359 {
1360         int error = 0;
1361
1362         TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);
1363
1364         sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1365             taskqueue_thread_enqueue, &sc->tq);
1366         taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
1367             device_get_nameunit(sc->dev));
1368
1369         return (error);
1370 }
1371
1372 /*********************************************************************
1373  *
1374  *  Setup MSIX Interrupt resources and handlers for the VSI queues
1375  *
1376  **********************************************************************/
1377 static int
1378 ixlv_assign_msix(struct ixlv_sc *sc)
1379 {
1380         device_t        dev = sc->dev;
1381         struct          ixl_vsi *vsi = &sc->vsi;
1382         struct          ixl_queue *que = vsi->queues;
1383         struct          tx_ring  *txr;
1384         int             error, rid, vector = 1;
1385 #ifdef  RSS
1386         cpuset_t        cpu_mask;
1387 #endif
1388
1389         for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1390                 int cpu_id = i;
1391                 rid = vector + 1;
1392                 txr = &que->txr;
1393                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1394                     RF_SHAREABLE | RF_ACTIVE);
1395                 if (que->res == NULL) {
1396                         device_printf(dev,"Unable to allocate"
1397                             " bus resource: que interrupt [%d]\n", vector);
1398                         return (ENXIO);
1399                 }
1400                 /* Set the handler function */
1401                 error = bus_setup_intr(dev, que->res,
1402                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
1403                     ixlv_msix_que, que, &que->tag);
1404                 if (error) {
1405                         que->res = NULL;
1406                         device_printf(dev, "Failed to register que handler");
1407                         return (error);
1408                 }
1409                 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1410                 /* Bind the vector to a CPU */
1411 #ifdef RSS
1412                 cpu_id = rss_getcpu(i % rss_getnumbuckets());
1413 #endif
1414                 bus_bind_intr(dev, que->res, cpu_id);
1415                 que->msix = vector;
1416                 vsi->que_mask |= (u64)(1 << que->msix);
1417                 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1418                 TASK_INIT(&que->task, 0, ixlv_handle_que, que);
1419                 que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
1420                     taskqueue_thread_enqueue, &que->tq);
1421 #ifdef RSS
1422                 CPU_SETOF(cpu_id, &cpu_mask);
1423                 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
1424                     &cpu_mask, "%s (bucket %d)",
1425                     device_get_nameunit(dev), cpu_id);
1426 #else
1427                 taskqueue_start_threads(&que->tq, 1, PI_NET,
1428                     "%s que", device_get_nameunit(dev));
1429 #endif
1430
1431         }
1432
1433         return (0);
1434 }
1435
1436 /*
1437 ** Requests a VF reset from the PF.
1438 **
1439 ** Requires the VF's Admin Queue to be initialized.
1440 */
1441 static int
1442 ixlv_reset(struct ixlv_sc *sc)
1443 {
1444         struct i40e_hw  *hw = &sc->hw;
1445         device_t        dev = sc->dev;
1446         int             error = 0;
1447
1448         /* Ask the PF to reset us if we are initiating */
1449         if (sc->init_state != IXLV_RESET_PENDING)
1450                 ixlv_request_reset(sc);
1451
1452         i40e_msec_delay(100);
1453         error = ixlv_reset_complete(hw);
1454         if (error) {
1455                 device_printf(dev, "%s: VF reset failed\n",
1456                     __func__);
1457                 return (error);
1458         }
1459
1460         error = i40e_shutdown_adminq(hw);
1461         if (error) {
1462                 device_printf(dev, "%s: shutdown_adminq failed: %d\n",
1463                     __func__, error);
1464                 return (error);
1465         }
1466
1467         error = i40e_init_adminq(hw);
1468         if (error) {
1469                 device_printf(dev, "%s: init_adminq failed: %d\n",
1470                     __func__, error);
1471                 return(error);
1472         }
1473
1474         return (0);
1475 }
1476
1477 static int
1478 ixlv_reset_complete(struct i40e_hw *hw)
1479 {
1480         u32 reg;
1481
1482         for (int i = 0; i < 100; i++) {
1483                 reg = rd32(hw, I40E_VFGEN_RSTAT) &
1484                     I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1485
1486                 if ((reg == I40E_VFR_VFACTIVE) ||
1487                     (reg == I40E_VFR_COMPLETED))
1488                         return (0);
1489                 i40e_msec_delay(100);
1490         }
1491
1492         return (EBUSY);
1493 }
1494
1495
1496 /*********************************************************************
1497  *
1498  *  Setup networking device structure and register an interface.
1499  *
1500  **********************************************************************/
1501 static int
1502 ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
1503 {
1504         struct ifnet            *ifp;
1505         struct ixl_vsi          *vsi = &sc->vsi;
1506         struct ixl_queue        *que = vsi->queues;
1507
1508         INIT_DBG_DEV(dev, "begin");
1509
1510         ifp = vsi->ifp = if_alloc(IFT_ETHER);
1511         if (ifp == NULL) {
1512                 device_printf(dev, "%s: could not allocate ifnet"
1513                     " structure!\n", __func__);
1514                 return (-1);
1515         }
1516
1517         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1518
1519         ifp->if_mtu = ETHERMTU;
1520         ifp->if_baudrate = 4000000000;  // ??
1521         ifp->if_init = ixlv_init;
1522         ifp->if_softc = vsi;
1523         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1524         ifp->if_ioctl = ixlv_ioctl;
1525
1526 #if __FreeBSD_version >= 1100000
1527         if_setgetcounterfn(ifp, ixl_get_counter);
1528 #endif
1529
1530         ifp->if_transmit = ixl_mq_start;
1531
1532         ifp->if_qflush = ixl_qflush;
1533         ifp->if_snd.ifq_maxlen = que->num_desc - 2;
1534
1535         ether_ifattach(ifp, sc->hw.mac.addr);
1536
1537         vsi->max_frame_size =
1538             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1539             + ETHER_VLAN_ENCAP_LEN;
1540
1541         /*
1542          * Tell the upper layer(s) we support long frames.
1543          */
1544         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1545
1546         ifp->if_capabilities |= IFCAP_HWCSUM;
1547         ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1548         ifp->if_capabilities |= IFCAP_TSO;
1549         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1550
1551         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1552                              |  IFCAP_VLAN_HWTSO
1553                              |  IFCAP_VLAN_MTU
1554                              |  IFCAP_VLAN_HWCSUM
1555                              |  IFCAP_LRO;
1556         ifp->if_capenable = ifp->if_capabilities;
1557
1558         /*
1559         ** Don't turn this on by default, if vlans are
1560         ** created on another pseudo device (eg. lagg)
1561         ** then vlan events are not passed thru, breaking
1562         ** operation, but with HW FILTER off it works. If
1563         ** using vlans directly on the ixl driver you can
1564         ** enable this and get full hardware tag filtering.
1565         */
1566         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1567
1568         /*
1569          * Specify the media types supported by this adapter and register
1570          * callbacks to update media and link information
1571          */
1572         ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
1573                      ixlv_media_status);
1574
1575         // JFV Add media types later?
1576
1577         ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1578         ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1579
1580         INIT_DBG_DEV(dev, "end");
1581         return (0);
1582 }
1583
1584 /*
1585 ** Allocate and setup the interface queues
1586 */
1587 static int
1588 ixlv_setup_queues(struct ixlv_sc *sc)
1589 {
1590         device_t                dev = sc->dev;
1591         struct ixl_vsi          *vsi;
1592         struct ixl_queue        *que;
1593         struct tx_ring          *txr;
1594         struct rx_ring          *rxr;
1595         int                     rsize, tsize;
1596         int                     error = I40E_SUCCESS;
1597
1598         vsi = &sc->vsi;
1599         vsi->back = (void *)sc;
1600         vsi->hw = &sc->hw;
1601         vsi->num_vlans = 0;
1602
1603         /* Get memory for the station queues */
1604         if (!(vsi->queues =
1605                 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
1606                 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1607                         device_printf(dev, "Unable to allocate queue memory\n");
1608                         error = ENOMEM;
1609                         goto early;
1610         }
1611
1612         for (int i = 0; i < vsi->num_queues; i++) {
1613                 que = &vsi->queues[i];
1614                 que->num_desc = ixlv_ringsz;
1615                 que->me = i;
1616                 que->vsi = vsi;
1617                 /* mark the queue as active */
1618                 vsi->active_queues |= (u64)1 << que->me;
1619
1620                 txr = &que->txr;
1621                 txr->que = que;
1622                 txr->tail = I40E_QTX_TAIL1(que->me);
1623                 /* Initialize the TX lock */
1624                 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
1625                     device_get_nameunit(dev), que->me);
1626                 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
1627                 /*
1628                 ** Create the TX descriptor ring, the extra int is
1629                 ** added as the location for HEAD WB.
1630                 */
1631                 tsize = roundup2((que->num_desc *
1632                     sizeof(struct i40e_tx_desc)) +
1633                     sizeof(u32), DBA_ALIGN);
1634                 if (i40e_allocate_dma_mem(&sc->hw,
1635                     &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
1636                         device_printf(dev,
1637                             "Unable to allocate TX Descriptor memory\n");
1638                         error = ENOMEM;
1639                         goto fail;
1640                 }
1641                 txr->base = (struct i40e_tx_desc *)txr->dma.va;
1642                 bzero((void *)txr->base, tsize);
1643                 /* Now allocate transmit soft structs for the ring */
1644                 if (ixl_allocate_tx_data(que)) {
1645                         device_printf(dev,
1646                             "Critical Failure setting up TX structures\n");
1647                         error = ENOMEM;
1648                         goto fail;
1649                 }
1650                 /* Allocate a buf ring */
1651                 txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
1652                     M_WAITOK, &txr->mtx);
1653                 if (txr->br == NULL) {
1654                         device_printf(dev,
1655                             "Critical Failure setting up TX buf ring\n");
1656                         error = ENOMEM;
1657                         goto fail;
1658                 }
1659
1660                 /*
1661                  * Next the RX queues...
1662                  */ 
1663                 rsize = roundup2(que->num_desc *
1664                     sizeof(union i40e_rx_desc), DBA_ALIGN);
1665                 rxr = &que->rxr;
1666                 rxr->que = que;
1667                 rxr->tail = I40E_QRX_TAIL1(que->me);
1668
1669                 /* Initialize the RX side lock */
1670                 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
1671                     device_get_nameunit(dev), que->me);
1672                 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
1673
1674                 if (i40e_allocate_dma_mem(&sc->hw,
1675                     &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
1676                         device_printf(dev,
1677                             "Unable to allocate RX Descriptor memory\n");
1678                         error = ENOMEM;
1679                         goto fail;
1680                 }
1681                 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
1682                 bzero((void *)rxr->base, rsize);
1683
1684                 /* Allocate receive soft structs for the ring*/
1685                 if (ixl_allocate_rx_data(que)) {
1686                         device_printf(dev,
1687                             "Critical Failure setting up receive structs\n");
1688                         error = ENOMEM;
1689                         goto fail;
1690                 }
1691         }
1692
1693         return (0);
1694
1695 fail:
1696         for (int i = 0; i < vsi->num_queues; i++) {
1697                 que = &vsi->queues[i];
1698                 rxr = &que->rxr;
1699                 txr = &que->txr;
1700                 if (rxr->base)
1701                         i40e_free_dma_mem(&sc->hw, &rxr->dma);
1702                 if (txr->base)
1703                         i40e_free_dma_mem(&sc->hw, &txr->dma);
1704         }
1705         free(vsi->queues, M_DEVBUF);
1706
1707 early:
1708         return (error);
1709 }
1710
1711 /*
1712 ** This routine is run via an vlan config EVENT,
1713 ** it enables us to use the HW Filter table since
1714 ** we can get the vlan id. This just creates the
1715 ** entry in the soft version of the VFTA, init will
1716 ** repopulate the real table.
1717 */
1718 static void
1719 ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1720 {
1721         struct ixl_vsi          *vsi = arg;
1722         struct ixlv_sc          *sc = vsi->back;
1723         struct ixlv_vlan_filter *v;
1724
1725
1726         if (ifp->if_softc != arg)   /* Not our event */
1727                 return;
1728
1729         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1730                 return;
1731
1732         /* Sanity check - make sure it doesn't already exist */
1733         SLIST_FOREACH(v, sc->vlan_filters, next) {
1734                 if (v->vlan == vtag)
1735                         return;
1736         }
1737
1738         mtx_lock(&sc->mtx);
1739         ++vsi->num_vlans;
1740         v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
1741         SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
1742         v->vlan = vtag;
1743         v->flags = IXL_FILTER_ADD;
1744         ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
1745             IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
1746         mtx_unlock(&sc->mtx);
1747         return;
1748 }
1749
1750 /*
1751 ** This routine is run via an vlan
1752 ** unconfig EVENT, remove our entry
1753 ** in the soft vfta.
1754 */
1755 static void
1756 ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1757 {
1758         struct ixl_vsi          *vsi = arg;
1759         struct ixlv_sc          *sc = vsi->back;
1760         struct ixlv_vlan_filter *v;
1761         int                     i = 0;
1762         
1763         if (ifp->if_softc != arg)
1764                 return;
1765
1766         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1767                 return;
1768
1769         mtx_lock(&sc->mtx);
1770         SLIST_FOREACH(v, sc->vlan_filters, next) {
1771                 if (v->vlan == vtag) {
1772                         v->flags = IXL_FILTER_DEL;
1773                         ++i;
1774                         --vsi->num_vlans;
1775                 }
1776         }
1777         if (i)
1778                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
1779                     IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
1780         mtx_unlock(&sc->mtx);
1781         return;
1782 }
1783
1784 /*
1785 ** Get a new filter and add it to the mac filter list.
1786 */
1787 static struct ixlv_mac_filter *
1788 ixlv_get_mac_filter(struct ixlv_sc *sc)
1789 {
1790         struct ixlv_mac_filter  *f;
1791
1792         f = malloc(sizeof(struct ixlv_mac_filter),
1793             M_DEVBUF, M_NOWAIT | M_ZERO);
1794         if (f)
1795                 SLIST_INSERT_HEAD(sc->mac_filters, f, next);
1796
1797         return (f);
1798 }
1799
1800 /*
1801 ** Find the filter with matching MAC address
1802 */
1803 static struct ixlv_mac_filter *
1804 ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
1805 {
1806         struct ixlv_mac_filter  *f;
1807         bool                            match = FALSE;
1808
1809         SLIST_FOREACH(f, sc->mac_filters, next) {
1810                 if (cmp_etheraddr(f->macaddr, macaddr)) {
1811                         match = TRUE;
1812                         break;
1813                 }
1814         }       
1815
1816         if (!match)
1817                 f = NULL;
1818         return (f);
1819 }
1820
1821 /*
1822 ** Admin Queue interrupt handler
1823 */
1824 static void
1825 ixlv_msix_adminq(void *arg)
1826 {
1827         struct ixlv_sc  *sc = arg;
1828         struct i40e_hw  *hw = &sc->hw;
1829         u32             reg, mask;
1830
1831         reg = rd32(hw, I40E_VFINT_ICR01);
1832         mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
1833
1834         reg = rd32(hw, I40E_VFINT_DYN_CTL01);
1835         reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
1836         wr32(hw, I40E_VFINT_DYN_CTL01, reg);
1837
1838         /* schedule task */
1839         taskqueue_enqueue(sc->tq, &sc->aq_irq);
1840         return;
1841 }
1842
1843 void
1844 ixlv_enable_intr(struct ixl_vsi *vsi)
1845 {
1846         struct i40e_hw          *hw = vsi->hw;
1847         struct ixl_queue        *que = vsi->queues;
1848
1849         ixlv_enable_adminq_irq(hw);
1850         for (int i = 0; i < vsi->num_queues; i++, que++)
1851                 ixlv_enable_queue_irq(hw, que->me);
1852 }
1853
1854 void
1855 ixlv_disable_intr(struct ixl_vsi *vsi)
1856 {
1857         struct i40e_hw          *hw = vsi->hw;
1858         struct ixl_queue       *que = vsi->queues;
1859
1860         ixlv_disable_adminq_irq(hw);
1861         for (int i = 0; i < vsi->num_queues; i++, que++)
1862                 ixlv_disable_queue_irq(hw, que->me);
1863 }
1864
1865
1866 static void
1867 ixlv_disable_adminq_irq(struct i40e_hw *hw)
1868 {
1869         wr32(hw, I40E_VFINT_DYN_CTL01, 0);
1870         wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
1871         /* flush */
1872         rd32(hw, I40E_VFGEN_RSTAT);
1873         return;
1874 }
1875
1876 static void
1877 ixlv_enable_adminq_irq(struct i40e_hw *hw)
1878 {
1879         wr32(hw, I40E_VFINT_DYN_CTL01,
1880             I40E_VFINT_DYN_CTL01_INTENA_MASK |
1881             I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1882         wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
1883         /* flush */
1884         rd32(hw, I40E_VFGEN_RSTAT);
1885         return;
1886 }
1887
1888 static void
1889 ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
1890 {
1891         u32             reg;
1892
1893         reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1894             I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK; 
1895         wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
1896 }
1897
1898 static void
1899 ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
1900 {
1901         wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
1902         rd32(hw, I40E_VFGEN_RSTAT);
1903         return;
1904 }
1905
1906
1907 /*
1908 ** Provide a update to the queue RX
1909 ** interrupt moderation value.
1910 */
1911 static void
1912 ixlv_set_queue_rx_itr(struct ixl_queue *que)
1913 {
1914         struct ixl_vsi  *vsi = que->vsi;
1915         struct i40e_hw  *hw = vsi->hw;
1916         struct rx_ring  *rxr = &que->rxr;
1917         u16             rx_itr;
1918         u16             rx_latency = 0;
1919         int             rx_bytes;
1920
1921
1922         /* Idle, do nothing */
1923         if (rxr->bytes == 0)
1924                 return;
1925
1926         if (ixlv_dynamic_rx_itr) {
1927                 rx_bytes = rxr->bytes/rxr->itr;
1928                 rx_itr = rxr->itr;
1929
1930                 /* Adjust latency range */
1931                 switch (rxr->latency) {
1932                 case IXL_LOW_LATENCY:
1933                         if (rx_bytes > 10) {
1934                                 rx_latency = IXL_AVE_LATENCY;
1935                                 rx_itr = IXL_ITR_20K;
1936                         }
1937                         break;
1938                 case IXL_AVE_LATENCY:
1939                         if (rx_bytes > 20) {
1940                                 rx_latency = IXL_BULK_LATENCY;
1941                                 rx_itr = IXL_ITR_8K;
1942                         } else if (rx_bytes <= 10) {
1943                                 rx_latency = IXL_LOW_LATENCY;
1944                                 rx_itr = IXL_ITR_100K;
1945                         }
1946                         break;
1947                 case IXL_BULK_LATENCY:
1948                         if (rx_bytes <= 20) {
1949                                 rx_latency = IXL_AVE_LATENCY;
1950                                 rx_itr = IXL_ITR_20K;
1951                         }
1952                         break;
1953                  }
1954
1955                 rxr->latency = rx_latency;
1956
1957                 if (rx_itr != rxr->itr) {
1958                         /* do an exponential smoothing */
1959                         rx_itr = (10 * rx_itr * rxr->itr) /
1960                             ((9 * rx_itr) + rxr->itr);
1961                         rxr->itr = rx_itr & IXL_MAX_ITR;
1962                         wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1963                             que->me), rxr->itr);
1964                 }
1965         } else { /* We may have have toggled to non-dynamic */
1966                 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
1967                         vsi->rx_itr_setting = ixlv_rx_itr;
1968                 /* Update the hardware if needed */
1969                 if (rxr->itr != vsi->rx_itr_setting) {
1970                         rxr->itr = vsi->rx_itr_setting;
1971                         wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1972                             que->me), rxr->itr);
1973                 }
1974         }
1975         rxr->bytes = 0;
1976         rxr->packets = 0;
1977         return;
1978 }
1979
1980
1981 /*
1982 ** Provide a update to the queue TX
1983 ** interrupt moderation value.
1984 */
1985 static void
1986 ixlv_set_queue_tx_itr(struct ixl_queue *que)
1987 {
1988         struct ixl_vsi  *vsi = que->vsi;
1989         struct i40e_hw  *hw = vsi->hw;
1990         struct tx_ring  *txr = &que->txr;
1991         u16             tx_itr;
1992         u16             tx_latency = 0;
1993         int             tx_bytes;
1994
1995
1996         /* Idle, do nothing */
1997         if (txr->bytes == 0)
1998                 return;
1999
2000         if (ixlv_dynamic_tx_itr) {
2001                 tx_bytes = txr->bytes/txr->itr;
2002                 tx_itr = txr->itr;
2003
2004                 switch (txr->latency) {
2005                 case IXL_LOW_LATENCY:
2006                         if (tx_bytes > 10) {
2007                                 tx_latency = IXL_AVE_LATENCY;
2008                                 tx_itr = IXL_ITR_20K;
2009                         }
2010                         break;
2011                 case IXL_AVE_LATENCY:
2012                         if (tx_bytes > 20) {
2013                                 tx_latency = IXL_BULK_LATENCY;
2014                                 tx_itr = IXL_ITR_8K;
2015                         } else if (tx_bytes <= 10) {
2016                                 tx_latency = IXL_LOW_LATENCY;
2017                                 tx_itr = IXL_ITR_100K;
2018                         }
2019                         break;
2020                 case IXL_BULK_LATENCY:
2021                         if (tx_bytes <= 20) {
2022                                 tx_latency = IXL_AVE_LATENCY;
2023                                 tx_itr = IXL_ITR_20K;
2024                         }
2025                         break;
2026                 }
2027
2028                 txr->latency = tx_latency;
2029
2030                 if (tx_itr != txr->itr) {
2031                  /* do an exponential smoothing */
2032                         tx_itr = (10 * tx_itr * txr->itr) /
2033                             ((9 * tx_itr) + txr->itr);
2034                         txr->itr = tx_itr & IXL_MAX_ITR;
2035                         wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2036                             que->me), txr->itr);
2037                 }
2038
2039         } else { /* We may have have toggled to non-dynamic */
2040                 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2041                         vsi->tx_itr_setting = ixlv_tx_itr;
2042                 /* Update the hardware if needed */
2043                 if (txr->itr != vsi->tx_itr_setting) {
2044                         txr->itr = vsi->tx_itr_setting;
2045                         wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2046                             que->me), txr->itr);
2047                 }
2048         }
2049         txr->bytes = 0;
2050         txr->packets = 0;
2051         return;
2052 }
2053
2054
2055 /*
2056 **
2057 ** MSIX Interrupt Handlers and Tasklets
2058 **
2059 */
2060 static void
2061 ixlv_handle_que(void *context, int pending)
2062 {
2063         struct ixl_queue *que = context;
2064         struct ixl_vsi *vsi = que->vsi;
2065         struct i40e_hw  *hw = vsi->hw;
2066         struct tx_ring  *txr = &que->txr;
2067         struct ifnet    *ifp = vsi->ifp;
2068         bool            more;
2069
2070         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2071                 more = ixl_rxeof(que, IXL_RX_LIMIT);
2072                 mtx_lock(&txr->mtx);
2073                 ixl_txeof(que);
2074                 if (!drbr_empty(ifp, txr->br))
2075                         ixl_mq_start_locked(ifp, txr);
2076                 mtx_unlock(&txr->mtx);
2077                 if (more) {
2078                         taskqueue_enqueue(que->tq, &que->task);
2079                         return;
2080                 }
2081         }
2082
2083         /* Reenable this interrupt - hmmm */
2084         ixlv_enable_queue_irq(hw, que->me);
2085         return;
2086 }
2087
2088
2089 /*********************************************************************
2090  *
2091  *  MSIX Queue Interrupt Service routine
2092  *
2093  **********************************************************************/
2094 static void
2095 ixlv_msix_que(void *arg)
2096 {
2097         struct ixl_queue        *que = arg;
2098         struct ixl_vsi  *vsi = que->vsi;
2099         struct i40e_hw  *hw = vsi->hw;
2100         struct tx_ring  *txr = &que->txr;
2101         bool            more_tx, more_rx;
2102
2103         /* Spurious interrupts are ignored */
2104         if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
2105                 return;
2106
2107         ++que->irqs;
2108
2109         more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
2110
2111         mtx_lock(&txr->mtx);
2112         more_tx = ixl_txeof(que);
2113         /*
2114         ** Make certain that if the stack 
2115         ** has anything queued the task gets
2116         ** scheduled to handle it.
2117         */
2118         if (!drbr_empty(vsi->ifp, txr->br))
2119                 more_tx = 1;
2120         mtx_unlock(&txr->mtx);
2121
2122         ixlv_set_queue_rx_itr(que);
2123         ixlv_set_queue_tx_itr(que);
2124
2125         if (more_tx || more_rx)
2126                 taskqueue_enqueue(que->tq, &que->task);
2127         else
2128                 ixlv_enable_queue_irq(hw, que->me);
2129
2130         return;
2131 }
2132
2133
2134 /*********************************************************************
2135  *
2136  *  Media Ioctl callback
2137  *
2138  *  This routine is called whenever the user queries the status of
2139  *  the interface using ifconfig.
2140  *
2141  **********************************************************************/
2142 static void
2143 ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
2144 {
2145         struct ixl_vsi          *vsi = ifp->if_softc;
2146         struct ixlv_sc  *sc = vsi->back;
2147
2148         INIT_DBG_IF(ifp, "begin");
2149
2150         mtx_lock(&sc->mtx);
2151
2152         ixlv_update_link_status(sc);
2153
2154         ifmr->ifm_status = IFM_AVALID;
2155         ifmr->ifm_active = IFM_ETHER;
2156
2157         if (!sc->link_up) {
2158                 mtx_unlock(&sc->mtx);
2159                 INIT_DBG_IF(ifp, "end: link not up");
2160                 return;
2161         }
2162
2163         ifmr->ifm_status |= IFM_ACTIVE;
2164         /* Hardware is always full-duplex */
2165         ifmr->ifm_active |= IFM_FDX;
2166         mtx_unlock(&sc->mtx);
2167         INIT_DBG_IF(ifp, "end");
2168         return;
2169 }
2170
2171 /*********************************************************************
2172  *
2173  *  Media Ioctl callback
2174  *
2175  *  This routine is called when the user changes speed/duplex using
2176  *  media/mediopt option with ifconfig.
2177  *
2178  **********************************************************************/
2179 static int
2180 ixlv_media_change(struct ifnet * ifp)
2181 {
2182         struct ixl_vsi *vsi = ifp->if_softc;
2183         struct ifmedia *ifm = &vsi->media;
2184
2185         INIT_DBG_IF(ifp, "begin");
2186
2187         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2188                 return (EINVAL);
2189
2190         INIT_DBG_IF(ifp, "end");
2191         return (0);
2192 }
2193
2194
2195 /*********************************************************************
2196  *  Multicast Initialization
2197  *
2198  *  This routine is called by init to reset a fresh state.
2199  *
2200  **********************************************************************/
2201
2202 static void
2203 ixlv_init_multi(struct ixl_vsi *vsi)
2204 {
2205         struct ixlv_mac_filter *f;
2206         struct ixlv_sc  *sc = vsi->back;
2207         int                     mcnt = 0;
2208
2209         IOCTL_DBG_IF(vsi->ifp, "begin");
2210
2211         /* First clear any multicast filters */
2212         SLIST_FOREACH(f, sc->mac_filters, next) {
2213                 if ((f->flags & IXL_FILTER_USED)
2214                     && (f->flags & IXL_FILTER_MC)) {
2215                         f->flags |= IXL_FILTER_DEL;
2216                         mcnt++;
2217                 }
2218         }
2219         if (mcnt > 0)
2220                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2221                     IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2222                     sc);
2223
2224         IOCTL_DBG_IF(vsi->ifp, "end");
2225 }
2226
2227 static void
2228 ixlv_add_multi(struct ixl_vsi *vsi)
2229 {
2230         struct ifmultiaddr      *ifma;
2231         struct ifnet            *ifp = vsi->ifp;
2232         struct ixlv_sc  *sc = vsi->back;
2233         int                     mcnt = 0;
2234
2235         IOCTL_DBG_IF(ifp, "begin");
2236
2237         if_maddr_rlock(ifp);
2238         /*
2239         ** Get a count, to decide if we
2240         ** simply use multicast promiscuous.
2241         */
2242         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2243                 if (ifma->ifma_addr->sa_family != AF_LINK)
2244                         continue;
2245                 mcnt++;
2246         }
2247         if_maddr_runlock(ifp);
2248
2249         // TODO: Remove -- cannot set promiscuous mode in a VF
2250         if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
2251                 /* delete all multicast filters */
2252                 ixlv_init_multi(vsi);
2253                 sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
2254                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2255                     IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
2256                     sc);
2257                 IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
2258                 return;
2259         }
2260
2261         mcnt = 0;
2262         if_maddr_rlock(ifp);
2263         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2264                 if (ifma->ifma_addr->sa_family != AF_LINK)
2265                         continue;
2266                 if (!ixlv_add_mac_filter(sc,
2267                     (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2268                     IXL_FILTER_MC))
2269                         mcnt++;
2270         }
2271         if_maddr_runlock(ifp);
2272         /*
2273         ** Notify AQ task that sw filters need to be
2274         ** added to hw list
2275         */
2276         if (mcnt > 0)
2277                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2278                     IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
2279                     sc);
2280
2281         IOCTL_DBG_IF(ifp, "end");
2282 }
2283
2284 static void
2285 ixlv_del_multi(struct ixl_vsi *vsi)
2286 {
2287         struct ixlv_mac_filter *f;
2288         struct ifmultiaddr      *ifma;
2289         struct ifnet            *ifp = vsi->ifp;
2290         struct ixlv_sc  *sc = vsi->back;
2291         int                     mcnt = 0;
2292         bool            match = FALSE;
2293
2294         IOCTL_DBG_IF(ifp, "begin");
2295
2296         /* Search for removed multicast addresses */
2297         if_maddr_rlock(ifp);
2298         SLIST_FOREACH(f, sc->mac_filters, next) {
2299                 if ((f->flags & IXL_FILTER_USED)
2300                     && (f->flags & IXL_FILTER_MC)) {
2301                         /* check if mac address in filter is in sc's list */
2302                         match = FALSE;
2303                         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2304                                 if (ifma->ifma_addr->sa_family != AF_LINK)
2305                                         continue;
2306                                 u8 *mc_addr =
2307                                     (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2308                                 if (cmp_etheraddr(f->macaddr, mc_addr)) {
2309                                         match = TRUE;
2310                                         break;
2311                                 }
2312                         }
2313                         /* if this filter is not in the sc's list, remove it */
2314                         if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
2315                                 f->flags |= IXL_FILTER_DEL;
2316                                 mcnt++;
2317                                 IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
2318                                     MAC_FORMAT_ARGS(f->macaddr));
2319                         }
2320                         else if (match == FALSE)
2321                                 IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
2322                                     MAC_FORMAT_ARGS(f->macaddr));
2323                 }
2324         }
2325         if_maddr_runlock(ifp);
2326
2327         if (mcnt > 0)
2328                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2329                     IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2330                     sc);
2331
2332         IOCTL_DBG_IF(ifp, "end");
2333 }
2334
2335 /*********************************************************************
2336  *  Timer routine
2337  *
2338  *  This routine checks for link status,updates statistics,
2339  *  and runs the watchdog check.
2340  *
2341  **********************************************************************/
2342
2343 static void
2344 ixlv_local_timer(void *arg)
2345 {
2346         struct ixlv_sc  *sc = arg;
2347         struct i40e_hw          *hw = &sc->hw;
2348         struct ixl_vsi          *vsi = &sc->vsi;
2349         struct ixl_queue        *que = vsi->queues;
2350         device_t                dev = sc->dev;
2351         int                     hung = 0;
2352         u32                     mask, val;
2353
2354         IXLV_CORE_LOCK_ASSERT(sc);
2355
2356         /* If Reset is in progress just bail */
2357         if (sc->init_state == IXLV_RESET_PENDING)
2358                 return;
2359
2360         /* Check for when PF triggers a VF reset */
2361         val = rd32(hw, I40E_VFGEN_RSTAT) &
2362             I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2363
2364         if (val != I40E_VFR_VFACTIVE
2365             && val != I40E_VFR_COMPLETED) {
2366                 DDPRINTF(dev, "reset in progress! (%d)", val);
2367                 return;
2368         }
2369
2370         ixlv_request_stats(sc);
2371
2372         /* clean and process any events */
2373         taskqueue_enqueue(sc->tq, &sc->aq_irq);
2374
2375         /*
2376         ** Check status on the queues for a hang
2377         */
2378         mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
2379             I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK);
2380
2381         for (int i = 0; i < vsi->num_queues; i++,que++) {
2382                 /* Any queues with outstanding work get a sw irq */
2383                 if (que->busy)
2384                         wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
2385                 /*
2386                 ** Each time txeof runs without cleaning, but there
2387                 ** are uncleaned descriptors it increments busy. If
2388                 ** we get to 5 we declare it hung.
2389                 */
2390                 if (que->busy == IXL_QUEUE_HUNG) {
2391                         ++hung;
2392                         /* Mark the queue as inactive */
2393                         vsi->active_queues &= ~((u64)1 << que->me);
2394                         continue;
2395                 } else {
2396                         /* Check if we've come back from hung */
2397                         if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
2398                                 vsi->active_queues |= ((u64)1 << que->me);
2399                 }
2400                 if (que->busy >= IXL_MAX_TX_BUSY) {
2401                         device_printf(dev,"Warning queue %d "
2402                             "appears to be hung!\n", i);
2403                         que->busy = IXL_QUEUE_HUNG;
2404                         ++hung;
2405                 }
2406         }
2407         /* Only reset when all queues show hung */
2408         if (hung == vsi->num_queues)
2409                 goto hung;
2410         callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
2411         return;
2412
2413 hung:
2414         device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
2415         sc->init_state = IXLV_RESET_REQUIRED;
2416         ixlv_init_locked(sc);
2417 }
2418
2419 /*
2420 ** Note: this routine updates the OS on the link state
2421 **      the real check of the hardware only happens with
2422 **      a link interrupt.
2423 */
2424 void
2425 ixlv_update_link_status(struct ixlv_sc *sc)
2426 {
2427         struct ixl_vsi          *vsi = &sc->vsi;
2428         struct ifnet            *ifp = vsi->ifp;
2429
2430         if (sc->link_up){ 
2431                 if (vsi->link_active == FALSE) {
2432                         if (bootverbose)
2433                                 if_printf(ifp,"Link is Up, %d Gbps\n",
2434                                     (sc->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
2435                         vsi->link_active = TRUE;
2436                         if_link_state_change(ifp, LINK_STATE_UP);
2437                 }
2438         } else { /* Link down */
2439                 if (vsi->link_active == TRUE) {
2440                         if (bootverbose)
2441                                 if_printf(ifp,"Link is Down\n");
2442                         if_link_state_change(ifp, LINK_STATE_DOWN);
2443                         vsi->link_active = FALSE;
2444                 }
2445         }
2446
2447         return;
2448 }
2449
2450 /*********************************************************************
2451  *
2452  *  This routine disables all traffic on the adapter by issuing a
2453  *  global reset on the MAC and deallocates TX/RX buffers.
2454  *
2455  **********************************************************************/
2456
2457 static void
2458 ixlv_stop(struct ixlv_sc *sc)
2459 {
2460         struct ifnet *ifp;
2461         int start;
2462
2463         ifp = sc->vsi.ifp;
2464         INIT_DBG_IF(ifp, "begin");
2465
2466         IXLV_CORE_LOCK_ASSERT(sc);
2467
2468         ixl_vc_flush(&sc->vc_mgr);
2469         ixlv_disable_queues(sc);
2470
2471         start = ticks;
2472         while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
2473             ((ticks - start) < hz/10))
2474                 ixlv_do_adminq_locked(sc);
2475
2476         /* Stop the local timer */
2477         callout_stop(&sc->timer);
2478
2479         INIT_DBG_IF(ifp, "end");
2480 }
2481
2482
2483 /*********************************************************************
2484  *
2485  *  Free all station queue structs.
2486  *
2487  **********************************************************************/
2488 static void
2489 ixlv_free_queues(struct ixl_vsi *vsi)
2490 {
2491         struct ixlv_sc  *sc = (struct ixlv_sc *)vsi->back;
2492         struct ixl_queue        *que = vsi->queues;
2493
2494         for (int i = 0; i < vsi->num_queues; i++, que++) {
2495                 struct tx_ring *txr = &que->txr;
2496                 struct rx_ring *rxr = &que->rxr;
2497         
2498                 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2499                         continue;
2500                 IXL_TX_LOCK(txr);
2501                 ixl_free_que_tx(que);
2502                 if (txr->base)
2503                         i40e_free_dma_mem(&sc->hw, &txr->dma);
2504                 IXL_TX_UNLOCK(txr);
2505                 IXL_TX_LOCK_DESTROY(txr);
2506
2507                 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2508                         continue;
2509                 IXL_RX_LOCK(rxr);
2510                 ixl_free_que_rx(que);
2511                 if (rxr->base)
2512                         i40e_free_dma_mem(&sc->hw, &rxr->dma);
2513                 IXL_RX_UNLOCK(rxr);
2514                 IXL_RX_LOCK_DESTROY(rxr);
2515                 
2516         }
2517         free(vsi->queues, M_DEVBUF);
2518 }
2519
2520
2521 /*
2522 ** ixlv_config_rss - setup RSS 
2523 **
2524 ** RSS keys and table are cleared on VF reset.
2525 */
2526 static void
2527 ixlv_config_rss(struct ixlv_sc *sc)
2528 {
2529         struct i40e_hw  *hw = &sc->hw;
2530         struct ixl_vsi  *vsi = &sc->vsi;
2531         u32             lut = 0;
2532         u64             set_hena = 0, hena;
2533         int             i, j, que_id;
2534 #ifdef RSS
2535         u32             rss_hash_config;
2536         u32             rss_seed[IXL_KEYSZ];
2537 #else
2538         u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
2539                             0x183cfd8c, 0xce880440, 0x580cbc3c,
2540                             0x35897377, 0x328b25e1, 0x4fa98922,
2541                             0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
2542 #endif
2543         
2544         /* Don't set up RSS if using a single queue */
2545         if (vsi->num_queues == 1) {
2546                 wr32(hw, I40E_VFQF_HENA(0), 0);
2547                 wr32(hw, I40E_VFQF_HENA(1), 0);
2548                 ixl_flush(hw);
2549                 return;
2550         }
2551
2552 #ifdef RSS
2553         /* Fetch the configured RSS key */
2554         rss_getkey((uint8_t *) &rss_seed);
2555 #endif
2556         /* Fill out hash function seed */
2557         for (i = 0; i <= IXL_KEYSZ; i++)
2558                 wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
2559
2560         /* Enable PCTYPES for RSS: */
2561 #ifdef RSS
2562         rss_hash_config = rss_gethashconfig();
2563         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2564                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2565         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2566                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2567         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2568                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2569         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2570                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2571         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2572                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2573         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2574                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2575         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2576                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
2577 #else
2578         set_hena =
2579                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
2580                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
2581                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
2582                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
2583                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
2584                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
2585                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
2586                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
2587                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
2588                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
2589                 ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
2590 #endif
2591         hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
2592             ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
2593         hena |= set_hena;
2594         wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
2595         wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
2596
2597         /* Populate the LUT with max no. of queues in round robin fashion */
2598         for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
2599                 if (j == vsi->num_queues)
2600                         j = 0;
2601 #ifdef RSS
2602                 /*
2603                  * Fetch the RSS bucket id for the given indirection entry.
2604                  * Cap it at the number of configured buckets (which is
2605                  * num_queues.)
2606                  */
2607                 que_id = rss_get_indirection_to_bucket(i);
2608                 que_id = que_id % vsi->num_queues;
2609 #else
2610                 que_id = j;
2611 #endif
2612                 /* lut = 4-byte sliding window of 4 lut entries */
2613                 lut = (lut << 8) | (que_id & 0xF);
2614                 /* On i = 3, we have 4 entries in lut; write to the register */
2615                 if ((i & 3) == 3) {
2616                         wr32(hw, I40E_VFQF_HLUT(i), lut);
2617                         DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
2618                 }
2619         }
2620         ixl_flush(hw);
2621 }
2622
2623
2624 /*
2625 ** This routine refreshes vlan filters, called by init
2626 ** it scans the filter table and then updates the AQ
2627 */
2628 static void
2629 ixlv_setup_vlan_filters(struct ixlv_sc *sc)
2630 {
2631         struct ixl_vsi                  *vsi = &sc->vsi;
2632         struct ixlv_vlan_filter *f;
2633         int                             cnt = 0;
2634
2635         if (vsi->num_vlans == 0)
2636                 return;
2637         /*
2638         ** Scan the filter table for vlan entries,
2639         ** and if found call for the AQ update.
2640         */
2641         SLIST_FOREACH(f, sc->vlan_filters, next)
2642                 if (f->flags & IXL_FILTER_ADD)
2643                         cnt++;
2644         if (cnt > 0)
2645                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
2646                     IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
2647 }
2648
2649
2650 /*
2651 ** This routine adds new MAC filters to the sc's list;
2652 ** these are later added in hardware by sending a virtual
2653 ** channel message.
2654 */
2655 static int
2656 ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
2657 {
2658         struct ixlv_mac_filter  *f;
2659
2660         /* Does one already exist? */
2661         f = ixlv_find_mac_filter(sc, macaddr);
2662         if (f != NULL) {
2663                 IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
2664                     MAC_FORMAT_ARGS(macaddr));
2665                 return (EEXIST);
2666         }
2667
2668         /* If not, get a new empty filter */
2669         f = ixlv_get_mac_filter(sc);
2670         if (f == NULL) {
2671                 if_printf(sc->vsi.ifp, "%s: no filters available!!\n",
2672                     __func__);
2673                 return (ENOMEM);
2674         }
2675
2676         IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
2677             MAC_FORMAT_ARGS(macaddr));
2678
2679         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2680         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
2681         f->flags |= flags;
2682         return (0);
2683 }
2684
2685 /*
2686 ** Marks a MAC filter for deletion.
2687 */
2688 static int
2689 ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
2690 {
2691         struct ixlv_mac_filter  *f;
2692
2693         f = ixlv_find_mac_filter(sc, macaddr);
2694         if (f == NULL)
2695                 return (ENOENT);
2696
2697         f->flags |= IXL_FILTER_DEL;
2698         return (0);
2699 }
2700
2701 /*
2702 ** Tasklet handler for MSIX Adminq interrupts
2703 **  - done outside interrupt context since it might sleep
2704 */
2705 static void
2706 ixlv_do_adminq(void *context, int pending)
2707 {
2708         struct ixlv_sc          *sc = context;
2709
2710         mtx_lock(&sc->mtx);
2711         ixlv_do_adminq_locked(sc);
2712         mtx_unlock(&sc->mtx);
2713         return;
2714 }
2715
2716 static void
2717 ixlv_do_adminq_locked(struct ixlv_sc *sc)
2718 {
2719         struct i40e_hw                  *hw = &sc->hw;
2720         struct i40e_arq_event_info      event;
2721         struct i40e_virtchnl_msg        *v_msg;
2722         device_t                        dev = sc->dev;
2723         u16                             result = 0;
2724         u32                             reg, oldreg;
2725         i40e_status                     ret;
2726
2727         IXLV_CORE_LOCK_ASSERT(sc);
2728
2729         event.buf_len = IXL_AQ_BUF_SZ;
2730         event.msg_buf = sc->aq_buffer;
2731         v_msg = (struct i40e_virtchnl_msg *)&event.desc;
2732
2733         do {
2734                 ret = i40e_clean_arq_element(hw, &event, &result);
2735                 if (ret)
2736                         break;
2737                 ixlv_vc_completion(sc, v_msg->v_opcode,
2738                     v_msg->v_retval, event.msg_buf, event.msg_len);
2739                 if (result != 0)
2740                         bzero(event.msg_buf, IXL_AQ_BUF_SZ);
2741         } while (result);
2742
2743         /* check for Admin queue errors */
2744         oldreg = reg = rd32(hw, hw->aq.arq.len);
2745         if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
2746                 device_printf(dev, "ARQ VF Error detected\n");
2747                 reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
2748         }
2749         if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
2750                 device_printf(dev, "ARQ Overflow Error detected\n");
2751                 reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
2752         }
2753         if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
2754                 device_printf(dev, "ARQ Critical Error detected\n");
2755                 reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
2756         }
2757         if (oldreg != reg)
2758                 wr32(hw, hw->aq.arq.len, reg);
2759
2760         oldreg = reg = rd32(hw, hw->aq.asq.len);
2761         if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
2762                 device_printf(dev, "ASQ VF Error detected\n");
2763                 reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
2764         }
2765         if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
2766                 device_printf(dev, "ASQ Overflow Error detected\n");
2767                 reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
2768         }
2769         if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
2770                 device_printf(dev, "ASQ Critical Error detected\n");
2771                 reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
2772         }
2773         if (oldreg != reg)
2774                 wr32(hw, hw->aq.asq.len, reg);
2775
2776         ixlv_enable_adminq_irq(hw);
2777 }
2778
2779 static void
2780 ixlv_add_sysctls(struct ixlv_sc *sc)
2781 {
2782         device_t dev = sc->dev;
2783         struct ixl_vsi *vsi = &sc->vsi;
2784         struct i40e_eth_stats *es = &vsi->eth_stats;
2785
2786         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2787         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2788         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2789
2790         struct sysctl_oid *vsi_node, *queue_node;
2791         struct sysctl_oid_list *vsi_list, *queue_list;
2792
2793 #define QUEUE_NAME_LEN 32
2794         char queue_namebuf[QUEUE_NAME_LEN];
2795
2796         struct ixl_queue *queues = vsi->queues;
2797         struct tx_ring *txr;
2798         struct rx_ring *rxr;
2799
2800         /* Driver statistics sysctls */
2801         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2802                         CTLFLAG_RD, &sc->watchdog_events,
2803                         "Watchdog timeouts");
2804         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2805                         CTLFLAG_RD, &sc->admin_irq,
2806                         "Admin Queue IRQ Handled");
2807
2808         /* VSI statistics sysctls */
2809         vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
2810                                    CTLFLAG_RD, NULL, "VSI-specific statistics");
2811         vsi_list = SYSCTL_CHILDREN(vsi_node);
2812
2813         struct ixl_sysctl_info ctls[] =
2814         {
2815                 {&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2816                 {&es->rx_unicast, "ucast_pkts_rcvd",
2817                         "Unicast Packets Received"},
2818                 {&es->rx_multicast, "mcast_pkts_rcvd",
2819                         "Multicast Packets Received"},
2820                 {&es->rx_broadcast, "bcast_pkts_rcvd",
2821                         "Broadcast Packets Received"},
2822                 {&es->rx_discards, "rx_discards", "Discarded RX packets"},
2823                 {&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"},
2824                 {&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2825                 {&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2826                 {&es->tx_multicast, "mcast_pkts_txd",
2827                         "Multicast Packets Transmitted"},
2828                 {&es->tx_broadcast, "bcast_pkts_txd",
2829                         "Broadcast Packets Transmitted"},
2830                 {&es->tx_errors, "tx_errors", "TX packet errors"},
2831                 // end
2832                 {0,0,0}
2833         };
2834         struct ixl_sysctl_info *entry = ctls;
2835         while (entry->stat != 0)
2836         {
2837                 SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name,
2838                                 CTLFLAG_RD, entry->stat,
2839                                 entry->description);
2840                 entry++;
2841         }
2842
2843         /* Queue sysctls */
2844         for (int q = 0; q < vsi->num_queues; q++) {
2845                 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2846                 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
2847                                              CTLFLAG_RD, NULL, "Queue Name");
2848                 queue_list = SYSCTL_CHILDREN(queue_node);
2849
2850                 txr = &(queues[q].txr);
2851                 rxr = &(queues[q].rxr);
2852
2853                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2854                                 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2855                                 "m_defrag() failed");
2856                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
2857                                 CTLFLAG_RD, &(queues[q].dropped_pkts),
2858                                 "Driver dropped packets");
2859                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
2860                                 CTLFLAG_RD, &(queues[q].irqs),
2861                                 "irqs on this queue");
2862                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2863                                 CTLFLAG_RD, &(queues[q].tso),
2864                                 "TSO");
2865                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
2866                                 CTLFLAG_RD, &(queues[q].tx_dma_setup),
2867                                 "Driver tx dma failure in xmit");
2868                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2869                                 CTLFLAG_RD, &(txr->no_desc),
2870                                 "Queue No Descriptor Available");
2871                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2872                                 CTLFLAG_RD, &(txr->total_packets),
2873                                 "Queue Packets Transmitted");
2874                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2875                                 CTLFLAG_RD, &(txr->tx_bytes),
2876                                 "Queue Bytes Transmitted");
2877                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2878                                 CTLFLAG_RD, &(rxr->rx_packets),
2879                                 "Queue Packets Received");
2880                 SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2881                                 CTLFLAG_RD, &(rxr->rx_bytes),
2882                                 "Queue Bytes Received");
2883
2884                 /* Examine queue state */
2885                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head", 
2886                                 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2887                                 sizeof(struct ixl_queue),
2888                                 ixlv_sysctl_qtx_tail_handler, "IU",
2889                                 "Queue Transmit Descriptor Tail");
2890                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_head", 
2891                                 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2892                                 sizeof(struct ixl_queue),
2893                                 ixlv_sysctl_qrx_tail_handler, "IU",
2894                                 "Queue Receive Descriptor Tail");
2895         }
2896 }
2897
2898 static void
2899 ixlv_init_filters(struct ixlv_sc *sc)
2900 {
2901         sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
2902             M_DEVBUF, M_NOWAIT | M_ZERO);
2903         SLIST_INIT(sc->mac_filters);
2904         sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
2905             M_DEVBUF, M_NOWAIT | M_ZERO);
2906         SLIST_INIT(sc->vlan_filters);
2907         return;
2908 }
2909
2910 static void
2911 ixlv_free_filters(struct ixlv_sc *sc)
2912 {
2913         struct ixlv_mac_filter *f;
2914         struct ixlv_vlan_filter *v;
2915
2916         while (!SLIST_EMPTY(sc->mac_filters)) {
2917                 f = SLIST_FIRST(sc->mac_filters);
2918                 SLIST_REMOVE_HEAD(sc->mac_filters, next);
2919                 free(f, M_DEVBUF);
2920         }
2921         while (!SLIST_EMPTY(sc->vlan_filters)) {
2922                 v = SLIST_FIRST(sc->vlan_filters);
2923                 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
2924                 free(v, M_DEVBUF);
2925         }
2926         return;
2927 }
2928
2929 /**
2930  * ixlv_sysctl_qtx_tail_handler
2931  * Retrieves I40E_QTX_TAIL1 value from hardware
2932  * for a sysctl.
2933  */
2934 static int 
2935 ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
2936 {
2937         struct ixl_queue *que;
2938         int error;
2939         u32 val;
2940
2941         que = ((struct ixl_queue *)oidp->oid_arg1);
2942         if (!que) return 0;
2943
2944         val = rd32(que->vsi->hw, que->txr.tail);
2945         error = sysctl_handle_int(oidp, &val, 0, req);
2946         if (error || !req->newptr)
2947                 return error;
2948         return (0);
2949 }
2950
2951 /**
2952  * ixlv_sysctl_qrx_tail_handler
2953  * Retrieves I40E_QRX_TAIL1 value from hardware
2954  * for a sysctl.
2955  */
2956 static int 
2957 ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
2958 {
2959         struct ixl_queue *que;
2960         int error;
2961         u32 val;
2962
2963         que = ((struct ixl_queue *)oidp->oid_arg1);
2964         if (!que) return 0;
2965
2966         val = rd32(que->vsi->hw, que->rxr.tail);
2967         error = sysctl_handle_int(oidp, &val, 0, req);
2968         if (error || !req->newptr)
2969                 return error;
2970         return (0);
2971 }
2972