/******************************************************************************

  Copyright (c) 2013-2014, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/
34
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
37 #include "ixl.h"
38 #include "ixlv.h"
39
40 #ifdef RSS
41 #include <net/rss_config.h>
42 #endif
43
44 /*********************************************************************
45  *  Driver version
46  *********************************************************************/
47 char ixlv_driver_version[] = "1.2.1";
48
49 /*********************************************************************
50  *  PCI Device ID Table
51  *
52  *  Used by probe to select devices to load on
53  *  Last field stores an index into ixlv_strings
54  *  Last entry must be all 0s
55  *
56  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
57  *********************************************************************/
58
59 static ixl_vendor_info_t ixlv_vendor_info_array[] =
60 {
61         {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
62         {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
63         /* required last entry */
64         {0, 0, 0, 0, 0}
65 };
66
67 /*********************************************************************
68  *  Table of branding strings
69  *********************************************************************/
70
71 static char    *ixlv_strings[] = {
72         "Intel(R) Ethernet Connection XL710 VF Driver"
73 };
74

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixlv_probe(device_t);
static int      ixlv_attach(device_t);
static int      ixlv_detach(device_t);
static int      ixlv_shutdown(device_t);
static void     ixlv_init_locked(struct ixlv_sc *);
static int      ixlv_allocate_pci_resources(struct ixlv_sc *);
static void     ixlv_free_pci_resources(struct ixlv_sc *);
static int      ixlv_assign_msix(struct ixlv_sc *);
static int      ixlv_init_msix(struct ixlv_sc *);
static int      ixlv_init_taskqueue(struct ixlv_sc *);
static int      ixlv_setup_queues(struct ixlv_sc *);
static void     ixlv_config_rss(struct ixlv_sc *);
static void     ixlv_stop(struct ixlv_sc *);
static void     ixlv_add_multi(struct ixl_vsi *);
static void     ixlv_del_multi(struct ixl_vsi *);
static void     ixlv_free_queues(struct ixl_vsi *);
static int      ixlv_setup_interface(device_t, struct ixlv_sc *);

static int      ixlv_media_change(struct ifnet *);
static void     ixlv_media_status(struct ifnet *, struct ifmediareq *);

static void     ixlv_local_timer(void *);

static int      ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
static int      ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void     ixlv_init_filters(struct ixlv_sc *);
static void     ixlv_free_filters(struct ixlv_sc *);

static void     ixlv_msix_que(void *);
static void     ixlv_msix_adminq(void *);
static void     ixlv_do_adminq(void *, int);
static void     ixlv_do_adminq_locked(struct ixlv_sc *sc);
static void     ixlv_handle_que(void *, int);
static int      ixlv_reset(struct ixlv_sc *);
static int      ixlv_reset_complete(struct i40e_hw *);
static void     ixlv_set_queue_rx_itr(struct ixl_queue *);
static void     ixlv_set_queue_tx_itr(struct ixl_queue *);
static void     ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
                    enum i40e_status_code);

static void     ixlv_enable_adminq_irq(struct i40e_hw *);
static void     ixlv_disable_adminq_irq(struct i40e_hw *);
static void     ixlv_enable_queue_irq(struct i40e_hw *, int);
static void     ixlv_disable_queue_irq(struct i40e_hw *, int);

static void     ixlv_setup_vlan_filters(struct ixlv_sc *);
static void     ixlv_register_vlan(void *, struct ifnet *, u16);
static void     ixlv_unregister_vlan(void *, struct ifnet *, u16);

static void     ixlv_init_hw(struct ixlv_sc *);
static int      ixlv_setup_vc(struct ixlv_sc *);
static int      ixlv_vf_config(struct ixlv_sc *);

static void     ixlv_cap_txcsum_tso(struct ixl_vsi *,
                    struct ifnet *, int);

static void     ixlv_add_sysctls(struct ixlv_sc *);
static int      ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int      ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixlv_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, ixlv_probe),
        DEVMETHOD(device_attach, ixlv_attach),
        DEVMETHOD(device_detach, ixlv_detach),
        DEVMETHOD(device_shutdown, ixlv_shutdown),
        {0, 0}
};

static driver_t ixlv_driver = {
        "ixlv", ixlv_methods, sizeof(struct ixlv_sc),
};

devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);

MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);
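
/*
** The kernel module built from this source is if_ixlv(4); assuming the
** stock module name and install path, it can be loaded at boot with
** if_ixlv_load="YES" in /boot/loader.conf, or at runtime via
** "kldload if_ixlv".
*/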

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
                   "IXLV driver parameters");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixlv_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixlv_ringsz, 0, "Descriptor Ring Size");

/* Set to zero to auto-calculate */
int ixlv_max_queues = 0;
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixlv_max_queues, 0, "Number of Queues");

/*
** Number of entries in Tx queue buf_ring.
** Increasing this will reduce the number of
** errors when transmitting fragmented UDP
** packets.
*/
static int ixlv_txbrsz = DEFAULT_TXBRSZ;
TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
    &ixlv_txbrsz, 0, "TX Buf Ring Size");

/*
** Controls for Interrupt Throttling
**      - true/false for dynamic adjustment
**      - default values for static ITR
*/
int ixlv_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixlv_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixlv_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixlv_rx_itr, 0, "RX Interrupt Rate");

int ixlv_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixlv_tx_itr, 0, "TX Interrupt Rate");

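/*
** Example settings (values illustrative, not recommendations): these are
** loader tunables (CTLFLAG_RDTUN), so they can be set in /boot/loader.conf
** before the driver initializes, e.g.:
**
**   hw.ixlv.ringsz="1024"
**   hw.ixlv.max_queues="4"
**   hw.ixlv.dynamic_rx_itr="1"
**
** The corresponding read-only sysctls under hw.ixlv report the values in
** use at runtime.
*/
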
/*********************************************************************
 *  Device identification routine
 *
 *  ixlv_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixlv_probe(device_t dev)
{
        ixl_vendor_info_t *ent;

        u16     pci_vendor_id, pci_device_id;
        u16     pci_subvendor_id, pci_subdevice_id;
        char    device_name[256];

        INIT_DEBUGOUT("ixlv_probe: begin");

        pci_vendor_id = pci_get_vendor(dev);
        if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
                return (ENXIO);

        pci_device_id = pci_get_device(dev);
        pci_subvendor_id = pci_get_subvendor(dev);
        pci_subdevice_id = pci_get_subdevice(dev);

        ent = ixlv_vendor_info_array;
        while (ent->vendor_id != 0) {
                if ((pci_vendor_id == ent->vendor_id) &&
                    (pci_device_id == ent->device_id) &&
                    ((pci_subvendor_id == ent->subvendor_id) ||
                     (ent->subvendor_id == 0)) &&
                    ((pci_subdevice_id == ent->subdevice_id) ||
                     (ent->subdevice_id == 0))) {
                        sprintf(device_name, "%s, Version - %s",
                                ixlv_strings[ent->index],
                                ixlv_driver_version);
                        device_set_desc_copy(dev, device_name);
                        return (BUS_PROBE_DEFAULT);
                }
                ent++;
        }
        return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_attach(device_t dev)
{
        struct ixlv_sc  *sc;
        struct i40e_hw  *hw;
        struct ixl_vsi  *vsi;
        int             error = 0;

        INIT_DBG_DEV(dev, "begin");

        /* Allocate, clear, and link in our primary soft structure */
        sc = device_get_softc(dev);
        sc->dev = sc->osdep.dev = dev;
        hw = &sc->hw;
        vsi = &sc->vsi;
        vsi->dev = dev;

        /* Initialize hw struct */
        ixlv_init_hw(sc);

        /* Allocate filter lists */
        ixlv_init_filters(sc);

        /* Core Lock Init */
        mtx_init(&sc->mtx, device_get_nameunit(dev),
            "IXL SC Lock", MTX_DEF);

        /* Set up the timer callout */
        callout_init_mtx(&sc->timer, &sc->mtx, 0);

        /* Do PCI setup - map BAR0, etc */
        if (ixlv_allocate_pci_resources(sc)) {
                device_printf(dev, "%s: Allocation of PCI resources failed\n",
                    __func__);
                error = ENXIO;
                goto err_early;
        }

        INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");

        error = i40e_set_mac_type(hw);
        if (error) {
                device_printf(dev, "%s: set_mac_type failed: %d\n",
                    __func__, error);
                goto err_pci_res;
        }

        error = ixlv_reset_complete(hw);
        if (error) {
                device_printf(dev, "%s: Device is still being reset\n",
                    __func__);
                goto err_pci_res;
        }

        INIT_DBG_DEV(dev, "VF Device is ready for configuration");

        error = ixlv_setup_vc(sc);
        if (error) {
                device_printf(dev, "%s: Error setting up PF comms, %d\n",
                    __func__, error);
                goto err_pci_res;
        }

        INIT_DBG_DEV(dev, "PF API version verified");

        /* TODO: Figure out why MDD events occur when this reset is removed. */
        /* Need API version before sending reset message */
        error = ixlv_reset(sc);
        if (error) {
                device_printf(dev, "VF reset failed; reload the driver\n");
                goto err_aq;
        }

        INIT_DBG_DEV(dev, "VF reset complete");

        /* Ask for VF config from PF */
        error = ixlv_vf_config(sc);
        if (error) {
                device_printf(dev, "Error getting configuration from PF: %d\n",
                    error);
                goto err_aq;
        }

        INIT_DBG_DEV(dev, "VF config from PF:");
        INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
            sc->vf_res->num_vsis,
            sc->vf_res->num_queue_pairs,
            sc->vf_res->max_vectors,
            sc->vf_res->max_mtu);
        INIT_DBG_DEV(dev, "Offload flags: %#010x",
            sc->vf_res->vf_offload_flags);

        // TODO: Move this into ixlv_vf_config?
        /* Got the VF config message back from the PF; now we can parse it */
        for (int i = 0; i < sc->vf_res->num_vsis; i++) {
                if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
                        sc->vsi_res = &sc->vf_res->vsi_res[i];
        }
        if (!sc->vsi_res) {
                device_printf(dev, "%s: no LAN VSI found\n", __func__);
                error = EIO;
                goto err_res_buf;
        }

        INIT_DBG_DEV(dev, "Resource Acquisition complete");

        /* If no MAC address was assigned, just make a random one */
        if (!ixlv_check_ether_addr(hw->mac.addr)) {
                u8 addr[ETHER_ADDR_LEN];
                arc4rand(&addr, sizeof(addr), 0);
                addr[0] &= 0xFE;        /* clear the I/G (multicast) bit */
                addr[0] |= 0x02;        /* set the U/L (locally administered) bit */
                bcopy(addr, hw->mac.addr, sizeof(addr));
        }

        vsi->id = sc->vsi_res->vsi_id;
        vsi->back = (void *)sc;
        vsi->link_up = TRUE;

        /* This allocates the memory and early settings */
        if (ixlv_setup_queues(sc) != 0) {
                device_printf(dev, "%s: setup queues failed!\n",
                    __func__);
                error = EIO;
                goto out;
        }

        /* Setup the stack interface */
        if (ixlv_setup_interface(dev, sc) != 0) {
                device_printf(dev, "%s: setup interface failed!\n",
                    __func__);
                error = EIO;
                goto out;
        }

        INIT_DBG_DEV(dev, "Queue memory and interface setup");

        /* Do queue interrupt setup */
        ixlv_assign_msix(sc);

        /* Start AdminQ taskqueue */
        ixlv_init_taskqueue(sc);

        /* Initialize stats */
        bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
        ixlv_add_sysctls(sc);

        /* Register for VLAN events */
        vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
            ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
        vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
            ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

        /* We want AQ enabled early */
        ixlv_enable_adminq_irq(hw);

        /* Set things up to run init */
        sc->init_state = IXLV_INIT_READY;

        ixl_vc_init_mgr(sc, &sc->vc_mgr);

        INIT_DBG_DEV(dev, "end");
        return (error);

out:
        ixlv_free_queues(vsi);
err_res_buf:
        free(sc->vf_res, M_DEVBUF);
err_aq:
        i40e_shutdown_adminq(hw);
err_pci_res:
        ixlv_free_pci_resources(sc);
err_early:
        mtx_destroy(&sc->mtx);
        ixlv_free_filters(sc);
        INIT_DBG_DEV(dev, "end: error %d", error);
        return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_detach(device_t dev)
{
        struct ixlv_sc  *sc = device_get_softc(dev);
        struct ixl_vsi  *vsi = &sc->vsi;

        INIT_DBG_DEV(dev, "begin");

        /* Make sure VLANs are not using the driver */
        if (vsi->ifp->if_vlantrunk != NULL) {
                device_printf(dev, "Vlan in use, detach first\n");
                INIT_DBG_DEV(dev, "end");
                return (EBUSY);
        }

        /* Stop the driver */
        ether_ifdetach(vsi->ifp);
        if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
                mtx_lock(&sc->mtx);
                ixlv_stop(sc);
                mtx_unlock(&sc->mtx);
        }

        /* Unregister VLAN events */
        if (vsi->vlan_attach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
        if (vsi->vlan_detach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

        /* Drain the VC mgr */
        callout_drain(&sc->vc_mgr.callout);

        i40e_shutdown_adminq(&sc->hw);
        taskqueue_free(sc->tq);
        if_free(vsi->ifp);
        free(sc->vf_res, M_DEVBUF);
        ixlv_free_pci_resources(sc);
        ixlv_free_queues(vsi);
        mtx_destroy(&sc->mtx);
        ixlv_free_filters(sc);

        bus_generic_detach(dev);
        INIT_DBG_DEV(dev, "end");
        return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixlv_shutdown(device_t dev)
{
        struct ixlv_sc  *sc = device_get_softc(dev);

        INIT_DBG_DEV(dev, "begin");

        mtx_lock(&sc->mtx);
        ixlv_stop(sc);
        mtx_unlock(&sc->mtx);

        INIT_DBG_DEV(dev, "end");
        return (0);
}

/*
 * Configure TXCSUM(IPV6) and TSO(4/6)
 *      - the hardware handles these together so we
 *        need to tweak them
 */
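/*
** Summary of the coupling enforced below (the v4 and v6 pairs behave
** identically): enabling TSO forces the matching TXCSUM on, and disabling
** TXCSUM forces TSO off. The IXL_FLAGS_KEEP_TSO4/6 flags remember that TSO
** was on when TXCSUM is disabled, so TSO is restored automatically when
** TXCSUM is re-enabled.
*/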
static void
ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
        /* Enable/disable TXCSUM/TSO4 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable |= IFCAP_TXCSUM;
                        /* enable TXCSUM, restore TSO if previously enabled */
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                                ifp->if_capenable |= IFCAP_TSO4;
                        }
                } else if (mask & IFCAP_TSO4) {
                        ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                        if_printf(ifp,
                            "TSO4 requires txcsum, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM)
                        ifp->if_capenable &= ~IFCAP_TXCSUM;
                else if (mask & IFCAP_TSO4)
                        ifp->if_capenable |= IFCAP_TSO4;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && (ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO4;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
                        if_printf(ifp,
                            "TSO4 requires txcsum, disabling both...\n");
                } else if (mask & IFCAP_TSO4)
                        ifp->if_capenable &= ~IFCAP_TSO4;
        }

        /* Enable/disable TXCSUM_IPV6/TSO6 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                                ifp->if_capenable |= IFCAP_TSO6;
                        }
                } else if (mask & IFCAP_TSO6) {
                        ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                        if_printf(ifp,
                            "TSO6 requires txcsum6, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6)
                        ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
                else if (mask & IFCAP_TSO6)
                        ifp->if_capenable |= IFCAP_TSO6;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && (ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO6;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        if_printf(ifp,
                            "TSO6 requires txcsum6, disabling both...\n");
                } else if (mask & IFCAP_TSO6)
                        ifp->if_capenable &= ~IFCAP_TSO6;
        }
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixlv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        struct ixl_vsi          *vsi = ifp->if_softc;
        struct ixlv_sc          *sc = vsi->back;
        struct ifreq            *ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
        struct ifaddr           *ifa = (struct ifaddr *)data;
        bool                    avoid_reset = FALSE;
#endif
        int                     error = 0;

        switch (command) {
        case SIOCSIFADDR:
#ifdef INET
                if (ifa->ifa_addr->sa_family == AF_INET)
                        avoid_reset = TRUE;
#endif
#ifdef INET6
                if (ifa->ifa_addr->sa_family == AF_INET6)
                        avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
                /*
                ** Calling init results in link renegotiation,
                ** so we avoid doing it when possible.
                */
                if (avoid_reset) {
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                                ixlv_init(vsi);
#ifdef INET
                        if (!(ifp->if_flags & IFF_NOARP))
                                arp_ifinit(ifp, ifa);
#endif
                } else
                        error = ether_ioctl(ifp, command, data);
#else
                /* Without INET/INET6 there is no reset to avoid */
                error = ether_ioctl(ifp, command, data);
#endif
                break;
        case SIOCSIFMTU:
                IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
                mtx_lock(&sc->mtx);
                if (ifr->ifr_mtu > IXL_MAX_FRAME -
                    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
                        error = EINVAL;
                        IOCTL_DBG_IF(ifp, "mtu too large");
                } else {
                        IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", ifp->if_mtu, ifr->ifr_mtu);
                        // ERJ: Interestingly enough, these types don't match
                        ifp->if_mtu = (u_long)ifr->ifr_mtu;
                        vsi->max_frame_size =
                            ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
                            + ETHER_VLAN_ENCAP_LEN;
                        ixlv_init_locked(sc);
                }
                mtx_unlock(&sc->mtx);
                break;
        case SIOCSIFFLAGS:
                IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
                mtx_lock(&sc->mtx);
                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                                ixlv_init_locked(sc);
                } else
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                ixlv_stop(sc);
                sc->if_flags = ifp->if_flags;
                mtx_unlock(&sc->mtx);
                break;
        case SIOCADDMULTI:
                IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        mtx_lock(&sc->mtx);
                        ixlv_disable_intr(vsi);
                        ixlv_add_multi(vsi);
                        ixlv_enable_intr(vsi);
                        mtx_unlock(&sc->mtx);
                }
                break;
        case SIOCDELMULTI:
                IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
                if (sc->init_state == IXLV_RUNNING) {
                        mtx_lock(&sc->mtx);
                        ixlv_disable_intr(vsi);
                        ixlv_del_multi(vsi);
                        ixlv_enable_intr(vsi);
                        mtx_unlock(&sc->mtx);
                }
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
                error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
                break;
        case SIOCSIFCAP:
        {
                int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");

                ixlv_cap_txcsum_tso(vsi, ifp, mask);

                if (mask & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                if (mask & IFCAP_RXCSUM_IPV6)
                        ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
                if (mask & IFCAP_LRO)
                        ifp->if_capenable ^= IFCAP_LRO;
                if (mask & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                if (mask & IFCAP_VLAN_HWFILTER)
                        ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        ixlv_init(vsi);
                }
                VLAN_CAPABILITIES(ifp);

                break;
        }

        default:
                IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}

/*
** Doing a reinit on the VF is unfortunately more complicated
** than on a physical device: the PF must more or less
** completely recreate our memory, so many things that were
** done only once at attach in traditional drivers must now be
** redone at each reinitialization. This function does that
** 'prelude' so we can then call the normal locked init code.
*/
int
ixlv_reinit_locked(struct ixlv_sc *sc)
{
        struct i40e_hw          *hw = &sc->hw;
        struct ixl_vsi          *vsi = &sc->vsi;
        struct ifnet            *ifp = vsi->ifp;
        struct ixlv_mac_filter  *mf, *mf_temp;
        struct ixlv_vlan_filter *vf;
        int                     error = 0;

        INIT_DBG_IF(ifp, "begin");

        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                ixlv_stop(sc);

        error = ixlv_reset(sc);

        INIT_DBG_IF(ifp, "VF was reset");

        /* set the state in case we went through RESET */
        sc->init_state = IXLV_RUNNING;

        /*
        ** Resetting the VF drops all filters from hardware;
        ** we need to mark them to be re-added in init.
        */
        SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
                if (mf->flags & IXL_FILTER_DEL) {
                        SLIST_REMOVE(sc->mac_filters, mf,
                            ixlv_mac_filter, next);
                        free(mf, M_DEVBUF);
                } else
                        mf->flags |= IXL_FILTER_ADD;
        }
        if (vsi->num_vlans != 0)
                SLIST_FOREACH(vf, sc->vlan_filters, next)
                        vf->flags = IXL_FILTER_ADD;
        else { /* clean any stale filters */
                while (!SLIST_EMPTY(sc->vlan_filters)) {
                        vf = SLIST_FIRST(sc->vlan_filters);
                        SLIST_REMOVE_HEAD(sc->vlan_filters, next);
                        free(vf, M_DEVBUF);
                }
        }

        ixlv_enable_adminq_irq(hw);
        ixl_vc_flush(&sc->vc_mgr);

        INIT_DBG_IF(ifp, "end");
        return (error);
}

static void
ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
        enum i40e_status_code code)
{
        struct ixlv_sc *sc;

        sc = arg;

        /*
         * Ignore the "Adapter Stopped" message, since it occurs when an
         * ifconfig down happens while a command is in progress; we don't
         * want to print an error in that case.
         */
        if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
                if_printf(sc->vsi.ifp,
                    "Error %d waiting for PF to complete operation %d\n",
                    code, cmd->request);
        }
}

static void
ixlv_init_locked(struct ixlv_sc *sc)
{
        struct i40e_hw          *hw = &sc->hw;
        struct ixl_vsi          *vsi = &sc->vsi;
        struct ixl_queue        *que = vsi->queues;
        struct ifnet            *ifp = vsi->ifp;
        int                      error = 0;

        INIT_DBG_IF(ifp, "begin");

        IXLV_CORE_LOCK_ASSERT(sc);

        /* Do a reinit first if an init has already been done */
        if ((sc->init_state == IXLV_RUNNING) ||
            (sc->init_state == IXLV_RESET_REQUIRED) ||
            (sc->init_state == IXLV_RESET_PENDING))
                error = ixlv_reinit_locked(sc);
        /* Don't bother with init if we failed reinit */
        if (error)
                goto init_done;

        /* Remove the existing MAC filter if a new MAC addr is set */
        if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
                error = ixlv_del_mac_filter(sc, hw->mac.addr);
                if (error == 0)
                        ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
                            IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
                            sc);
        }

        /* Check for an LAA (locally administered) MAC address... */
        bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);

        ifp->if_hwassist = 0;
        if (ifp->if_capenable & IFCAP_TSO)
                ifp->if_hwassist |= CSUM_TSO;
        if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
        if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
                ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;

        /* Add a MAC filter for this VF to the PF */
        if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
                error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
                if (!error || error == EEXIST)
                        ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
                            IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
                            sc);
        }

        /* Set up VLAN filters if needed */
        ixlv_setup_vlan_filters(sc);

        /* Prepare the queues for operation */
        for (int i = 0; i < vsi->num_queues; i++, que++) {
                struct  rx_ring *rxr = &que->rxr;

                ixl_init_tx_ring(que);

                if (vsi->max_frame_size <= 2048)
                        rxr->mbuf_sz = MCLBYTES;
                else
                        rxr->mbuf_sz = MJUMPAGESIZE;
                ixl_init_rx_ring(que);
        }

        /* Configure queues */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
            IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);

        /* Set up RSS */
        ixlv_config_rss(sc);

        /* Map vectors */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
            IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);

        /* Enable queues */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
            IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);

        /* Start the local timer */
        callout_reset(&sc->timer, hz, ixlv_local_timer, sc);

        sc->init_state = IXLV_RUNNING;

init_done:
        INIT_DBG_IF(ifp, "end");
        return;
}
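
/*
** Note on the flow above: the IXLV_FLAG_AQ_* requests queued via
** ixl_vc_enqueue() are virtchnl messages to the PF and complete
** asynchronously; ixl_init_cmd_complete() runs as each one finishes
** and only prints an error when the PF reports a failure (other than
** "adapter stopped").
*/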

/*
**  Init entry point for the stack
*/
void
ixlv_init(void *arg)
{
        struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
        struct ixlv_sc *sc = vsi->back;
        int retries = 0;

        mtx_lock(&sc->mtx);
        ixlv_init_locked(sc);
        mtx_unlock(&sc->mtx);

        /* Wait for init_locked to finish */
        while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
            && ++retries < 100) {
                i40e_msec_delay(10);
        }
        /* Use the same bound as the wait loop above */
        if (retries >= 100)
                if_printf(vsi->ifp,
                    "Init failed to complete in allotted time!\n");
}

/*
 * ixlv_attach() helper function; gathers information about
 * the (virtual) hardware for use elsewhere in the driver.
 */
static void
ixlv_init_hw(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;

        /* Save off the information about this board */
        hw->vendor_id = pci_get_vendor(dev);
        hw->device_id = pci_get_device(dev);
        hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
        hw->subsystem_vendor_id =
            pci_read_config(dev, PCIR_SUBVEND_0, 2);
        hw->subsystem_device_id =
            pci_read_config(dev, PCIR_SUBDEV_0, 2);

        hw->bus.device = pci_get_slot(dev);
        hw->bus.func = pci_get_function(dev);
}

/*
 * ixlv_attach() helper function; initializes the admin queue
 * and attempts to establish contact with the PF by
 * retrying the initial "API version" message several times
 * or until the PF responds.
 */
static int
ixlv_setup_vc(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;
        int error = 0, ret_error = 0, asq_retries = 0;
        bool send_api_ver_retried = FALSE;

        /* Need to set these AQ parameters before initializing the AQ */
        hw->aq.num_arq_entries = IXL_AQ_LEN;
        hw->aq.num_asq_entries = IXL_AQ_LEN;
        hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
        hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

        for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
                /* Initialize the admin queue */
                error = i40e_init_adminq(hw);
                if (error) {
                        device_printf(dev, "%s: init_adminq failed: %d\n",
                            __func__, error);
                        ret_error = 1;
                        continue;
                }

                INIT_DBG_DEV(dev, "Initialized Admin Queue, attempt %d", i+1);

retry_send:
                /* Send the VF's API version */
                error = ixlv_send_api_ver(sc);
                if (error) {
                        i40e_shutdown_adminq(hw);
                        ret_error = 2;
                        device_printf(dev, "%s: unable to send api"
                            " version to PF on attempt %d, error %d\n",
                            __func__, i+1, error);
                }

                asq_retries = 0;
                while (!i40e_asq_done(hw)) {
                        if (++asq_retries > IXLV_AQ_MAX_ERR) {
                                i40e_shutdown_adminq(hw);
                                DDPRINTF(dev, "Admin Queue timeout "
                                    "(waiting for send_api_ver), %d more retries...",
                                    IXLV_AQ_MAX_ERR - (i + 1));
                                ret_error = 3;
                                break;
                        }
                        i40e_msec_delay(10);
                }
                if (asq_retries > IXLV_AQ_MAX_ERR)
                        continue;

                INIT_DBG_DEV(dev, "Sent API version message to PF");

                /* Verify that the PF's API version is compatible */
                error = ixlv_verify_api_ver(sc);
                if (error == ETIMEDOUT) {
                        if (!send_api_ver_retried) {
                                /* Resend the message, one more time */
                                send_api_ver_retried = TRUE;
                                device_printf(dev,
                                    "%s: Timeout while verifying API version on first"
                                    " try!\n", __func__);
                                goto retry_send;
                        } else {
                                device_printf(dev,
                                    "%s: Timeout while verifying API version on second"
                                    " try!\n", __func__);
                                ret_error = 4;
                                break;
                        }
                }
                if (error) {
                        device_printf(dev,
                            "%s: Unable to verify API version,"
                            " error %d\n", __func__, error);
                        ret_error = 5;
                }
                break;
        }

        if (ret_error >= 4)
                i40e_shutdown_adminq(hw);
        return (ret_error);
}

/*
 * ixlv_attach() helper function; asks the PF for this VF's
 * configuration, and saves the information if it receives it.
 */
static int
ixlv_vf_config(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;
        int bufsz, error = 0, ret_error = 0;
        int asq_retries, retried = 0;

retry_config:
        error = ixlv_send_vf_config_msg(sc);
        if (error) {
                device_printf(dev,
                    "%s: Unable to send VF config request, attempt %d,"
                    " error %d\n", __func__, retried + 1, error);
                ret_error = 2;
        }

        asq_retries = 0;
        while (!i40e_asq_done(hw)) {
                if (++asq_retries > IXLV_AQ_MAX_ERR) {
                        device_printf(dev, "%s: Admin Queue timeout "
                            "(waiting for send_vf_config_msg), attempt %d\n",
                            __func__, retried + 1);
                        ret_error = 3;
                        goto fail;
                }
                i40e_msec_delay(10);
        }

        INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
            retried + 1);

        if (!sc->vf_res) {
                bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
                    (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
                sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
                if (!sc->vf_res) {
                        device_printf(dev,
                            "%s: Unable to allocate memory for VF configuration"
                            " message from PF on attempt %d\n", __func__, retried + 1);
                        ret_error = 1;
                        goto fail;
                }
        }

        /* Check for the VF config response */
        error = ixlv_get_vf_config(sc);
        if (error == ETIMEDOUT) {
                /* The 1st time we time out, send the configuration message again */
                if (!retried) {
                        retried++;
                        goto retry_config;
                }
        }
        if (error) {
                device_printf(dev,
                    "%s: Unable to get VF configuration from PF after %d tries!\n",
                    __func__, retried + 1);
                ret_error = 4;
        }
        goto done;

fail:
        free(sc->vf_res, M_DEVBUF);
done:
        return (ret_error);
}

/*
 * Allocate MSI/X vectors, and set up the AQ vector early
 */
static int
ixlv_init_msix(struct ixlv_sc *sc)
{
        device_t dev = sc->dev;
        int rid, want, vectors, queues, available;

        rid = PCIR_BAR(IXL_BAR);
        sc->msix_mem = bus_alloc_resource_any(dev,
            SYS_RES_MEMORY, &rid, RF_ACTIVE);
        if (!sc->msix_mem) {
                /* May not be enabled */
                device_printf(sc->dev,
                    "Unable to map MSIX table\n");
                goto fail;
        }

        available = pci_msix_count(dev);
        if (available == 0) { /* system has MSIX disabled */
                bus_release_resource(dev, SYS_RES_MEMORY,
                    rid, sc->msix_mem);
                sc->msix_mem = NULL;
                goto fail;
        }

        /* Figure out a reasonable auto config value */
        queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;

        /* Override with the tunable value if it is sane */
        if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues))
                queues = ixlv_max_queues;
#ifdef  RSS
        /* If we're doing RSS, clamp at the number of RSS buckets */
        if (queues > rss_getnumbuckets())
                queues = rss_getnumbuckets();
#endif
        /* Enforce the VF max value */
        if (queues > IXLV_MAX_QUEUES)
                queues = IXLV_MAX_QUEUES;
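
/*
** Worked example of the sizing above (numbers illustrative): with
** mp_ncpus = 8 and available = 5 MSIX vectors, queues = min(8, 5 - 1) = 4
** before the clamps, and 'want' below becomes queues + 1 = 5: one RX/TX
** pair vector per queue plus one vector for the admin queue.
*/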

        /*
        ** Want one vector (RX/TX pair) per queue
        ** plus an additional for the admin queue.
        */
        want = queues + 1;
        if (want <= available)  /* Have enough */
                vectors = want;
        else {
                device_printf(sc->dev,
                    "MSIX Configuration Problem, "
                    "%d vectors available but %d wanted!\n",
                    available, want);
                goto fail;
        }

#ifdef RSS
        /*
        * If we're doing RSS, the number of queues needs to
        * match the number of RSS buckets that are configured.
        *
        * + If there's more queues than RSS buckets, we'll end
        *   up with queues that get no traffic.
        *
        * + If there's more RSS buckets than queues, we'll end
        *   up having multiple RSS buckets map to the same queue,
        *   so there'll be some contention.
        */
        if (queues != rss_getnumbuckets()) {
                device_printf(dev,
                    "%s: queues (%d) != RSS buckets (%d)"
                    "; performance will be impacted.\n",
                     __func__, queues, rss_getnumbuckets());
        }
#endif

        if (pci_alloc_msix(dev, &vectors) == 0) {
                device_printf(sc->dev,
                    "Using MSIX interrupts with %d vectors\n", vectors);
                sc->msix = vectors;
                sc->vsi.num_queues = queues;
        }

        /*
        ** Explicitly set the guest PCI BUSMASTER capability, and rewrite
        ** the ENABLE bit in the MSIX control register at this point to
        ** cause the host to successfully initialize us.
        */
        {
                u16 pci_cmd_word;
                int msix_ctrl;
                pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
                pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
                pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
                pci_find_cap(dev, PCIY_MSIX, &rid);
                rid += PCIR_MSIX_CTRL;
                msix_ctrl = pci_read_config(dev, rid, 2);
                msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
                pci_write_config(dev, rid, msix_ctrl, 2);
        }

        /* Next we need to set up the vector for the Admin Queue */
        rid = 1;        // zero vector + 1
        sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &rid, RF_SHAREABLE | RF_ACTIVE);
        if (sc->res == NULL) {
                device_printf(dev, "Unable to allocate"
                    " bus resource: AQ interrupt\n");
                goto fail;
        }
        if (bus_setup_intr(dev, sc->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixlv_msix_adminq, sc, &sc->tag)) {
                sc->res = NULL;
                device_printf(dev, "Failed to register AQ handler");
                goto fail;
        }
        bus_describe_intr(dev, sc->res, sc->tag, "adminq");

        return (vectors);

fail:
        /* The VF driver MUST use MSIX */
        return (0);
}

static int
ixlv_allocate_pci_resources(struct ixlv_sc *sc)
{
        int             rid;
        device_t        dev = sc->dev;

        rid = PCIR_BAR(0);
        sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &rid, RF_ACTIVE);

        if (!(sc->pci_mem)) {
                device_printf(dev, "Unable to allocate bus resource: memory\n");
                return (ENXIO);
        }

        sc->osdep.mem_bus_space_tag =
                rman_get_bustag(sc->pci_mem);
        sc->osdep.mem_bus_space_handle =
                rman_get_bushandle(sc->pci_mem);
        sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
        sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
        sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;

        sc->hw.back = &sc->osdep;

        /* Disable adminq interrupts */
        ixlv_disable_adminq_irq(&sc->hw);

        /*
        ** Now set up MSI/X; it will return
        ** the number of supported vectors.
        */
        sc->msix = ixlv_init_msix(sc);

        /* We fail without MSIX support */
        if (sc->msix == 0)
                return (ENXIO);

        return (0);
}

static void
ixlv_free_pci_resources(struct ixlv_sc *sc)
{
        struct ixl_vsi         *vsi = &sc->vsi;
        struct ixl_queue       *que = vsi->queues;
        device_t                dev = sc->dev;

        /* We may get here before stations are set up */
        if (que == NULL)
                goto early;

        /*
        **  Release all MSIX queue resources:
        */
        for (int i = 0; i < vsi->num_queues; i++, que++) {
                int rid = que->msix + 1;
                if (que->tag != NULL) {
                        bus_teardown_intr(dev, que->res, que->tag);
                        que->tag = NULL;
                }
                if (que->res != NULL)
                        bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
        }

early:
        /* Clean up the AdminQ interrupt */
        if (sc->tag != NULL) {
                bus_teardown_intr(dev, sc->res, sc->tag);
                sc->tag = NULL;
        }
        if (sc->res != NULL)
                bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);

        pci_release_msi(dev);

        if (sc->msix_mem != NULL)
                bus_release_resource(dev, SYS_RES_MEMORY,
                    PCIR_BAR(IXL_BAR), sc->msix_mem);

        if (sc->pci_mem != NULL)
                bus_release_resource(dev, SYS_RES_MEMORY,
                    PCIR_BAR(0), sc->pci_mem);

        return;
}

/*
 * Create a taskqueue and task for Admin Queue interrupts.
 */
static int
ixlv_init_taskqueue(struct ixlv_sc *sc)
{
        int error = 0;

        TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);

        sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
            taskqueue_thread_enqueue, &sc->tq);
        taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
            device_get_nameunit(sc->dev));

        return (error);
}

/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers for the VSI queues
 *
 **********************************************************************/
static int
ixlv_assign_msix(struct ixlv_sc *sc)
{
        device_t        dev = sc->dev;
        struct          ixl_vsi *vsi = &sc->vsi;
        struct          ixl_queue *que = vsi->queues;
        struct          tx_ring  *txr;
        int             error, rid, vector = 1;

        for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
                int cpu_id = i;
                rid = vector + 1;
                txr = &que->txr;
                que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                    RF_SHAREABLE | RF_ACTIVE);
                if (que->res == NULL) {
                        device_printf(dev, "Unable to allocate"
                            " bus resource: que interrupt [%d]\n", vector);
                        return (ENXIO);
                }
                /* Set the handler function */
                error = bus_setup_intr(dev, que->res,
                    INTR_TYPE_NET | INTR_MPSAFE, NULL,
                    ixlv_msix_que, que, &que->tag);
                if (error) {
                        que->res = NULL;
                        device_printf(dev, "Failed to register que handler");
                        return (error);
                }
                bus_describe_intr(dev, que->res, que->tag, "que %d", i);
                /* Bind the vector to a CPU */
#ifdef RSS
                cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
                bus_bind_intr(dev, que->res, cpu_id);
                que->msix = vector;
                /* promote before shifting so the mask math is done in 64 bits */
                vsi->que_mask |= (u64)1 << que->msix;
                TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
                TASK_INIT(&que->task, 0, ixlv_handle_que, que);
                que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
                    taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
                taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
                    cpu_id, "%s (bucket %d)",
                    device_get_nameunit(dev), cpu_id);
#else
                taskqueue_start_threads(&que->tq, 1, PI_NET,
                    "%s que", device_get_nameunit(dev));
#endif
        }

        return (0);
}
1427
1428 /*
1429 ** Requests a VF reset from the PF.
1430 **
1431 ** Requires the VF's Admin Queue to be initialized.
1432 */
1433 static int
1434 ixlv_reset(struct ixlv_sc *sc)
1435 {
1436         struct i40e_hw  *hw = &sc->hw;
1437         device_t        dev = sc->dev;
1438         int             error = 0;
1439
1440         /* Ask the PF to reset us if we are initiating */
1441         if (sc->init_state != IXLV_RESET_PENDING)
1442                 ixlv_request_reset(sc);
1443
1444         i40e_msec_delay(100);
1445         error = ixlv_reset_complete(hw);
1446         if (error) {
1447                 device_printf(dev, "%s: VF reset failed\n",
1448                     __func__);
1449                 return (error);
1450         }
1451
1452         error = i40e_shutdown_adminq(hw);
1453         if (error) {
1454                 device_printf(dev, "%s: shutdown_adminq failed: %d\n",
1455                     __func__, error);
1456                 return (error);
1457         }
1458
1459         error = i40e_init_adminq(hw);
1460         if (error) {
1461                 device_printf(dev, "%s: init_adminq failed: %d\n",
1462                     __func__, error);
1463                 return (error);
1464         }
1465
1466         return (0);
1467 }
1468
1469 static int
1470 ixlv_reset_complete(struct i40e_hw *hw)
1471 {
1472         u32 reg;
1473
1474         for (int i = 0; i < 100; i++) {
1475                 reg = rd32(hw, I40E_VFGEN_RSTAT) &
1476                     I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1477
1478                 if ((reg == I40E_VFR_VFACTIVE) ||
1479                     (reg == I40E_VFR_COMPLETED))
1480                         return (0);
1481                 i40e_msec_delay(100);
1482         }
1483
1484         return (EBUSY);
1485 }
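     /*
     ** The poll above retries 100 times with a 100 ms delay, so a VF that
     ** never reaches VFACTIVE/COMPLETED gives up with EBUSY after roughly
     ** 10 seconds.
     */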
1486
1487
1488 /*********************************************************************
1489  *
1490  *  Setup networking device structure and register an interface.
1491  *
1492  **********************************************************************/
1493 static int
1494 ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
1495 {
1496         struct ifnet            *ifp;
1497         struct ixl_vsi          *vsi = &sc->vsi;
1498         struct ixl_queue        *que = vsi->queues;
1499
1500         INIT_DBG_DEV(dev, "begin");
1501
1502         ifp = vsi->ifp = if_alloc(IFT_ETHER);
1503         if (ifp == NULL) {
1504                 device_printf(dev, "%s: could not allocate ifnet"
1505                     " structure!\n", __func__);
1506                 return (-1);
1507         }
1508
1509         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1510
1511         ifp->if_mtu = ETHERMTU;
1512         ifp->if_baudrate = 4000000000;  // XXX: placeholder; the PF reports the actual VF link speed
1513         ifp->if_init = ixlv_init;
1514         ifp->if_softc = vsi;
1515         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1516         ifp->if_ioctl = ixlv_ioctl;
1517
1518 #if __FreeBSD_version >= 1100000
1519         if_setgetcounterfn(ifp, ixl_get_counter);
1520 #endif
1521
1522         ifp->if_transmit = ixl_mq_start;
1523
1524         ifp->if_qflush = ixl_qflush;
1525         ifp->if_snd.ifq_maxlen = que->num_desc - 2;
1526
1527         ether_ifattach(ifp, sc->hw.mac.addr);
1528
1529         vsi->max_frame_size =
1530             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1531             + ETHER_VLAN_ENCAP_LEN;
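             /*
             ** For the default 1500-byte MTU this works out to
             ** 1500 + 14 (header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes.
             */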
1532
1533         /*
1534          * Tell the upper layer(s) we support long frames.
1535          */
1536         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1537
1538         ifp->if_capabilities |= IFCAP_HWCSUM;
1539         ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1540         ifp->if_capabilities |= IFCAP_TSO;
1541         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1542
1543         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1544                              |  IFCAP_VLAN_HWTSO
1545                              |  IFCAP_VLAN_MTU
1546                              |  IFCAP_VLAN_HWCSUM
1547                              |  IFCAP_LRO;
1548         ifp->if_capenable = ifp->if_capabilities;
1549
1550         /*
1551         ** Don't turn this on by default, if vlans are
1552         ** created on another pseudo device (eg. lagg)
1553         ** then vlan events are not passed thru, breaking
1554         ** operation, but with HW FILTER off it works. If
1555         ** using vlans directly on the ixl driver you can
1556         ** enable this and get full hardware tag filtering.
1557         */
1558         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1559
1560         /*
1561          * Specify the media types supported by this adapter and register
1562          * callbacks to update media and link information
1563          */
1564         ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
1565                      ixlv_media_status);
1566
1567         // JFV Add media types later?
1568
1569         ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1570         ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1571
1572         INIT_DBG_DEV(dev, "end");
1573         return (0);
1574 }
1575
1576 /*
1577 ** Allocate and setup the interface queues
1578 */
1579 static int
1580 ixlv_setup_queues(struct ixlv_sc *sc)
1581 {
1582         device_t                dev = sc->dev;
1583         struct ixl_vsi          *vsi;
1584         struct ixl_queue        *que;
1585         struct tx_ring          *txr;
1586         struct rx_ring          *rxr;
1587         int                     rsize, tsize;
1588         int                     error = I40E_SUCCESS;
1589
1590         vsi = &sc->vsi;
1591         vsi->back = (void *)sc;
1592         vsi->hw = &sc->hw;
1593         vsi->num_vlans = 0;
1594
1595         /* Get memory for the station queues */
1596         if (!(vsi->queues =
1597                 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
1598                 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1599                         device_printf(dev, "Unable to allocate queue memory\n");
1600                         error = ENOMEM;
1601                         goto early;
1602         }
1603
1604         for (int i = 0; i < vsi->num_queues; i++) {
1605                 que = &vsi->queues[i];
1606                 que->num_desc = ixlv_ringsz;
1607                 que->me = i;
1608                 que->vsi = vsi;
1609                 /* mark the queue as active */
1610                 vsi->active_queues |= (u64)1 << que->me;
1611
1612                 txr = &que->txr;
1613                 txr->que = que;
1614                 txr->tail = I40E_QTX_TAIL1(que->me);
1615                 /* Initialize the TX lock */
1616                 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
1617                     device_get_nameunit(dev), que->me);
1618                 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
1619                 /*
1620                 ** Create the TX descriptor ring, the extra int is
1621                 ** added as the location for HEAD WB.
1622                 */
1623                 tsize = roundup2((que->num_desc *
1624                     sizeof(struct i40e_tx_desc)) +
1625                     sizeof(u32), DBA_ALIGN);
1626                 if (i40e_allocate_dma_mem(&sc->hw,
1627                     &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
1628                         device_printf(dev,
1629                             "Unable to allocate TX Descriptor memory\n");
1630                         error = ENOMEM;
1631                         goto fail;
1632                 }
1633                 txr->base = (struct i40e_tx_desc *)txr->dma.va;
1634                 bzero((void *)txr->base, tsize);
1635                 /* Now allocate transmit soft structs for the ring */
1636                 if (ixl_allocate_tx_data(que)) {
1637                         device_printf(dev,
1638                             "Critical Failure setting up TX structures\n");
1639                         error = ENOMEM;
1640                         goto fail;
1641                 }
1642                 /* Allocate a buf ring */
1643                 txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
1644                     M_WAITOK, &txr->mtx);
1645                 if (txr->br == NULL) {
1646                         device_printf(dev,
1647                             "Critical Failure setting up TX buf ring\n");
1648                         error = ENOMEM;
1649                         goto fail;
1650                 }
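                 /*
                 ** Note: with M_WAITOK, buf_ring_alloc() sleeps until memory
                 ** is available rather than returning NULL, so the check
                 ** above is purely defensive.
                 */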
1651
1652                 /*
1653                  * Next the RX queues...
1654                  */ 
1655                 rsize = roundup2(que->num_desc *
1656                     sizeof(union i40e_rx_desc), DBA_ALIGN);
1657                 rxr = &que->rxr;
1658                 rxr->que = que;
1659                 rxr->tail = I40E_QRX_TAIL1(que->me);
1660
1661                 /* Initialize the RX side lock */
1662                 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
1663                     device_get_nameunit(dev), que->me);
1664                 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
1665
1666                 if (i40e_allocate_dma_mem(&sc->hw,
1667                     &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
1668                         device_printf(dev,
1669                             "Unable to allocate RX Descriptor memory\n");
1670                         error = ENOMEM;
1671                         goto fail;
1672                 }
1673                 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
1674                 bzero((void *)rxr->base, rsize);
1675
1676                 /* Allocate receive soft structs for the ring*/
1677                 if (ixl_allocate_rx_data(que)) {
1678                         device_printf(dev,
1679                             "Critical Failure setting up receive structs\n");
1680                         error = ENOMEM;
1681                         goto fail;
1682                 }
1683         }
1684
1685         return (0);
1686
1687 fail:
1688         for (int i = 0; i < vsi->num_queues; i++) {
1689                 que = &vsi->queues[i];
1690                 rxr = &que->rxr;
1691                 txr = &que->txr;
1692                 if (rxr->base)
1693                         i40e_free_dma_mem(&sc->hw, &rxr->dma);
1694                 if (txr->base)
1695                         i40e_free_dma_mem(&sc->hw, &txr->dma);
1696         }
1697         free(vsi->queues, M_DEVBUF);
1698
1699 early:
1700         return (error);
1701 }
1702
1703 /*
1704 ** This routine is run via an vlan config EVENT,
1705 ** it enables us to use the HW Filter table since
1706 ** we can get the vlan id. This just creates the
1707 ** entry in the soft version of the VFTA, init will
1708 ** repopulate the real table.
1709 */
1710 static void
1711 ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1712 {
1713         struct ixl_vsi                  *vsi = ifp->if_softc;
1714         struct ixlv_sc          *sc = vsi->back;
1715         struct ixlv_vlan_filter *v;
1716
1717
1718         if (ifp->if_softc !=  arg)   /* Not our event */
1719                 return;
1720
1721         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1722                 return;
1723
1724         /* Sanity check - make sure it doesn't already exist */
1725         SLIST_FOREACH(v, sc->vlan_filters, next) {
1726                 if (v->vlan == vtag)
1727                         return;
1728         }
1729
1730         mtx_lock(&sc->mtx);
1731         v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
             if (v == NULL) {        /* M_NOWAIT allocations can fail */
                     mtx_unlock(&sc->mtx);
                     return;
             }
1732         ++vsi->num_vlans;
1733         v->vlan = vtag;
1734         v->flags = IXL_FILTER_ADD;
1735         SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
1736         ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
1737             IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
1738         mtx_unlock(&sc->mtx);
1739         return;
1740 }
1741
1742 /*
1743 ** This routine is run via an vlan
1744 ** unconfig EVENT, remove our entry
1745 ** in the soft vfta.
1746 */
1747 static void
1748 ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1749 {
1750         struct ixl_vsi                  *vsi = ifp->if_softc;
1751         struct ixlv_sc          *sc = vsi->back;
1752         struct ixlv_vlan_filter *v;
1753         int                             i = 0;
1754         
1755         if (ifp->if_softc !=  arg)
1756                 return;
1757
1758         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1759                 return;
1760
1761         mtx_lock(&sc->mtx);
1762         SLIST_FOREACH(v, sc->vlan_filters, next) {
1763                 if (v->vlan == vtag) {
1764                         v->flags = IXL_FILTER_DEL;
1765                         ++i;
1766                         --vsi->num_vlans;
1767                 }
1768         }
1769         if (i)
1770                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
1771                     IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
1772         mtx_unlock(&sc->mtx);
1773         return;
1774 }
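     /*
     ** Both VLAN event handlers follow the driver's mark-and-sweep model:
     ** entries are only flagged IXL_FILTER_ADD/IXL_FILTER_DEL here, and the
     ** queued virtual channel command pushes the batch to the PF later.
     */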
1775
1776 /*
1777 ** Get a new filter and add it to the mac filter list.
1778 */
1779 static struct ixlv_mac_filter *
1780 ixlv_get_mac_filter(struct ixlv_sc *sc)
1781 {
1782         struct ixlv_mac_filter  *f;
1783
1784         f = malloc(sizeof(struct ixlv_mac_filter),
1785             M_DEVBUF, M_NOWAIT | M_ZERO);
1786         if (f)
1787                 SLIST_INSERT_HEAD(sc->mac_filters, f, next);
1788
1789         return (f);
1790 }
1791
1792 /*
1793 ** Find the filter with matching MAC address
1794 */
1795 static struct ixlv_mac_filter *
1796 ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
1797 {
1798         struct ixlv_mac_filter  *f;
1799         bool                            match = FALSE;
1800
1801         SLIST_FOREACH(f, sc->mac_filters, next) {
1802                 if (cmp_etheraddr(f->macaddr, macaddr)) {
1803                         match = TRUE;
1804                         break;
1805                 }
1806         }       
1807
1808         if (!match)
1809                 f = NULL;
1810         return (f);
1811 }
1812
1813 /*
1814 ** Admin Queue interrupt handler
1815 */
1816 static void
1817 ixlv_msix_adminq(void *arg)
1818 {
1819         struct ixlv_sc  *sc = arg;
1820         struct i40e_hw  *hw = &sc->hw;
1821         device_t        dev = sc->dev;
1822         u32             reg, mask, oldreg;
1823
1824         reg = rd32(hw, I40E_VFINT_ICR01);
1825         mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
1826
1827         reg = rd32(hw, I40E_VFINT_DYN_CTL01);
1828         reg |= I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1829         wr32(hw, I40E_VFINT_DYN_CTL01, reg);
1830
1831         /* check for Admin queue errors */
1832         oldreg = reg = rd32(hw, hw->aq.arq.len);
1833         if (reg & I40E_VF_ARQLEN_ARQVFE_MASK) {
1834                 device_printf(dev, "ARQ VF Error detected\n");
1835                 reg &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
1836         }
1837         if (reg & I40E_VF_ARQLEN_ARQOVFL_MASK) {
1838                 device_printf(dev, "ARQ Overflow Error detected\n");
1839                 reg &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
1840         }
1841         if (reg & I40E_VF_ARQLEN_ARQCRIT_MASK) {
1842                 device_printf(dev, "ARQ Critical Error detected\n");
1843                 reg &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
1844         }
1845         if (oldreg != reg)
1846                 wr32(hw, hw->aq.arq.len, reg);
1847
1848         oldreg = reg = rd32(hw, hw->aq.asq.len);
1849         if (reg & I40E_VF_ATQLEN_ATQVFE_MASK) {
1850                 device_printf(dev, "ASQ VF Error detected\n");
1851                 reg &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
1852         }
1853         if (reg & I40E_VF_ATQLEN_ATQOVFL_MASK) {
1854                 device_printf(dev, "ASQ Overflow Error detected\n");
1855                 reg &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
1856         }
1857         if (reg & I40E_VF_ATQLEN_ATQCRIT_MASK) {
1858                 device_printf(dev, "ASQ Critical Error detected\n");
1859                 reg &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
1860         }
1861         if (oldreg != reg)
1862                 wr32(hw, hw->aq.asq.len, reg);
1863
1864         /* re-enable interrupt causes */
1865         wr32(hw, I40E_VFINT_ICR0_ENA1, mask);
1866         wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);
1867
1868         /* schedule task */
1869         taskqueue_enqueue(sc->tq, &sc->aq_irq);
1870         return;
1871 }
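     /*
     ** Note: I40E_PFINT_DYN_CTL0_CLEARPBA_MASK above is a PF-side define,
     ** but the CLEARPBA bit appears to sit at the same position in the
     ** VF's VFINT_DYN_CTL01 register, so the write behaves as intended.
     */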
1872
1873 void
1874 ixlv_enable_intr(struct ixl_vsi *vsi)
1875 {
1876         struct i40e_hw          *hw = vsi->hw;
1877         struct ixl_queue        *que = vsi->queues;
1878
1879         ixlv_enable_adminq_irq(hw);
1880         for (int i = 0; i < vsi->num_queues; i++, que++)
1881                 ixlv_enable_queue_irq(hw, que->me);
1882 }
1883
1884 void
1885 ixlv_disable_intr(struct ixl_vsi *vsi)
1886 {
1887         struct i40e_hw          *hw = vsi->hw;
1888         struct ixl_queue       *que = vsi->queues;
1889
1890         ixlv_disable_adminq_irq(hw);
1891         for (int i = 0; i < vsi->num_queues; i++, que++)
1892                 ixlv_disable_queue_irq(hw, que->me);
1893 }
1894
1895
1896 static void
1897 ixlv_disable_adminq_irq(struct i40e_hw *hw)
1898 {
1899         wr32(hw, I40E_VFINT_DYN_CTL01, 0);
1900         wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
1901         /* flush */
1902         rd32(hw, I40E_VFGEN_RSTAT);
1903         return;
1904 }
1905
1906 static void
1907 ixlv_enable_adminq_irq(struct i40e_hw *hw)
1908 {
1909         wr32(hw, I40E_VFINT_DYN_CTL01,
1910             I40E_VFINT_DYN_CTL01_INTENA_MASK |
1911             I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1912         wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
1913         /* flush */
1914         rd32(hw, I40E_VFGEN_RSTAT);
1915         return;
1916 }
1917
1918 static void
1919 ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
1920 {
1921         u32             reg;
1922
1923         reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1924             I40E_VFINT_DYN_CTLN_CLEARPBA_MASK; 
1925         wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
1926 }
1927
1928 static void
1929 ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
1930 {
1931         wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
1932         rd32(hw, I40E_VFGEN_RSTAT);
1933         return;
1934 }
1935
1936
1937 /*
1938 ** Provide a update to the queue RX
1939 ** interrupt moderation value.
1940 */
1941 static void
1942 ixlv_set_queue_rx_itr(struct ixl_queue *que)
1943 {
1944         struct ixl_vsi  *vsi = que->vsi;
1945         struct i40e_hw  *hw = vsi->hw;
1946         struct rx_ring  *rxr = &que->rxr;
1947         u16             rx_itr;
1948         u16             rx_latency = 0;
1949         int             rx_bytes;
1950
1951
1952         /* Idle, do nothing */
1953         if (rxr->bytes == 0)
1954                 return;
1955
1956         if (ixlv_dynamic_rx_itr) {
1957                 rx_bytes = rxr->bytes/rxr->itr;
1958                 rx_itr = rxr->itr;
1959
1960                 /* Adjust latency range */
1961                 switch (rxr->latency) {
1962                 case IXL_LOW_LATENCY:
1963                         if (rx_bytes > 10) {
1964                                 rx_latency = IXL_AVE_LATENCY;
1965                                 rx_itr = IXL_ITR_20K;
1966                         }
1967                         break;
1968                 case IXL_AVE_LATENCY:
1969                         if (rx_bytes > 20) {
1970                                 rx_latency = IXL_BULK_LATENCY;
1971                                 rx_itr = IXL_ITR_8K;
1972                         } else if (rx_bytes <= 10) {
1973                                 rx_latency = IXL_LOW_LATENCY;
1974                                 rx_itr = IXL_ITR_100K;
1975                         }
1976                         break;
1977                 case IXL_BULK_LATENCY:
1978                         if (rx_bytes <= 20) {
1979                                 rx_latency = IXL_AVE_LATENCY;
1980                                 rx_itr = IXL_ITR_20K;
1981                         }
1982                         break;
1983                  }
1984
1985                 rxr->latency = rx_latency;
1986
1987                 if (rx_itr != rxr->itr) {
1988                         /* do an exponential smoothing */
1989                         rx_itr = (10 * rx_itr * rxr->itr) /
1990                             ((9 * rx_itr) + rxr->itr);
1991                         rxr->itr = rx_itr & IXL_MAX_ITR;
1992                         wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1993                             que->me), rxr->itr);
1994                 }
1995         } else { /* We may have toggled to non-dynamic */
1996                 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
1997                         vsi->rx_itr_setting = ixlv_rx_itr;
1998                 /* Update the hardware if needed */
1999                 if (rxr->itr != vsi->rx_itr_setting) {
2000                         rxr->itr = vsi->rx_itr_setting;
2001                         wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
2002                             que->me), rxr->itr);
2003                 }
2004         }
2005         rxr->bytes = 0;
2006         rxr->packets = 0;
2007         return;
2008 }
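     /*
     ** The smoothing above is deliberately sticky. For example, moving
     ** from a current itr of 50 toward a target rx_itr of 10 yields
     **     (10 * 10 * 50) / ((9 * 10) + 50) = 5000 / 140 ~= 35
     ** so the register steps toward the new class instead of jumping,
     ** which damps oscillation near a latency-class boundary.
     */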
2009
2010
2011 /*
2012 ** Provide a update to the queue TX
2013 ** interrupt moderation value.
2014 */
2015 static void
2016 ixlv_set_queue_tx_itr(struct ixl_queue *que)
2017 {
2018         struct ixl_vsi  *vsi = que->vsi;
2019         struct i40e_hw  *hw = vsi->hw;
2020         struct tx_ring  *txr = &que->txr;
2021         u16             tx_itr;
2022         u16             tx_latency = 0;
2023         int             tx_bytes;
2024
2025
2026         /* Idle, do nothing */
2027         if (txr->bytes == 0)
2028                 return;
2029
2030         if (ixlv_dynamic_tx_itr) {
2031                 tx_bytes = txr->bytes/txr->itr;
2032                 tx_itr = txr->itr;
2033
2034                 switch (txr->latency) {
2035                 case IXL_LOW_LATENCY:
2036                         if (tx_bytes > 10) {
2037                                 tx_latency = IXL_AVE_LATENCY;
2038                                 tx_itr = IXL_ITR_20K;
2039                         }
2040                         break;
2041                 case IXL_AVE_LATENCY:
2042                         if (tx_bytes > 20) {
2043                                 tx_latency = IXL_BULK_LATENCY;
2044                                 tx_itr = IXL_ITR_8K;
2045                         } else if (tx_bytes <= 10) {
2046                                 tx_latency = IXL_LOW_LATENCY;
2047                                 tx_itr = IXL_ITR_100K;
2048                         }
2049                         break;
2050                 case IXL_BULK_LATENCY:
2051                         if (tx_bytes <= 20) {
2052                                 tx_latency = IXL_AVE_LATENCY;
2053                                 tx_itr = IXL_ITR_20K;
2054                         }
2055                         break;
2056                 }
2057
2058                 txr->latency = tx_latency;
2059
2060                 if (tx_itr != txr->itr) {
2061                  /* do an exponential smoothing */
2062                         tx_itr = (10 * tx_itr * txr->itr) /
2063                             ((9 * tx_itr) + txr->itr);
2064                         txr->itr = tx_itr & IXL_MAX_ITR;
2065                         wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2066                             que->me), txr->itr);
2067                 }
2068
2069         } else { /* We may have toggled to non-dynamic */
2070                 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2071                         vsi->tx_itr_setting = ixlv_tx_itr;
2072                 /* Update the hardware if needed */
2073                 if (txr->itr != vsi->tx_itr_setting) {
2074                         txr->itr = vsi->tx_itr_setting;
2075                         wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2076                             que->me), txr->itr);
2077                 }
2078         }
2079         txr->bytes = 0;
2080         txr->packets = 0;
2081         return;
2082 }
2083
2084
2085 /*
2086 **
2087 ** MSIX Interrupt Handlers and Tasklets
2088 **
2089 */
2090 static void
2091 ixlv_handle_que(void *context, int pending)
2092 {
2093         struct ixl_queue *que = context;
2094         struct ixl_vsi *vsi = que->vsi;
2095         struct i40e_hw  *hw = vsi->hw;
2096         struct tx_ring  *txr = &que->txr;
2097         struct ifnet    *ifp = vsi->ifp;
2098         bool            more;
2099
2100         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2101                 more = ixl_rxeof(que, IXL_RX_LIMIT);
2102                 mtx_lock(&txr->mtx);
2103                 ixl_txeof(que);
2104                 if (!drbr_empty(ifp, txr->br))
2105                         ixl_mq_start_locked(ifp, txr);
2106                 mtx_unlock(&txr->mtx);
2107                 if (more) {
2108                         taskqueue_enqueue(que->tq, &que->task);
2109                         return;
2110                 }
2111         }
2112
2113         /* Re-enable this queue's interrupt */
2114         ixlv_enable_queue_irq(hw, que->me);
2115         return;
2116 }
2117
2118
2119 /*********************************************************************
2120  *
2121  *  MSIX Queue Interrupt Service routine
2122  *
2123  **********************************************************************/
2124 static void
2125 ixlv_msix_que(void *arg)
2126 {
2127         struct ixl_queue        *que = arg;
2128         struct ixl_vsi  *vsi = que->vsi;
2129         struct i40e_hw  *hw = vsi->hw;
2130         struct tx_ring  *txr = &que->txr;
2131         bool            more_tx, more_rx;
2132
2133         /* Spurious interrupts are ignored */
2134         if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
2135                 return;
2136
2137         ++que->irqs;
2138
2139         more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
2140
2141         mtx_lock(&txr->mtx);
2142         more_tx = ixl_txeof(que);
2143         /*
2144         ** Make certain that if the stack 
2145         ** has anything queued the task gets
2146         ** scheduled to handle it.
2147         */
2148         if (!drbr_empty(vsi->ifp, txr->br))
2149                 more_tx = 1;
2150         mtx_unlock(&txr->mtx);
2151
2152         ixlv_set_queue_rx_itr(que);
2153         ixlv_set_queue_tx_itr(que);
2154
2155         if (more_tx || more_rx)
2156                 taskqueue_enqueue(que->tq, &que->task);
2157         else
2158                 ixlv_enable_queue_irq(hw, que->me);
2159
2160         return;
2161 }
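     /*
     ** Interrupt policy: the queue vector stays masked while work remains
     ** (the task re-runs via ixlv_handle_que) and is only re-enabled once
     ** both rings report no further work.
     */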
2162
2163
2164 /*********************************************************************
2165  *
2166  *  Media Ioctl callback
2167  *
2168  *  This routine is called whenever the user queries the status of
2169  *  the interface using ifconfig.
2170  *
2171  **********************************************************************/
2172 static void
2173 ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
2174 {
2175         struct ixl_vsi          *vsi = ifp->if_softc;
2176         struct ixlv_sc  *sc = vsi->back;
2177
2178         INIT_DBG_IF(ifp, "begin");
2179
2180         mtx_lock(&sc->mtx);
2181
2182         ixlv_update_link_status(sc);
2183
2184         ifmr->ifm_status = IFM_AVALID;
2185         ifmr->ifm_active = IFM_ETHER;
2186
2187         if (!vsi->link_up) {
2188                 mtx_unlock(&sc->mtx);
2189                 INIT_DBG_IF(ifp, "end: link not up");
2190                 return;
2191         }
2192
2193         ifmr->ifm_status |= IFM_ACTIVE;
2194         /* Hardware is always full-duplex */
2195         ifmr->ifm_active |= IFM_FDX;
2196         mtx_unlock(&sc->mtx);
2197         INIT_DBG_IF(ifp, "end");
2198         return;
2199 }
2200
2201 /*********************************************************************
2202  *
2203  *  Media Ioctl callback
2204  *
2205  *  This routine is called when the user changes speed/duplex using
2206  *  media/mediaopt options with ifconfig.
2207  *
2208  **********************************************************************/
2209 static int
2210 ixlv_media_change(struct ifnet * ifp)
2211 {
2212         struct ixl_vsi *vsi = ifp->if_softc;
             struct ixlv_sc *sc = vsi->back;
2213         struct ifmedia *ifm = &sc->media; /* media state lives in the sc, not the vsi */
2214
2215         INIT_DBG_IF(ifp, "begin");
2216
2217         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2218                 return (EINVAL);
2219
2220         INIT_DBG_IF(ifp, "end");
2221         return (0);
2222 }
2223
2224
2225 /*********************************************************************
2226  *  Multicast Initialization
2227  *
2228  *  This routine is called by init to reset a fresh state.
2229  *
2230  **********************************************************************/
2231
2232 static void
2233 ixlv_init_multi(struct ixl_vsi *vsi)
2234 {
2235         struct ixlv_mac_filter *f;
2236         struct ixlv_sc  *sc = vsi->back;
2237         int                     mcnt = 0;
2238
2239         IOCTL_DBG_IF(vsi->ifp, "begin");
2240
2241         /* First clear any multicast filters */
2242         SLIST_FOREACH(f, sc->mac_filters, next) {
2243                 if ((f->flags & IXL_FILTER_USED)
2244                     && (f->flags & IXL_FILTER_MC)) {
2245                         f->flags |= IXL_FILTER_DEL;
2246                         mcnt++;
2247                 }
2248         }
2249         if (mcnt > 0)
2250                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2251                     IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2252                     sc);
2253
2254         IOCTL_DBG_IF(vsi->ifp, "end");
2255 }
2256
2257 static void
2258 ixlv_add_multi(struct ixl_vsi *vsi)
2259 {
2260         struct ifmultiaddr      *ifma;
2261         struct ifnet            *ifp = vsi->ifp;
2262         struct ixlv_sc  *sc = vsi->back;
2263         int                     mcnt = 0;
2264
2265         IOCTL_DBG_IF(ifp, "begin");
2266
2267         if_maddr_rlock(ifp);
2268         /*
2269         ** Get a count, to decide if we
2270         ** simply use multicast promiscuous.
2271         */
2272         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2273                 if (ifma->ifma_addr->sa_family != AF_LINK)
2274                         continue;
2275                 mcnt++;
2276         }
2277         if_maddr_runlock(ifp);
2278
2279         // TODO: Remove -- cannot set promiscuous mode in a VF
2280         if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
2281                 /* delete all multicast filters */
2282                 ixlv_init_multi(vsi);
2283                 sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
2284                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2285                     IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
2286                     sc);
2287                 IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
2288                 return;
2289         }
2290
2291         mcnt = 0;
2292         if_maddr_rlock(ifp);
2293         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2294                 if (ifma->ifma_addr->sa_family != AF_LINK)
2295                         continue;
2296                 if (!ixlv_add_mac_filter(sc,
2297                     (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2298                     IXL_FILTER_MC))
2299                         mcnt++;
2300         }
2301         if_maddr_runlock(ifp);
2302         /*
2303         ** Notify AQ task that sw filters need to be
2304         ** added to hw list
2305         */
2306         if (mcnt > 0)
2307                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2308                     IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
2309                     sc);
2310
2311         IOCTL_DBG_IF(ifp, "end");
2312 }
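     /*
     ** ixlv_add_multi() makes two passes over the address list: the first
     ** only counts entries so an overflow can fall back to multicast
     ** promiscuous mode, and the second actually stages the filters.
     */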
2313
2314 static void
2315 ixlv_del_multi(struct ixl_vsi *vsi)
2316 {
2317         struct ixlv_mac_filter *f;
2318         struct ifmultiaddr      *ifma;
2319         struct ifnet            *ifp = vsi->ifp;
2320         struct ixlv_sc  *sc = vsi->back;
2321         int                     mcnt = 0;
2322         bool            match = FALSE;
2323
2324         IOCTL_DBG_IF(ifp, "begin");
2325
2326         /* Search for removed multicast addresses */
2327         if_maddr_rlock(ifp);
2328         SLIST_FOREACH(f, sc->mac_filters, next) {
2329                 if ((f->flags & IXL_FILTER_USED)
2330                     && (f->flags & IXL_FILTER_MC)) {
2331                         /* check if mac address in filter is in sc's list */
2332                         match = FALSE;
2333                         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2334                                 if (ifma->ifma_addr->sa_family != AF_LINK)
2335                                         continue;
2336                                 u8 *mc_addr =
2337                                     (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2338                                 if (cmp_etheraddr(f->macaddr, mc_addr)) {
2339                                         match = TRUE;
2340                                         break;
2341                                 }
2342                         }
2343                         /* if this filter is not in the sc's list, remove it */
2344                         if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
2345                                 f->flags |= IXL_FILTER_DEL;
2346                                 mcnt++;
2347                                 IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
2348                                     MAC_FORMAT_ARGS(f->macaddr));
2349                         }
2350                         else if (match == FALSE)
2351                                 IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
2352                                     MAC_FORMAT_ARGS(f->macaddr));
2353                 }
2354         }
2355         if_maddr_runlock(ifp);
2356
2357         if (mcnt > 0)
2358                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2359                     IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2360                     sc);
2361
2362         IOCTL_DBG_IF(ifp, "end");
2363 }
2364
2365 /*********************************************************************
2366  *  Timer routine
2367  *
2368  *  This routine checks link status, updates statistics,
2369  *  and runs the watchdog check.
2370  *
2371  **********************************************************************/
2372
2373 static void
2374 ixlv_local_timer(void *arg)
2375 {
2376         struct ixlv_sc  *sc = arg;
2377         struct i40e_hw          *hw = &sc->hw;
2378         struct ixl_vsi          *vsi = &sc->vsi;
2379         struct ixl_queue        *que = vsi->queues;
2380         device_t                dev = sc->dev;
2381         int                     hung = 0;
2382         u32                     mask, val;
2383
2384         IXLV_CORE_LOCK_ASSERT(sc);
2385
2386         /* If Reset is in progress just bail */
2387         if (sc->init_state == IXLV_RESET_PENDING)
2388                 return;
2389
2390         /* Check for when PF triggers a VF reset */
2391         val = rd32(hw, I40E_VFGEN_RSTAT) &
2392             I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2393
2394         if (val != I40E_VFR_VFACTIVE
2395             && val != I40E_VFR_COMPLETED) {
2396                 DDPRINTF(dev, "reset in progress! (%d)", val);
2397                 return;
2398         }
2399
2400         ixlv_request_stats(sc);
2401
2402         /* clean and process any events */
2403         taskqueue_enqueue(sc->tq, &sc->aq_irq);
2404
2405         /*
2406         ** Check status on the queues for a hang
2407         */
2408         mask = (I40E_VFINT_DYN_CTLN_INTENA_MASK |
2409             I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK);
2410
2411         for (int i = 0; i < vsi->num_queues; i++,que++) {
2412                 /* Any queues with outstanding work get a sw irq */
2413                 if (que->busy)
2414                         wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
2415                 /*
2416                 ** Each time txeof runs without cleaning, but there
2417                 ** are uncleaned descriptors it increments busy. If
2418                 ** we get to 5 we declare it hung.
2419                 */
2420                 if (que->busy == IXL_QUEUE_HUNG) {
2421                         ++hung;
2422                         /* Mark the queue as inactive */
2423                         vsi->active_queues &= ~((u64)1 << que->me);
2424                         continue;
2425                 } else {
2426                         /* Check if we've come back from hung */
2427                         if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
2428                                 vsi->active_queues |= ((u64)1 << que->me);
2429                 }
2430                 if (que->busy >= IXL_MAX_TX_BUSY) {
2431                         device_printf(dev,"Warning queue %d "
2432                             "appears to be hung!\n", i);
2433                         que->busy = IXL_QUEUE_HUNG;
2434                         ++hung;
2435                 }
2436         }
2437         /* Only reset when all queues show hung */
2438         if (hung == vsi->num_queues)
2439                 goto hung;
2440         callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
2441         return;
2442
2443 hung:
2444         device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
2445         sc->init_state = IXLV_RESET_REQUIRED;
2446         ixlv_init_locked(sc);
2447 }
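     /*
     ** Note that the early returns above skip callout_reset(), so the
     ** timer stops ticking across a reset; the init path is expected to
     ** restart it once the VF comes back up.
     */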
2448
2449 /*
2450 ** Note: this routine updates the OS on the link state
2451 **      the real check of the hardware only happens with
2452 **      a link interrupt.
2453 */
2454 void
2455 ixlv_update_link_status(struct ixlv_sc *sc)
2456 {
2457         struct ixl_vsi          *vsi = &sc->vsi;
2458         struct ifnet            *ifp = vsi->ifp;
2459         device_t                 dev = sc->dev;
2460
2461         if (vsi->link_up) {
2462                 if (vsi->link_active == FALSE) {
2463                         if (bootverbose)
2464                                 device_printf(dev,"Link is Up, %d Gbps\n",
2465                                     (vsi->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
2466                         vsi->link_active = TRUE;
2467                         if_link_state_change(ifp, LINK_STATE_UP);
2468                 }
2469         } else { /* Link down */
2470                 if (vsi->link_active == TRUE) {
2471                         if (bootverbose)
2472                                 device_printf(dev,"Link is Down\n");
2473                         if_link_state_change(ifp, LINK_STATE_DOWN);
2474                         vsi->link_active = FALSE;
2475                 }
2476         }
2477
2478         return;
2479 }
2480
2481 /*********************************************************************
2482  *
2483  *  This routine disables all traffic on the adapter by issuing a
2484  *  global reset on the MAC and deallocates TX/RX buffers.
2485  *
2486  **********************************************************************/
2487
2488 static void
2489 ixlv_stop(struct ixlv_sc *sc)
2490 {
2491         struct ifnet *ifp;
2492         int start;
2493
2494         ifp = sc->vsi.ifp;
2495         INIT_DBG_IF(ifp, "begin");
2496
2497         IXLV_CORE_LOCK_ASSERT(sc);
2498
2499         ixl_vc_flush(&sc->vc_mgr);
2500         ixlv_disable_queues(sc);
2501
2502         start = ticks;
2503         while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
2504             ((ticks - start) < hz/10))
2505                 ixlv_do_adminq_locked(sc);
2506
2507         /* Stop the local timer */
2508         callout_stop(&sc->timer);
2509
2510         INIT_DBG_IF(ifp, "end");
2511 }
2512
2513
2514 /*********************************************************************
2515  *
2516  *  Free all station queue structs.
2517  *
2518  **********************************************************************/
2519 static void
2520 ixlv_free_queues(struct ixl_vsi *vsi)
2521 {
2522         struct ixlv_sc  *sc = (struct ixlv_sc *)vsi->back;
2523         struct ixl_queue        *que = vsi->queues;
2524
2525         for (int i = 0; i < vsi->num_queues; i++, que++) {
2526                 struct tx_ring *txr = &que->txr;
2527                 struct rx_ring *rxr = &que->rxr;
2528         
2529                 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2530                         continue;
2531                 IXL_TX_LOCK(txr);
2532                 ixl_free_que_tx(que);
2533                 if (txr->base)
2534                         i40e_free_dma_mem(&sc->hw, &txr->dma);
2535                 IXL_TX_UNLOCK(txr);
2536                 IXL_TX_LOCK_DESTROY(txr);
2537
2538                 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2539                         continue;
2540                 IXL_RX_LOCK(rxr);
2541                 ixl_free_que_rx(que);
2542                 if (rxr->base)
2543                         i40e_free_dma_mem(&sc->hw, &rxr->dma);
2544                 IXL_RX_UNLOCK(rxr);
2545                 IXL_RX_LOCK_DESTROY(rxr);
2546                 
2547         }
2548         free(vsi->queues, M_DEVBUF);
2549 }
2550
2551
2552 /*
2553 ** ixlv_config_rss - setup RSS 
2554 **
2555 ** RSS keys and table are cleared on VF reset.
2556 */
2557 static void
2558 ixlv_config_rss(struct ixlv_sc *sc)
2559 {
2560         struct i40e_hw  *hw = &sc->hw;
2561         struct ixl_vsi  *vsi = &sc->vsi;
2562         u32             lut = 0;
2563         u64             set_hena = 0, hena;
2564         int             i, j, que_id;
2565 #ifdef RSS
2566         u32             rss_hash_config;
2567         u32             rss_seed[IXL_KEYSZ];
2568 #else
2569         u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
2570                             0x183cfd8c, 0xce880440, 0x580cbc3c,
2571                             0x35897377, 0x328b25e1, 0x4fa98922,
2572                             0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
2573 #endif
2574         
2575         /* Don't set up RSS if using a single queue */
2576         if (vsi->num_queues == 1) {
2577                 wr32(hw, I40E_VFQF_HENA(0), 0);
2578                 wr32(hw, I40E_VFQF_HENA(1), 0);
2579                 ixl_flush(hw);
2580                 return;
2581         }
2582
2583 #ifdef RSS
2584         /* Fetch the configured RSS key */
2585         rss_getkey((uint8_t *) &rss_seed);
2586 #endif
2587         /* Fill out hash function seed */
2588         for (i = 0; i < IXL_KEYSZ; i++) /* rss_seed[] has IXL_KEYSZ entries */
2589                 wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
2590
2591         /* Enable PCTYPES for RSS: */
2592 #ifdef RSS
2593         rss_hash_config = rss_gethashconfig();
2594         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2595                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2596         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2597                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2598         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2599                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2600         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2601                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2602         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2603                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2604         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2605                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2606         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2607                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
2608 #else
2609         set_hena =
2610                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
2611                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
2612                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
2613                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
2614                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
2615                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
2616                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
2617                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
2618                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
2619                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
2620                 ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
2621 #endif
2622         hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
2623             ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
2624         hena |= set_hena;
2625         wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
2626         wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
2627
2628         /*
             ** Populate the LUT with the queues in round robin fashion.
             ** Each 32-bit HLUT register packs four one-byte entries, so
             ** the loop walks 4 * (I40E_VFQF_HLUT_MAX_INDEX + 1) entries.
             */
2629         for (i = 0, j = 0; i < 4 * (I40E_VFQF_HLUT_MAX_INDEX + 1); i++, j++) {
2630                 if (j == vsi->num_queues)
2631                         j = 0;
2632 #ifdef RSS
2633                 /*
2634                  * Fetch the RSS bucket id for the given indirection entry.
2635                  * Cap it at the number of configured buckets (which is
2636                  * num_queues.)
2637                  */
2638                 que_id = rss_get_indirection_to_bucket(i);
2639                 que_id = que_id % vsi->num_queues;
2640 #else
2641                 que_id = j;
2642 #endif
2643                 /* lut = 4-byte sliding window of 4 lut entries */
2644                 lut = (lut << 8) | (que_id & 0xF);
2645                 /* On i = 3, we have 4 entries in lut; write to the register */
2646                 if ((i & 3) == 3) {
2647                         wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
2648                         DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i >> 2, lut);
2649                 }
2650         }
2651         ixl_flush(hw);
2652 }
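     /*
     ** LUT packing example: with 4 queues the loop emits que_ids
     ** 0,1,2,3,0,1,... and every fourth iteration flushes the 4-entry
     ** window, e.g. HLUT(0) = 0x00010203 after the i = 3 pass.
     */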
2653
2654
2655 /*
2656 ** This routine refreshes vlan filters, called by init
2657 ** it scans the filter table and then updates the AQ
2658 */
2659 static void
2660 ixlv_setup_vlan_filters(struct ixlv_sc *sc)
2661 {
2662         struct ixl_vsi                  *vsi = &sc->vsi;
2663         struct ixlv_vlan_filter *f;
2664         int                             cnt = 0;
2665
2666         if (vsi->num_vlans == 0)
2667                 return;
2668         /*
2669         ** Scan the filter table for vlan entries,
2670         ** and if found call for the AQ update.
2671         */
2672         SLIST_FOREACH(f, sc->vlan_filters, next)
2673                 if (f->flags & IXL_FILTER_ADD)
2674                         cnt++;
2675         if (cnt > 0)
2676                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
2677                     IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
2678 }
2679
2680
2681 /*
2682 ** This routine adds new MAC filters to the sc's list;
2683 ** these are later added in hardware by sending a virtual
2684 ** channel message.
2685 */
2686 static int
2687 ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
2688 {
2689         struct ixlv_mac_filter  *f;
2690         device_t                        dev = sc->dev;
2691
2692         /* Does one already exist? */
2693         f = ixlv_find_mac_filter(sc, macaddr);
2694         if (f != NULL) {
2695                 IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
2696                     MAC_FORMAT_ARGS(macaddr));
2697                 return (EEXIST);
2698         }
2699
2700         /* If not, get a new empty filter */
2701         f = ixlv_get_mac_filter(sc);
2702         if (f == NULL) {
2703                 device_printf(dev, "%s: no filters available!!\n",
2704                     __func__);
2705                 return (ENOMEM);
2706         }
2707
2708         IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
2709             MAC_FORMAT_ARGS(macaddr));
2710
2711         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2712         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
2713         f->flags |= flags;
2714         return (0);
2715 }
2716
2717 /*
2718 ** Marks a MAC filter for deletion.
2719 */
2720 static int
2721 ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
2722 {
2723         struct ixlv_mac_filter  *f;
2724
2725         f = ixlv_find_mac_filter(sc, macaddr);
2726         if (f == NULL)
2727                 return (ENOENT);
2728
2729         f->flags |= IXL_FILTER_DEL;
2730         return (0);
2731 }
2732
2733 /*
2734 ** Tasklet handler for MSIX Adminq interrupts
2735 **  - done outside interrupt context since it might sleep
2736 */
2737 static void
2738 ixlv_do_adminq(void *context, int pending)
2739 {
2740         struct ixlv_sc          *sc = context;
2741
2742         mtx_lock(&sc->mtx);
2743         ixlv_do_adminq_locked(sc);
2744         mtx_unlock(&sc->mtx);
2745         return;
2746 }
2747
2748 static void
2749 ixlv_do_adminq_locked(struct ixlv_sc *sc)
2750 {
2751         struct i40e_hw                  *hw = &sc->hw;
2752         struct i40e_arq_event_info      event;
2753         struct i40e_virtchnl_msg        *v_msg;
2754         i40e_status                     ret;
2755         u16                             result = 0;
2756
2757         IXLV_CORE_LOCK_ASSERT(sc);
2758
2759         event.buf_len = IXL_AQ_BUF_SZ;
2760         event.msg_buf = sc->aq_buffer;
2761         v_msg = (struct i40e_virtchnl_msg *)&event.desc;
2762
2763         do {
2764                 ret = i40e_clean_arq_element(hw, &event, &result);
2765                 if (ret)
2766                         break;
2767                 ixlv_vc_completion(sc, v_msg->v_opcode,
2768                     v_msg->v_retval, event.msg_buf, event.msg_len);
2769                 if (result != 0)
2770                         bzero(event.msg_buf, IXL_AQ_BUF_SZ);
2771         } while (result);
2772
2773         ixlv_enable_adminq_irq(hw);
2774 }
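     /*
     ** i40e_clean_arq_element() reports the number of messages still
     ** pending via 'result', so the loop drains the ARQ completely and
     ** scrubs the shared buffer between messages before the admin queue
     ** interrupt is re-enabled.
     */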
2775
2776 static void
2777 ixlv_add_sysctls(struct ixlv_sc *sc)
2778 {
2779         device_t dev = sc->dev;
2780         struct ixl_vsi *vsi = &sc->vsi;
2781         struct i40e_eth_stats *es = &vsi->eth_stats;
2782
2783         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2784         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2785         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2786
2787         struct sysctl_oid *vsi_node, *queue_node;
2788         struct sysctl_oid_list *vsi_list, *queue_list;
2789
2790 #define QUEUE_NAME_LEN 32
2791         char queue_namebuf[QUEUE_NAME_LEN];
2792
2793         struct ixl_queue *queues = vsi->queues;
2794         struct tx_ring *txr;
2795         struct rx_ring *rxr;
2796
2797         /* Driver statistics sysctls */
2798         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2799                         CTLFLAG_RD, &sc->watchdog_events,
2800                         "Watchdog timeouts");
2801         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2802                         CTLFLAG_RD, &sc->admin_irq,
2803                         "Admin Queue IRQ Handled");
2804
2805         /* VSI statistics sysctls */
2806         vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
2807                                    CTLFLAG_RD, NULL, "VSI-specific statistics");
2808         vsi_list = SYSCTL_CHILDREN(vsi_node);
2809
2810         struct ixl_sysctl_info ctls[] =
2811         {
2812                 {&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2813                 {&es->rx_unicast, "ucast_pkts_rcvd",
2814                         "Unicast Packets Received"},
2815                 {&es->rx_multicast, "mcast_pkts_rcvd",
2816                         "Multicast Packets Received"},
2817                 {&es->rx_broadcast, "bcast_pkts_rcvd",
2818                         "Broadcast Packets Received"},
2819                 {&es->rx_discards, "rx_discards", "Discarded RX packets"},
2820                 {&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"},
2821                 {&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2822                 {&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2823                 {&es->tx_multicast, "mcast_pkts_txd",
2824                         "Multicast Packets Transmitted"},
2825                 {&es->tx_broadcast, "bcast_pkts_txd",
2826                         "Broadcast Packets Transmitted"},
2827                 {&es->tx_errors, "tx_errors", "TX packet errors"},
2828                 // end
2829                 {0,0,0}
2830         };
2831         struct ixl_sysctl_info *entry = ctls;
2832         while (entry->stat != 0)
2833         {
2834                 SYSCTL_ADD_UQUAD(ctx, vsi_list, OID_AUTO, entry->name,
2835                                 CTLFLAG_RD, entry->stat,
2836                                 entry->description);
2837                 entry++;
2838         }
2839
2840         /* Queue sysctls */
2841         for (int q = 0; q < vsi->num_queues; q++) {
2842                 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2843                 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
2844                                              CTLFLAG_RD, NULL, "Queue statistics");
2845                 queue_list = SYSCTL_CHILDREN(queue_node);
2846
2847                 txr = &(queues[q].txr);
2848                 rxr = &(queues[q].rxr);
2849
2850                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2851                                 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2852                                 "m_defrag() failed");
2853                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
2854                                 CTLFLAG_RD, &(queues[q].dropped_pkts),
2855                                 "Driver dropped packets");
2856                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2857                                 CTLFLAG_RD, &(queues[q].irqs),
2858                                 "irqs on this queue");
2859                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2860                                 CTLFLAG_RD, &(queues[q].tso),
2861                                 "TSO");
2862                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
2863                                 CTLFLAG_RD, &(queues[q].tx_dma_setup),
2864                                 "Driver tx dma failure in xmit");
2865                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2866                                 CTLFLAG_RD, &(txr->no_desc),
2867                                 "Queue No Descriptor Available");
2868                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2869                                 CTLFLAG_RD, &(txr->total_packets),
2870                                 "Queue Packets Transmitted");
2871                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2872                                 CTLFLAG_RD, &(txr->tx_bytes),
2873                                 "Queue Bytes Transmitted");
2874                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2875                                 CTLFLAG_RD, &(rxr->rx_packets),
2876                                 "Queue Packets Received");
2877                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2878                                 CTLFLAG_RD, &(rxr->rx_bytes),
2879                                 "Queue Bytes Received");
2880
2881                 /* Examine queue state */
2882                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
2883                                 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2884                                 sizeof(struct ixl_queue),
2885                                 ixlv_sysctl_qtx_tail_handler, "IU",
2886                                 "Queue Transmit Descriptor Tail");
2887                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
2888                                 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2889                                 sizeof(struct ixl_queue),
2890                                 ixlv_sysctl_qrx_tail_handler, "IU",
2891                                 "Queue Receive Descriptor Tail");
2892         }
2893 }
2894
2895 static void
2896 ixlv_init_filters(struct ixlv_sc *sc)
2897 {
2898         sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
2899             M_DEVBUF, M_NOWAIT | M_ZERO);
2900         SLIST_INIT(sc->mac_filters);
2901         sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
2902             M_DEVBUF, M_NOWAIT | M_ZERO);
2903         SLIST_INIT(sc->vlan_filters);
2904         return;
2905 }
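     /*
     ** Note: the list heads above are allocated with the element sizes
     ** rather than the size of an SLIST_HEAD, which over-allocates but is
     ** harmless; the M_NOWAIT results are also used unchecked, so a very
     ** early allocation failure would fault.
     */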
2906
2907 static void
2908 ixlv_free_filters(struct ixlv_sc *sc)
2909 {
2910         struct ixlv_mac_filter *f;
2911         struct ixlv_vlan_filter *v;
2912
2913         while (!SLIST_EMPTY(sc->mac_filters)) {
2914                 f = SLIST_FIRST(sc->mac_filters);
2915                 SLIST_REMOVE_HEAD(sc->mac_filters, next);
2916                 free(f, M_DEVBUF);
2917         }
2918         while (!SLIST_EMPTY(sc->vlan_filters)) {
2919                 v = SLIST_FIRST(sc->vlan_filters);
2920                 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
2921                 free(v, M_DEVBUF);
2922         }
2923         return;
2924 }
2925
2926 /**
2927  * ixlv_sysctl_qtx_tail_handler
2928  * Retrieves I40E_QTX_TAIL1 value from hardware
2929  * for a sysctl.
2930  */
2931 static int 
2932 ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
2933 {
2934         struct ixl_queue *que;
2935         int error;
2936         u32 val;
2937
2938         que = ((struct ixl_queue *)oidp->oid_arg1);
2939         if (que == NULL)
                     return (0);
2940
2941         val = rd32(que->vsi->hw, que->txr.tail);
2942         error = sysctl_handle_int(oidp, &val, 0, req);
2943         if (error || !req->newptr)
2944                 return (error);
2945         return (0);
2946 }
2947
2948 /**
2949  * ixlv_sysctl_qrx_tail_handler
2950  * Retrieves I40E_QRX_TAIL1 value from hardware
2951  * for a sysctl.
2952  */
2953 static int 
2954 ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
2955 {
2956         struct ixl_queue *que;
2957         int error;
2958         u32 val;
2959
2960         que = ((struct ixl_queue *)oidp->oid_arg1);
2961         if (que == NULL)
                     return (0);
2962
2963         val = rd32(que->vsi->hw, que->rxr.tail);
2964         error = sysctl_handle_int(oidp, &val, 0, req);
2965         if (error || !req->newptr)
2966                 return (error);
2967         return (0);
2968 }
2969