/******************************************************************************

  Copyright (c) 2013-2019, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include <sys/limits.h>

#include "ixl.h"
#include "ixlv.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
#define IXLV_DRIVER_VERSION_MAJOR       1
#define IXLV_DRIVER_VERSION_MINOR       5
#define IXLV_DRIVER_VERSION_BUILD       8

char ixlv_driver_version[] = __XSTRING(IXLV_DRIVER_VERSION_MAJOR) "."
                             __XSTRING(IXLV_DRIVER_VERSION_MINOR) "."
                             __XSTRING(IXLV_DRIVER_VERSION_BUILD) "-k";
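
/*
 * Note: __XSTRING expands its macro argument before stringifying it,
 * so the concatenated literal above evaluates to "1.5.8-k".
 */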

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixlv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, 0, 0, 0},
        /* required last entry */
        {0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixlv_strings[] = {
        "Intel(R) Ethernet Connection 700 Series VF Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixlv_probe(device_t);
static int      ixlv_attach(device_t);
static int      ixlv_detach(device_t);
static int      ixlv_shutdown(device_t);
static void     ixlv_init_locked(struct ixlv_sc *);
static int      ixlv_allocate_pci_resources(struct ixlv_sc *);
static void     ixlv_free_pci_resources(struct ixlv_sc *);
static int      ixlv_assign_msix(struct ixlv_sc *);
static int      ixlv_init_msix(struct ixlv_sc *);
static int      ixlv_init_taskqueue(struct ixlv_sc *);
static int      ixlv_setup_queues(struct ixlv_sc *);
static void     ixlv_config_rss(struct ixlv_sc *);
static void     ixlv_stop(struct ixlv_sc *);
static void     ixlv_add_multi(struct ixl_vsi *);
static void     ixlv_del_multi(struct ixl_vsi *);
static void     ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que);
static void     ixlv_free_queues(struct ixl_vsi *);
static int      ixlv_setup_interface(device_t, struct ixlv_sc *);
static int      ixlv_teardown_adminq_msix(struct ixlv_sc *);

static int      ixlv_media_change(struct ifnet *);
static void     ixlv_media_status(struct ifnet *, struct ifmediareq *);

static void     ixlv_local_timer(void *);

static int      ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
static int      ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void     ixlv_init_filters(struct ixlv_sc *);
static void     ixlv_free_filters(struct ixlv_sc *);

static void     ixlv_msix_que(void *);
static void     ixlv_msix_adminq(void *);
static void     ixlv_do_adminq(void *, int);
static void     ixlv_do_adminq_locked(struct ixlv_sc *sc);
static void     ixlv_handle_que(void *, int);
static int      ixlv_reset(struct ixlv_sc *);
static int      ixlv_reset_complete(struct i40e_hw *);
static void     ixlv_set_queue_rx_itr(struct ixl_queue *);
static void     ixlv_set_queue_tx_itr(struct ixl_queue *);
static void     ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
                    enum i40e_status_code);
static void     ixlv_configure_itr(struct ixlv_sc *);

static void     ixlv_enable_adminq_irq(struct i40e_hw *);
static void     ixlv_disable_adminq_irq(struct i40e_hw *);
static void     ixlv_enable_queue_irq(struct i40e_hw *, int);
static void     ixlv_disable_queue_irq(struct i40e_hw *, int);

static void     ixlv_setup_vlan_filters(struct ixlv_sc *);
static void     ixlv_register_vlan(void *, struct ifnet *, u16);
static void     ixlv_unregister_vlan(void *, struct ifnet *, u16);

static void     ixlv_init_hw(struct ixlv_sc *);
static int      ixlv_setup_vc(struct ixlv_sc *);
static int      ixlv_vf_config(struct ixlv_sc *);

static void     ixlv_cap_txcsum_tso(struct ixl_vsi *,
                    struct ifnet *, int);

static char *ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed);
static int ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS);

static void     ixlv_add_sysctls(struct ixlv_sc *);
#ifdef IXL_DEBUG
static int      ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int      ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixlv_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, ixlv_probe),
        DEVMETHOD(device_attach, ixlv_attach),
        DEVMETHOD(device_detach, ixlv_detach),
        DEVMETHOD(device_shutdown, ixlv_shutdown),
        {0, 0}
};

static driver_t ixlv_driver = {
        "ixlv", ixlv_methods, sizeof(struct ixlv_sc),
};

devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);

MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
                   "IXLV driver parameters");

/*
** Number of descriptors per ring:
** - TX and RX sizes are independently configurable
*/
static int ixlv_tx_ring_size = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixlv.tx_ring_size", &ixlv_tx_ring_size);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_ring_size, CTLFLAG_RDTUN,
    &ixlv_tx_ring_size, 0, "TX Descriptor Ring Size");

static int ixlv_rx_ring_size = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixlv.rx_ring_size", &ixlv_rx_ring_size);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_ring_size, CTLFLAG_RDTUN,
    &ixlv_rx_ring_size, 0, "RX Descriptor Ring Size");

/* Set to zero to auto calculate  */
int ixlv_max_queues = 0;
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixlv_max_queues, 0, "Number of Queues");

/*
** Number of entries in Tx queue buf_ring.
** Increasing this will reduce the number of
** errors when transmitting fragmented UDP
** packets.
*/
static int ixlv_txbrsz = DEFAULT_TXBRSZ;
TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
    &ixlv_txbrsz, 0, "TX Buf Ring Size");

/*
 * Different method for processing TX descriptor
 * completion.
 */
static int ixlv_enable_head_writeback = 0;
TUNABLE_INT("hw.ixlv.enable_head_writeback",
    &ixlv_enable_head_writeback);
SYSCTL_INT(_hw_ixlv, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &ixlv_enable_head_writeback, 0,
    "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");

/*
** Controls for Interrupt Throttling
**      - true/false for dynamic adjustment
**      - default values for static ITR
*/
int ixlv_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixlv_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixlv_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixlv_rx_itr, 0, "RX Interrupt Rate");

int ixlv_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixlv_tx_itr, 0, "TX Interrupt Rate");

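/*
 * All of the CTLFLAG_RDTUN knobs above are read once at module load
 * time, so they are normally set as loader tunables, e.g. in
 * /boot/loader.conf (values below are examples only):
 *
 *      hw.ixlv.max_queues="4"
 *      hw.ixlv.enable_head_writeback="1"
 */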

/*********************************************************************
 *  Device identification routine
 *
 *  ixlv_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixlv_probe(device_t dev)
{
        ixl_vendor_info_t *ent;

        u16     pci_vendor_id, pci_device_id;
        u16     pci_subvendor_id, pci_subdevice_id;
        char    device_name[256];

#if 0
        INIT_DEBUGOUT("ixlv_probe: begin");
#endif

        pci_vendor_id = pci_get_vendor(dev);
        if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
                return (ENXIO);

        pci_device_id = pci_get_device(dev);
        pci_subvendor_id = pci_get_subvendor(dev);
        pci_subdevice_id = pci_get_subdevice(dev);

        ent = ixlv_vendor_info_array;
        while (ent->vendor_id != 0) {
                if ((pci_vendor_id == ent->vendor_id) &&
                    (pci_device_id == ent->device_id) &&
                    ((pci_subvendor_id == ent->subvendor_id) ||
                     (ent->subvendor_id == 0)) &&
                    ((pci_subdevice_id == ent->subdevice_id) ||
                     (ent->subdevice_id == 0))) {
                        sprintf(device_name, "%s, Version - %s",
                                ixlv_strings[ent->index],
                                ixlv_driver_version);
                        device_set_desc_copy(dev, device_name);
                        return (BUS_PROBE_DEFAULT);
                }
                ent++;
        }
        return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_attach(device_t dev)
{
        struct ixlv_sc  *sc;
        struct i40e_hw  *hw;
        struct ixl_vsi  *vsi;
        int             error = 0;

        INIT_DBG_DEV(dev, "begin");

        /* Allocate, clear, and link in our primary soft structure */
        sc = device_get_softc(dev);
        sc->dev = sc->osdep.dev = dev;
        hw = &sc->hw;
        vsi = &sc->vsi;
        vsi->dev = dev;

        /* Initialize hw struct */
        ixlv_init_hw(sc);

        /* Allocate filter lists */
        ixlv_init_filters(sc);

        /* Save this tunable */
        vsi->enable_head_writeback = ixlv_enable_head_writeback;

        /* Core Lock Init */
        mtx_init(&sc->mtx, device_get_nameunit(dev),
            "IXL SC Lock", MTX_DEF);

        /* Set up the timer callout */
        callout_init_mtx(&sc->timer, &sc->mtx, 0);

        /* Do PCI setup - map BAR0, etc */
        if (ixlv_allocate_pci_resources(sc)) {
                device_printf(dev, "%s: Allocation of PCI resources failed\n",
                    __func__);
                error = ENXIO;
                goto err_early;
        }

        INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");

        error = i40e_set_mac_type(hw);
        if (error) {
                device_printf(dev, "%s: set_mac_type failed: %d\n",
                    __func__, error);
                goto err_pci_res;
        }

        error = ixlv_reset_complete(hw);
        if (error) {
                device_printf(dev, "%s: Device is still being reset\n",
                    __func__);
                goto err_pci_res;
        }

        INIT_DBG_DEV(dev, "VF Device is ready for configuration");

        error = ixlv_setup_vc(sc);
        if (error) {
                device_printf(dev, "%s: Error setting up PF comms, %d\n",
                    __func__, error);
                goto err_pci_res;
        }

        INIT_DBG_DEV(dev, "PF API version verified");

        /* Need API version before sending reset message */
        error = ixlv_reset(sc);
        if (error) {
                device_printf(dev, "VF reset failed; reload the driver\n");
                goto err_aq;
        }

        INIT_DBG_DEV(dev, "VF reset complete");

        /* Ask for VF config from PF */
        error = ixlv_vf_config(sc);
        if (error) {
                device_printf(dev, "Error getting configuration from PF: %d\n",
                    error);
                goto err_aq;
        }

        device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n",
            sc->vf_res->num_vsis,
            sc->vf_res->num_queue_pairs,
            sc->vf_res->max_vectors,
            sc->vf_res->rss_key_size,
            sc->vf_res->rss_lut_size);
#ifdef IXL_DEBUG
        device_printf(dev, "Offload flags: 0x%b\n",
            sc->vf_res->vf_offload_flags, IXLV_PRINTF_VF_OFFLOAD_FLAGS);
#endif

        /* got VF config message back from PF, now we can parse it */
        for (int i = 0; i < sc->vf_res->num_vsis; i++) {
                if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
                        sc->vsi_res = &sc->vf_res->vsi_res[i];
        }
        if (!sc->vsi_res) {
                device_printf(dev, "%s: no LAN VSI found\n", __func__);
                error = EIO;
                goto err_res_buf;
        }

        INIT_DBG_DEV(dev, "Resource Acquisition complete");

        /* If no mac address was assigned just make a random one */
        if (!ixlv_check_ether_addr(hw->mac.addr)) {
                u8 addr[ETHER_ADDR_LEN];
                arc4rand(&addr, sizeof(addr), 0);
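                /*
                 * Clear the I/G (multicast) bit and set the U/L bit so
                 * the random address is a valid locally administered
                 * unicast MAC.
                 */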
                addr[0] &= 0xFE;
                addr[0] |= 0x02;
                bcopy(addr, hw->mac.addr, sizeof(addr));
        }

        /* Now that the number of queues for this VF is known, set up interrupts */
        sc->msix = ixlv_init_msix(sc);
        /* We fail without MSIX support */
        if (sc->msix == 0) {
                error = ENXIO;
                goto err_res_buf;
        }

        vsi->id = sc->vsi_res->vsi_id;
        vsi->back = (void *)sc;
        vsi->flags |= IXL_FLAGS_IS_VF | IXL_FLAGS_USES_MSIX;

        ixl_vsi_setup_rings_size(vsi, ixlv_tx_ring_size, ixlv_rx_ring_size);

        /* This allocates the memory and early settings */
        if (ixlv_setup_queues(sc) != 0) {
                device_printf(dev, "%s: setup queues failed!\n",
                    __func__);
                error = EIO;
                goto out;
        }

        /* Do queue interrupt setup */
        if (ixlv_assign_msix(sc) != 0) {
                device_printf(dev, "%s: allocating queue interrupts failed!\n",
                    __func__);
                error = ENXIO;
                goto out;
        }

        INIT_DBG_DEV(dev, "Queue memory and interrupts setup");

        /* Setup the stack interface */
        if (ixlv_setup_interface(dev, sc) != 0) {
                device_printf(dev, "%s: setup interface failed!\n",
                    __func__);
                error = EIO;
                goto out;
        }

        INIT_DBG_DEV(dev, "Interface setup complete");

        /* Start AdminQ taskqueue */
        ixlv_init_taskqueue(sc);

        /* We expect a link state message, so schedule the AdminQ task now */
        taskqueue_enqueue(sc->tq, &sc->aq_irq);

        /* Initialize stats */
        bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
        ixlv_add_sysctls(sc);

        /* Register for VLAN events */
        vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
            ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
        vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
            ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

        /* We want AQ enabled early */
        ixlv_enable_adminq_irq(hw);

        /* Set things up to run init */
        sc->init_state = IXLV_INIT_READY;

        ixl_vc_init_mgr(sc, &sc->vc_mgr);

        INIT_DBG_DEV(dev, "end");
        return (error);

out:
        ixlv_free_queues(vsi);
        ixlv_teardown_adminq_msix(sc);
err_res_buf:
        free(sc->vf_res, M_DEVBUF);
err_aq:
        i40e_shutdown_adminq(hw);
err_pci_res:
        ixlv_free_pci_resources(sc);
err_early:
        mtx_destroy(&sc->mtx);
        ixlv_free_filters(sc);
        INIT_DBG_DEV(dev, "end: error %d", error);
        return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_detach(device_t dev)
{
        struct ixlv_sc  *sc = device_get_softc(dev);
        struct ixl_vsi  *vsi = &sc->vsi;
        struct i40e_hw  *hw = &sc->hw;
        enum i40e_status_code   status;

        INIT_DBG_DEV(dev, "begin");

        /* Make sure VLANS are not using driver */
        if (vsi->ifp->if_vlantrunk != NULL) {
                if_printf(vsi->ifp, "Vlan in use, detach first\n");
                return (EBUSY);
        }

        /* Remove all the media and link information */
        ifmedia_removeall(&sc->media);

        /* Stop driver */
        ether_ifdetach(vsi->ifp);
        if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
                mtx_lock(&sc->mtx);
                ixlv_stop(sc);
                mtx_unlock(&sc->mtx);
        }

        /* Unregister VLAN events */
        if (vsi->vlan_attach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
        if (vsi->vlan_detach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

        /* Drain VC mgr */
        callout_drain(&sc->vc_mgr.callout);

        ixlv_disable_adminq_irq(hw);
        ixlv_teardown_adminq_msix(sc);
        /* Drain admin queue taskqueue */
        taskqueue_free(sc->tq);
        status = i40e_shutdown_adminq(&sc->hw);
        if (status != I40E_SUCCESS) {
                device_printf(dev,
                    "i40e_shutdown_adminq() failed with status %s\n",
                    i40e_stat_str(hw, status));
        }

        if_free(vsi->ifp);
        free(sc->vf_res, M_DEVBUF);
        ixlv_free_queues(vsi);
        ixlv_free_pci_resources(sc);
        ixlv_free_filters(sc);

        bus_generic_detach(dev);
        mtx_destroy(&sc->mtx);
        INIT_DBG_DEV(dev, "end");
        return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixlv_shutdown(device_t dev)
{
        struct ixlv_sc  *sc = device_get_softc(dev);

        INIT_DBG_DEV(dev, "begin");

        mtx_lock(&sc->mtx);
        ixlv_stop(sc);
        mtx_unlock(&sc->mtx);

        INIT_DBG_DEV(dev, "end");
        return (0);
}

/*
 * Configure TXCSUM(IPV6) and TSO(4/6)
 *      - the hardware handles these together so we
 *        need to tweak them
 */
static void
ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
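        /*
         * The branches below enumerate the current (TXCSUM, TSO)
         * state: neither enabled, only TXCSUM enabled, or both
         * enabled.  Since TSO requires TXCSUM, a request that would
         * break that invariant toggles both capabilities and logs a
         * note on the console instead.
         */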
        /* Enable/disable TXCSUM/TSO4 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable |= IFCAP_TXCSUM;
                        /* enable TXCSUM, restore TSO if previously enabled */
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                                ifp->if_capenable |= IFCAP_TSO4;
                        }
                } else if (mask & IFCAP_TSO4) {
                        ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                        if_printf(ifp,
                            "TSO4 requires txcsum, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM)
                        ifp->if_capenable &= ~IFCAP_TXCSUM;
                else if (mask & IFCAP_TSO4)
                        ifp->if_capenable |= IFCAP_TSO4;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && (ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO4;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
                        if_printf(ifp,
                            "TSO4 requires txcsum, disabling both...\n");
                } else if (mask & IFCAP_TSO4)
                        ifp->if_capenable &= ~IFCAP_TSO4;
        }

        /* Enable/disable TXCSUM_IPV6/TSO6 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                                ifp->if_capenable |= IFCAP_TSO6;
                        }
                } else if (mask & IFCAP_TSO6) {
                        ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                        if_printf(ifp,
                            "TSO6 requires txcsum6, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6)
                        ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
                else if (mask & IFCAP_TSO6)
                        ifp->if_capenable |= IFCAP_TSO6;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && (ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO6;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        if_printf(ifp,
                            "TSO6 requires txcsum6, disabling both...\n");
                } else if (mask & IFCAP_TSO6)
                        ifp->if_capenable &= ~IFCAP_TSO6;
        }
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixlv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        struct ixl_vsi          *vsi = ifp->if_softc;
        struct ixlv_sc  *sc = vsi->back;
        struct ifreq            *ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
        struct ifaddr           *ifa = (struct ifaddr *)data;
        bool                    avoid_reset = FALSE;
#endif
        int                     error = 0;

        switch (command) {

        case SIOCSIFADDR:
#ifdef INET
                if (ifa->ifa_addr->sa_family == AF_INET)
                        avoid_reset = TRUE;
#endif
#ifdef INET6
                if (ifa->ifa_addr->sa_family == AF_INET6)
                        avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
                /*
                ** Calling init results in link renegotiation,
                ** so we avoid doing it when possible.
                */
                if (avoid_reset) {
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                                ixlv_init(vsi);
#ifdef INET
                        if (!(ifp->if_flags & IFF_NOARP))
                                arp_ifinit(ifp, ifa);
#endif
                } else
                        error = ether_ioctl(ifp, command, data);
                break;
#endif
        case SIOCSIFMTU:
                IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
                mtx_lock(&sc->mtx);
                if (ifr->ifr_mtu > IXL_MAX_FRAME -
                    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
                        error = EINVAL;
                        IOCTL_DBG_IF(ifp, "mtu too large");
                } else {
                        IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", (u_long)ifp->if_mtu, ifr->ifr_mtu);
                        // ERJ: Interestingly enough, these types don't match
                        ifp->if_mtu = (u_long)ifr->ifr_mtu;
                        vsi->max_frame_size =
                            ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
                            + ETHER_VLAN_ENCAP_LEN;
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                ixlv_init_locked(sc);
                }
                mtx_unlock(&sc->mtx);
                break;
        case SIOCSIFFLAGS:
                IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
                mtx_lock(&sc->mtx);
                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                                ixlv_init_locked(sc);
                } else
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                ixlv_stop(sc);
                sc->if_flags = ifp->if_flags;
                mtx_unlock(&sc->mtx);
                break;
        case SIOCADDMULTI:
                IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        mtx_lock(&sc->mtx);
                        ixlv_disable_intr(vsi);
                        ixlv_add_multi(vsi);
                        ixlv_enable_intr(vsi);
                        mtx_unlock(&sc->mtx);
                }
                break;
        case SIOCDELMULTI:
                IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
                if (sc->init_state == IXLV_RUNNING) {
                        mtx_lock(&sc->mtx);
                        ixlv_disable_intr(vsi);
                        ixlv_del_multi(vsi);
                        ixlv_enable_intr(vsi);
                        mtx_unlock(&sc->mtx);
                }
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
                error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
                break;
        case SIOCSIFCAP:
        {
                int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");

                ixlv_cap_txcsum_tso(vsi, ifp, mask);

                if (mask & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                if (mask & IFCAP_RXCSUM_IPV6)
                        ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
                if (mask & IFCAP_LRO)
                        ifp->if_capenable ^= IFCAP_LRO;
                if (mask & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                if (mask & IFCAP_VLAN_HWFILTER)
                        ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        ixlv_init(vsi);
                }
                VLAN_CAPABILITIES(ifp);

                break;
        }

        default:
                IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}

/*
** Reinitializing the VF is unfortunately more complicated than for a
** physical device: the PF must more or less completely recreate our
** memory, so many things that traditional drivers do only once at
** attach must be redone here at each reinitialization. This function
** does that 'prelude' so we can then call the normal locked init code.
*/
int
ixlv_reinit_locked(struct ixlv_sc *sc)
{
        struct i40e_hw          *hw = &sc->hw;
        struct ixl_vsi          *vsi = &sc->vsi;
        struct ifnet            *ifp = vsi->ifp;
        struct ixlv_mac_filter  *mf, *mf_temp;
        struct ixlv_vlan_filter *vf;
        int                     error = 0;

        INIT_DBG_IF(ifp, "begin");

        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                ixlv_stop(sc);

        error = ixlv_reset(sc);

        INIT_DBG_IF(ifp, "VF was reset");

        /* set the state in case we went thru RESET */
        sc->init_state = IXLV_RUNNING;

        /*
        ** Resetting the VF drops all filters from hardware;
        ** we need to mark them to be re-added in init.
        */
        SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
                if (mf->flags & IXL_FILTER_DEL) {
                        SLIST_REMOVE(sc->mac_filters, mf,
                            ixlv_mac_filter, next);
                        free(mf, M_DEVBUF);
                } else
                        mf->flags |= IXL_FILTER_ADD;
        }
        if (vsi->num_vlans != 0)
                SLIST_FOREACH(vf, sc->vlan_filters, next)
                        vf->flags = IXL_FILTER_ADD;
        else { /* clean any stale filters */
                while (!SLIST_EMPTY(sc->vlan_filters)) {
                        vf = SLIST_FIRST(sc->vlan_filters);
                        SLIST_REMOVE_HEAD(sc->vlan_filters, next);
                        free(vf, M_DEVBUF);
                }
        }

        ixlv_enable_adminq_irq(hw);
        ixl_vc_flush(&sc->vc_mgr);

        INIT_DBG_IF(ifp, "end");
        return (error);
}

static void
ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
        enum i40e_status_code code)
{
        struct ixlv_sc *sc;

        sc = arg;

        /*
         * Ignore "Adapter Stopped" message as that happens if an ifconfig down
         * happens while a command is in progress, so we don't print an error
         * in that case.
         */
        if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
                if_printf(sc->vsi.ifp,
                    "Error %s waiting for PF to complete operation %d\n",
                    i40e_stat_str(&sc->hw, code), cmd->request);
        }
}

static void
ixlv_init_locked(struct ixlv_sc *sc)
{
        struct i40e_hw          *hw = &sc->hw;
        struct ixl_vsi          *vsi = &sc->vsi;
        struct ixl_queue        *que = vsi->queues;
        struct ifnet            *ifp = vsi->ifp;
        int                      error = 0;

        INIT_DBG_IF(ifp, "begin");

        IXLV_CORE_LOCK_ASSERT(sc);

        /* Do a reinit first if an init has already been done */
        if ((sc->init_state == IXLV_RUNNING) ||
            (sc->init_state == IXLV_RESET_REQUIRED) ||
            (sc->init_state == IXLV_RESET_PENDING))
                error = ixlv_reinit_locked(sc);
        /* Don't bother with init if we failed reinit */
        if (error)
                goto init_done;

        /* Remove existing MAC filter if new MAC addr is set */
        if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
                error = ixlv_del_mac_filter(sc, hw->mac.addr);
                if (error == 0)
                        ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
                            IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
                            sc);
        }

        /* Check for an LAA mac address... */
        bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);

        ifp->if_hwassist = 0;
        if (ifp->if_capenable & IFCAP_TSO)
                ifp->if_hwassist |= CSUM_TSO;
        if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
        if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
                ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;

        /* Add mac filter for this VF to PF */
        if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
                error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
                if (!error || error == EEXIST)
                        ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
                            IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
                            sc);
        }

        /* Setup vlan's if needed */
        ixlv_setup_vlan_filters(sc);

        /* Prepare the queues for operation */
        for (int i = 0; i < vsi->num_queues; i++, que++) {
                struct  rx_ring *rxr = &que->rxr;

                ixl_init_tx_ring(que);

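                /* Size RX mbufs to the smallest cluster that holds a full frame */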
                if (vsi->max_frame_size <= MCLBYTES)
                        rxr->mbuf_sz = MCLBYTES;
                else
                        rxr->mbuf_sz = MJUMPAGESIZE;
                ixl_init_rx_ring(que);
        }

        /* Set initial ITR values */
        ixlv_configure_itr(sc);

        /* Configure queues */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
            IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);

        /* Set up RSS */
        ixlv_config_rss(sc);

        /* Map vectors */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
            IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);

        /* Enable queues */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
            IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);

        /* Start the local timer */
        callout_reset(&sc->timer, hz, ixlv_local_timer, sc);

        sc->init_state = IXLV_RUNNING;

init_done:
        INIT_DBG_IF(ifp, "end");
        return;
}

/*
**  Init entry point for the stack
*/
void
ixlv_init(void *arg)
{
        struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
        struct ixlv_sc *sc = vsi->back;
        int retries = 0;

        /* Prevent init from running again while waiting for AQ calls
         * made in init_locked() to complete. */
        mtx_lock(&sc->mtx);
        if (sc->init_in_progress) {
                mtx_unlock(&sc->mtx);
                return;
        } else
                sc->init_in_progress = true;

        ixlv_init_locked(sc);
        mtx_unlock(&sc->mtx);

        /* Wait for init_locked to finish */
        while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
            && ++retries < IXLV_MAX_INIT_WAIT) {
                i40e_msec_pause(25);
        }
        if (retries >= IXLV_MAX_INIT_WAIT) {
                if_printf(vsi->ifp,
                    "Init failed to complete in allotted time!\n");
        }

        mtx_lock(&sc->mtx);
        sc->init_in_progress = false;
        mtx_unlock(&sc->mtx);
}

/*
 * ixlv_attach() helper function; gathers information about
 * the (virtual) hardware for use elsewhere in the driver.
 */
static void
ixlv_init_hw(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;

        /* Save off the information about this board */
        hw->vendor_id = pci_get_vendor(dev);
        hw->device_id = pci_get_device(dev);
        hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
        hw->subsystem_vendor_id =
            pci_read_config(dev, PCIR_SUBVEND_0, 2);
        hw->subsystem_device_id =
            pci_read_config(dev, PCIR_SUBDEV_0, 2);

        hw->bus.device = pci_get_slot(dev);
        hw->bus.func = pci_get_function(dev);
}

/*
 * ixlv_attach() helper function; initializes the admin queue
 * and attempts to establish contact with the PF by
 * retrying the initial "API version" message several times
 * or until the PF responds.
 */
static int
ixlv_setup_vc(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;
        int error = 0, ret_error = 0, asq_retries = 0;
        bool send_api_ver_retried = false;

        /* Need to set these AQ parameters before initializing AQ */
        hw->aq.num_arq_entries = IXL_AQ_LEN;
        hw->aq.num_asq_entries = IXL_AQ_LEN;
        hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
        hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

        for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
                /* Initialize admin queue */
                error = i40e_init_adminq(hw);
                if (error) {
                        device_printf(dev, "%s: init_adminq failed: %d\n",
                            __func__, error);
                        ret_error = 1;
                        continue;
                }

                INIT_DBG_DEV(dev, "Initialized Admin Queue; starting"
                    " send_api_ver attempt %d", i+1);

retry_send:
                /* Send VF's API version */
                error = ixlv_send_api_ver(sc);
                if (error) {
                        i40e_shutdown_adminq(hw);
                        ret_error = 2;
                        device_printf(dev, "%s: unable to send api"
                            " version to PF on attempt %d, error %d\n",
                            __func__, i+1, error);
                }

                asq_retries = 0;
                while (!i40e_asq_done(hw)) {
                        if (++asq_retries > IXLV_AQ_MAX_ERR) {
                                i40e_shutdown_adminq(hw);
                                device_printf(dev, "Admin Queue timeout "
                                    "(waiting for send_api_ver), %d more tries...\n",
                                    IXLV_AQ_MAX_ERR - (i + 1));
                                ret_error = 3;
                                break;
                        }
                        i40e_msec_pause(10);
                }
                if (asq_retries > IXLV_AQ_MAX_ERR)
                        continue;

                INIT_DBG_DEV(dev, "Sent API version message to PF");

                /* Verify that the VF accepts the PF's API version */
                error = ixlv_verify_api_ver(sc);
                if (error == ETIMEDOUT) {
                        if (!send_api_ver_retried) {
                                /* Resend message, one more time */
                                send_api_ver_retried = true;
                                device_printf(dev,
                                    "%s: Timeout while verifying API version on first"
                                    " try!\n", __func__);
                                goto retry_send;
                        } else {
                                device_printf(dev,
                                    "%s: Timeout while verifying API version on second"
                                    " try!\n", __func__);
                                ret_error = 4;
                                break;
                        }
                }
                if (error) {
                        device_printf(dev,
                            "%s: Unable to verify API version,"
                            " error %s\n", __func__, i40e_stat_str(hw, error));
                        ret_error = 5;
                }
                break;
        }

        if (ret_error >= 4)
                i40e_shutdown_adminq(hw);
        return (ret_error);
}

/*
 * ixlv_attach() helper function; asks the PF for this VF's
 * configuration, and saves the information if it receives it.
 */
static int
ixlv_vf_config(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;
        int bufsz, error = 0, ret_error = 0;
        int asq_retries, retried = 0;

retry_config:
        error = ixlv_send_vf_config_msg(sc);
        if (error) {
                device_printf(dev,
                    "%s: Unable to send VF config request, attempt %d,"
                    " error %d\n", __func__, retried + 1, error);
                ret_error = 2;
        }

        asq_retries = 0;
        while (!i40e_asq_done(hw)) {
                if (++asq_retries > IXLV_AQ_MAX_ERR) {
                        device_printf(dev, "%s: Admin Queue timeout "
                            "(waiting for send_vf_config_msg), attempt %d\n",
                            __func__, retried + 1);
                        ret_error = 3;
                        goto fail;
                }
                i40e_msec_pause(10);
        }

        INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
            retried + 1);

        if (!sc->vf_res) {
                bufsz = sizeof(struct virtchnl_vf_resource) +
                    (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
                sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
                if (!sc->vf_res) {
                        device_printf(dev,
                            "%s: Unable to allocate memory for VF configuration"
                            " message from PF on attempt %d\n", __func__, retried + 1);
                        ret_error = 1;
                        goto fail;
                }
        }

        /* Check for VF config response */
        error = ixlv_get_vf_config(sc);
        if (error == ETIMEDOUT) {
                /* The 1st time we timeout, send the configuration message again */
                if (!retried) {
                        retried++;
                        goto retry_config;
                }
                device_printf(dev,
                    "%s: ixlv_get_vf_config() timed out waiting for a response\n",
                    __func__);
        }
        if (error) {
                device_printf(dev,
                    "%s: Unable to get VF configuration from PF after %d tries!\n",
                    __func__, retried + 1);
                ret_error = 4;
        }
        goto done;

fail:
        free(sc->vf_res, M_DEVBUF);
done:
        return (ret_error);
}

/*
 * Allocate MSI/X vectors, setup the AQ vector early
 */
static int
ixlv_init_msix(struct ixlv_sc *sc)
{
        device_t dev = sc->dev;
        int rid, want, vectors, queues, available;
        int auto_max_queues;

        rid = PCIR_BAR(IXL_MSIX_BAR);
        sc->msix_mem = bus_alloc_resource_any(dev,
            SYS_RES_MEMORY, &rid, RF_ACTIVE);
        if (!sc->msix_mem) {
                /* May not be enabled */
                device_printf(sc->dev,
                    "Unable to map MSIX table\n");
                goto fail;
        }

        available = pci_msix_count(dev);
        if (available == 0) { /* system has msix disabled */
                bus_release_resource(dev, SYS_RES_MEMORY,
                    rid, sc->msix_mem);
                sc->msix_mem = NULL;
                goto fail;
        }

        /* Clamp queues to number of CPUs and # of MSI-X vectors available */
        auto_max_queues = min(mp_ncpus, available - 1);
        /* Clamp queues to # assigned to VF by PF */
        auto_max_queues = min(auto_max_queues, sc->vf_res->num_queue_pairs);

        /* Override with tunable value if tunable is less than autoconfig count */
        if ((ixlv_max_queues != 0) && (ixlv_max_queues <= auto_max_queues))
                queues = ixlv_max_queues;
        /* Use autoconfig amount if that's lower */
        else if ((ixlv_max_queues != 0) && (ixlv_max_queues > auto_max_queues)) {
                device_printf(dev, "ixlv_max_queues (%d) is too large, using "
                    "autoconfig amount (%d)...\n",
                    ixlv_max_queues, auto_max_queues);
                queues = auto_max_queues;
        }
        /* Limit maximum auto-configured queues to 8 if no user value is set */
        else
                queues = min(auto_max_queues, 8);

#ifdef  RSS
        /* If we're doing RSS, clamp at the number of RSS buckets */
        if (queues > rss_getnumbuckets())
                queues = rss_getnumbuckets();
#endif

        /*
        ** Want one vector (RX/TX pair) per queue
        ** plus an additional for the admin queue.
        */
        want = queues + 1;
        if (want <= available)  /* Have enough */
                vectors = want;
        else {
                device_printf(sc->dev,
                    "MSIX Configuration Problem, "
                    "%d vectors available but %d wanted!\n",
                    available, want);
                goto fail;
        }

#ifdef RSS
        /*
        * If we're doing RSS, the number of queues needs to
        * match the number of RSS buckets that are configured.
        *
        * + If there's more queues than RSS buckets, we'll end
        *   up with queues that get no traffic.
        *
        * + If there's more RSS buckets than queues, we'll end
        *   up having multiple RSS buckets map to the same queue,
        *   so there'll be some contention.
        */
        if (queues != rss_getnumbuckets()) {
                device_printf(dev,
                    "%s: queues (%d) != RSS buckets (%d)"
                    "; performance will be impacted.\n",
                     __func__, queues, rss_getnumbuckets());
        }
#endif

        if (pci_alloc_msix(dev, &vectors) == 0) {
                device_printf(sc->dev,
                    "Using MSIX interrupts with %d vectors\n", vectors);
                sc->msix = vectors;
                sc->vsi.num_queues = queues;
        }

        /* Next we need to setup the vector for the Admin Queue */
        rid = 1;        /* zero vector + 1 */
        sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &rid, RF_SHAREABLE | RF_ACTIVE);
        if (sc->res == NULL) {
                device_printf(dev, "Unable to allocate"
                    " bus resource: AQ interrupt\n");
                goto fail;
        }
        if (bus_setup_intr(dev, sc->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixlv_msix_adminq, sc, &sc->tag)) {
                sc->res = NULL;
                device_printf(dev, "Failed to register AQ handler\n");
                goto fail;
        }
        bus_describe_intr(dev, sc->res, sc->tag, "adminq");

        return (vectors);

fail:
        /* The VF driver MUST use MSIX */
        return (0);
}

static int
ixlv_allocate_pci_resources(struct ixlv_sc *sc)
{
        int             rid;
        device_t        dev = sc->dev;

        rid = PCIR_BAR(0);
        sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &rid, RF_ACTIVE);

        if (!(sc->pci_mem)) {
                device_printf(dev, "Unable to allocate bus resource: memory\n");
                return (ENXIO);
        }

        sc->osdep.mem_bus_space_tag =
                rman_get_bustag(sc->pci_mem);
        sc->osdep.mem_bus_space_handle =
                rman_get_bushandle(sc->pci_mem);
        sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
        sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
        sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
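        /*
         * Note: hw_addr is not a directly mapped register pointer here;
         * the shared code's register access routines go through the
         * osdep bus-space tag/handle saved above.
         */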
1364         sc->hw.back = &sc->osdep;
1365
1366         ixl_set_busmaster(dev);
1367         ixl_set_msix_enable(dev);
1368
1369         /* Disable adminq interrupts (just in case) */
1370         ixlv_disable_adminq_irq(&sc->hw);
1371
1372         return (0);
1373 }
1374
1375 /*
1376  * Free MSI-X related resources for a single queue
1377  */
1378 static void
1379 ixlv_free_msix_resources(struct ixlv_sc *sc, struct ixl_queue *que)
1380 {
1381         device_t                dev = sc->dev;
1382
1383         /*
1384         **  Release all msix queue resources:
1385         */
1386         if (que->tag != NULL) {
1387                 bus_teardown_intr(dev, que->res, que->tag);
1388                 que->tag = NULL;
1389         }
1390         if (que->res != NULL) {
1391                 int rid = que->msix + 1;
1392                 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1393                 que->res = NULL;
1394         }
1395         if (que->tq != NULL) {
1396                 taskqueue_free(que->tq);
1397                 que->tq = NULL;
1398         }
1399 }
1400
1401 static void
1402 ixlv_free_pci_resources(struct ixlv_sc *sc)
1403 {
1404         device_t                dev = sc->dev;
1405
1406         pci_release_msi(dev);
1407
1408         if (sc->msix_mem != NULL)
1409                 bus_release_resource(dev, SYS_RES_MEMORY,
1410                     PCIR_BAR(IXL_MSIX_BAR), sc->msix_mem);
1411
1412         if (sc->pci_mem != NULL)
1413                 bus_release_resource(dev, SYS_RES_MEMORY,
1414                     PCIR_BAR(0), sc->pci_mem);
1415 }
1416
1417 /*
1418  * Create taskqueue and tasklet for Admin Queue interrupts.
1419  */
1420 static int
1421 ixlv_init_taskqueue(struct ixlv_sc *sc)
1422 {
1423         int error = 0;
1424
1425         TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);
1426
1427         sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1428             taskqueue_thread_enqueue, &sc->tq);
1429         taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
1430             device_get_nameunit(sc->dev));
1431
1432         return (error);
1433 }
1434
1435 /*********************************************************************
1436  *
1437  *  Setup MSIX Interrupt resources and handlers for the VSI queues
1438  *
1439  **********************************************************************/
1440 static int
1441 ixlv_assign_msix(struct ixlv_sc *sc)
1442 {
1443         device_t        dev = sc->dev;
1444         struct          ixl_vsi *vsi = &sc->vsi;
1445         struct          ixl_queue *que = vsi->queues;
1446         struct          tx_ring  *txr;
1447         int             error, rid, vector = 1;
1448 #ifdef  RSS
1449         cpuset_t        cpu_mask;
1450 #endif
1451
1452         for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1453                 int cpu_id = i;
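                 /* IRQ rids are 1-based: rid 1 belongs to the AQ vector
                  * (vector 0), so queue vectors use rid = vector + 1. */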
1454                 rid = vector + 1;
1455                 txr = &que->txr;
1456                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1457                     RF_SHAREABLE | RF_ACTIVE);
1458                 if (que->res == NULL) {
1459                         device_printf(dev,"Unable to allocate"
1460                             " bus resource: que interrupt [%d]\n", vector);
1461                         return (ENXIO);
1462                 }
1463                 /* Set the handler function */
1464                 error = bus_setup_intr(dev, que->res,
1465                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
1466                     ixlv_msix_que, que, &que->tag);
1467                 if (error) {
1468                         que->tag = NULL;
1469                         device_printf(dev, "Failed to register que handler\n");
1470                         return (error);
1471                 }
1472                 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1473                 /* Bind the vector to a CPU */
1474 #ifdef RSS
1475                 cpu_id = rss_getcpu(i % rss_getnumbuckets());
1476 #endif
1477                 bus_bind_intr(dev, que->res, cpu_id);
1478                 que->msix = vector;
1479                 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1480                 TASK_INIT(&que->task, 0, ixlv_handle_que, que);
                que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
                    taskqueue_thread_enqueue, &que->tq);
                if (que->tq == NULL) {
                        /* taskqueue_create_fast() can fail with M_NOWAIT */
                        device_printf(dev, "Unable to create que taskqueue\n");
                        return (ENOMEM);
                }
1483 #ifdef RSS
1484                 CPU_SETOF(cpu_id, &cpu_mask);
1485                 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
1486                     &cpu_mask, "%s (bucket %d)",
1487                     device_get_nameunit(dev), cpu_id);
1488 #else
1489                 taskqueue_start_threads(&que->tq, 1, PI_NET,
1490                     "%s que", device_get_nameunit(dev));
1491 #endif
1492
1493         }
1494
1495         return (0);
1496 }
1497
1498 /*
1499  * Special implementation of pause for the reset flow: a lock may be
1500  * held, so we repeatedly yield instead of sleeping.
1501  */
1502 static void
1503 ixlv_msec_pause(int msecs)
1504 {
1505         int ticks_to_pause = (msecs * hz) / 1000;
1506         int start_ticks = ticks;
1507
1508         if (cold || SCHEDULER_STOPPED()) {
1509                 i40e_msec_delay(msecs);
1510                 return;
1511         }
1512
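         /*
          * The global "ticks" counter is a signed int that wraps around,
          * so a negative delta below means it wrapped while we were
          * yielding; correct for that by adding INT_MAX + 1.
          */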
1513         while (1) {
1514                 kern_yield(PRI_USER);
1515                 int yielded_ticks = ticks - start_ticks;
1516                 if (yielded_ticks > ticks_to_pause)
1517                         break;
1518                 else if (yielded_ticks < 0
1519                                 && (yielded_ticks + INT_MAX + 1 > ticks_to_pause)) {
1520                         break;
1521                 }
1522         }
1523 }
1524
1525 /*
1526 ** Requests a VF reset from the PF.
1527 **
1528 ** Requires the VF's Admin Queue to be initialized.
1529 */
1530 static int
1531 ixlv_reset(struct ixlv_sc *sc)
1532 {
1533         struct i40e_hw  *hw = &sc->hw;
1534         device_t        dev = sc->dev;
1535         int             error = 0;
1536
1537         /* Ask the PF to reset us if we are initiating */
1538         if (sc->init_state != IXLV_RESET_PENDING)
1539                 ixlv_request_reset(sc);
1540
1541         ixlv_msec_pause(100);
1542         error = ixlv_reset_complete(hw);
1543         if (error) {
1544                 device_printf(dev, "%s: VF reset failed\n",
1545                     __func__);
1546                 return (error);
1547         }
1548
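         /*
          * The VF's Admin Queue must be shut down and re-initialized
          * after the reset so its rings and registers are reprogrammed
          * against the fresh hardware state.
          */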
1549         error = i40e_shutdown_adminq(hw);
1550         if (error) {
1551                 device_printf(dev, "%s: shutdown_adminq failed: %d\n",
1552                     __func__, error);
1553                 return (error);
1554         }
1555
1556         error = i40e_init_adminq(hw);
1557         if (error) {
1558                 device_printf(dev, "%s: init_adminq failed: %d\n",
1559                     __func__, error);
1560                 return(error);
1561         }
1562
1563         return (0);
1564 }
1565
1566 static int
1567 ixlv_reset_complete(struct i40e_hw *hw)
1568 {
1569         u32 reg;
1570
1571         /* Wait up to ~10 seconds */
1572         for (int i = 0; i < 100; i++) {
1573                 reg = rd32(hw, I40E_VFGEN_RSTAT) &
1574                     I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1575
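                 /* The PF reports reset progress in VFGEN_RSTAT; VFACTIVE
                  * or COMPLETED means the VF is usable again. */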
1576                 if ((reg == VIRTCHNL_VFR_VFACTIVE) ||
1577                     (reg == VIRTCHNL_VFR_COMPLETED))
1578                         return (0);
1579                 ixlv_msec_pause(100);
1580         }
1581
1582         return (EBUSY);
1583 }
1584
1585
1586 /*********************************************************************
1587  *
1588  *  Setup networking device structure and register an interface.
1589  *
1590  **********************************************************************/
1591 static int
1592 ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
1593 {
1594         struct ifnet            *ifp;
1595         struct ixl_vsi          *vsi = &sc->vsi;
1596         struct ixl_queue        *que = vsi->queues;
1597
1598         INIT_DBG_DEV(dev, "begin");
1599
1600         ifp = vsi->ifp = if_alloc(IFT_ETHER);
1601         if (ifp == NULL) {
1602                 device_printf(dev, "%s: could not allocate ifnet"
1603                     " structure!\n", __func__);
1604                 return (-1);
1605         }
1606
1607         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1608
1609         ifp->if_mtu = ETHERMTU;
1610 #if __FreeBSD_version >= 1100000
1611         ifp->if_baudrate = IF_Gbps(40);
1612 #else
1613         if_initbaudrate(ifp, IF_Gbps(40));
1614 #endif
1615         ifp->if_init = ixlv_init;
1616         ifp->if_softc = vsi;
1617         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1618         ifp->if_ioctl = ixlv_ioctl;
1619
1620 #if __FreeBSD_version >= 1100000
1621         if_setgetcounterfn(ifp, ixl_get_counter);
1622 #endif
1623
1624         ifp->if_transmit = ixl_mq_start;
1625
1626         ifp->if_qflush = ixl_qflush;
1627         ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2;
1628
1629         ether_ifattach(ifp, sc->hw.mac.addr);
1630
1631         vsi->max_frame_size =
1632             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1633             + ETHER_VLAN_ENCAP_LEN;
1634
1635         ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
1636         ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
1637         ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE;
1638
1639         /*
1640          * Tell the upper layer(s) we support long frames.
1641          */
1642         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1643
1644         ifp->if_capabilities |= IFCAP_HWCSUM;
1645         ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1646         ifp->if_capabilities |= IFCAP_TSO;
1647         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1648
1649         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1650                              |  IFCAP_VLAN_HWTSO
1651                              |  IFCAP_VLAN_MTU
1652                              |  IFCAP_VLAN_HWCSUM
1653                              |  IFCAP_LRO;
1654         ifp->if_capenable = ifp->if_capabilities;
1655
1656         /*
1657         ** Don't turn this on by default, if vlans are
1658         ** created on another pseudo device (eg. lagg)
1659         ** then vlan events are not passed thru, breaking
1660         ** operation, but with HW FILTER off it works. If
1661         ** using vlans directly on the ixl driver you can
1662         ** enable this and get full hardware tag filtering.
1663         */
1664         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1665
1666         /*
1667          * Specify the media types supported by this adapter and register
1668          * callbacks to update media and link information
1669          */
1670         ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
1671                      ixlv_media_status);
1672
1673         /* Media types based on reported link speed over AdminQ */
1674         ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1675         ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1676         ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1677         ifmedia_add(&sc->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
1678         ifmedia_add(&sc->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
1679
1680         ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1681         ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1682
1683         INIT_DBG_DEV(dev, "end");
1684         return (0);
1685 }
1686
1687 /*
1688 ** Allocate and setup a single queue
1689 */
1690 static int
1691 ixlv_setup_queue(struct ixlv_sc *sc, struct ixl_queue *que)
1692 {
1693         device_t                dev = sc->dev;
1694         struct tx_ring          *txr;
1695         struct rx_ring          *rxr;
1696         int                     rsize, tsize;
1697         int                     error = I40E_SUCCESS;
1698
1699         txr = &que->txr;
1700         txr->que = que;
1701         txr->tail = I40E_QTX_TAIL1(que->me);
1702         /* Initialize the TX lock */
1703         snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
1704             device_get_nameunit(dev), que->me);
1705         mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
1706         /*
1707          * Create the TX descriptor ring
1708          *
1709          * In Head Writeback mode, the descriptor ring is one bigger
1710          * than the number of descriptors for space for the HW to
1711          * write back index of last completed descriptor.
1712          */
1713         if (sc->vsi.enable_head_writeback) {
1714                 tsize = roundup2((que->num_tx_desc *
1715                     sizeof(struct i40e_tx_desc)) +
1716                     sizeof(u32), DBA_ALIGN);
1717         } else {
1718                 tsize = roundup2((que->num_tx_desc *
1719                     sizeof(struct i40e_tx_desc)), DBA_ALIGN);
1720         }
1721         if (i40e_allocate_dma_mem(&sc->hw,
1722             &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
1723                 device_printf(dev,
1724                     "Unable to allocate TX Descriptor memory\n");
1725                 error = ENOMEM;
1726                 goto err_destroy_tx_mtx;
1727         }
1728         txr->base = (struct i40e_tx_desc *)txr->dma.va;
1729         bzero((void *)txr->base, tsize);
1730         /* Now allocate transmit soft structs for the ring */
1731         if (ixl_allocate_tx_data(que)) {
1732                 device_printf(dev,
1733                     "Critical Failure setting up TX structures\n");
1734                 error = ENOMEM;
1735                 goto err_free_tx_dma;
1736         }
1737         /* Allocate a buf ring */
1738         txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
1739             M_WAITOK, &txr->mtx);
1740         if (txr->br == NULL) {
1741                 device_printf(dev,
1742                     "Critical Failure setting up TX buf ring\n");
1743                 error = ENOMEM;
1744                 goto err_free_tx_data;
1745         }
1746
1747         /*
1748          * Next the RX queues...
1749          */
1750         rsize = roundup2(que->num_rx_desc *
1751             sizeof(union i40e_rx_desc), DBA_ALIGN);
1752         rxr = &que->rxr;
1753         rxr->que = que;
1754         rxr->tail = I40E_QRX_TAIL1(que->me);
1755
1756         /* Initialize the RX side lock */
1757         snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
1758             device_get_nameunit(dev), que->me);
1759         mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
1760
1761         if (i40e_allocate_dma_mem(&sc->hw,
1762             &rxr->dma, i40e_mem_reserved, rsize, 4096)) { /* XXX: should alignment be DBA_ALIGN? */
1763                 device_printf(dev,
1764                     "Unable to allocate RX Descriptor memory\n");
1765                 error = ENOMEM;
1766                 goto err_destroy_rx_mtx;
1767         }
1768         rxr->base = (union i40e_rx_desc *)rxr->dma.va;
1769         bzero((void *)rxr->base, rsize);
1770
1771         /* Allocate receive soft structs for the ring */
1772         if (ixl_allocate_rx_data(que)) {
1773                 device_printf(dev,
1774                     "Critical Failure setting up receive structs\n");
1775                 error = ENOMEM;
1776                 goto err_free_rx_dma;
1777         }
1778
1779         return (0);
1780
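 /*
  * Error unwind: each label below falls through to the next, releasing
  * everything allocated before the failure point in reverse order.
  */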
1781 err_free_rx_dma:
1782         i40e_free_dma_mem(&sc->hw, &rxr->dma);
1783 err_destroy_rx_mtx:
1784         mtx_destroy(&rxr->mtx);
1785         /* err_free_tx_buf_ring */
1786         buf_ring_free(txr->br, M_DEVBUF);
1787 err_free_tx_data:
1788         ixl_free_que_tx(que);
1789 err_free_tx_dma:
1790         i40e_free_dma_mem(&sc->hw, &txr->dma);
1791 err_destroy_tx_mtx:
1792         mtx_destroy(&txr->mtx);
1793
1794         return (error);
1795 }
1796
1797 /*
1798 ** Allocate and setup the interface queues
1799 */
1800 static int
1801 ixlv_setup_queues(struct ixlv_sc *sc)
1802 {
1803         device_t                dev = sc->dev;
1804         struct ixl_vsi          *vsi;
1805         struct ixl_queue        *que;
1806         int                     i;
1807         int                     error = I40E_SUCCESS;
1808
1809         vsi = &sc->vsi;
1810         vsi->back = (void *)sc;
1811         vsi->hw = &sc->hw;
1812         vsi->num_vlans = 0;
1813
1814         /* Get memory for the station queues */
1815         if (!(vsi->queues =
1816                 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
1817                 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1818                         device_printf(dev, "Unable to allocate queue memory\n");
1819                         return (ENOMEM);
1820         }
1821
1822         for (i = 0; i < vsi->num_queues; i++) {
1823                 que = &vsi->queues[i];
1824                 que->num_tx_desc = vsi->num_tx_desc;
1825                 que->num_rx_desc = vsi->num_rx_desc;
1826                 que->me = i;
1827                 que->vsi = vsi;
1828
1829                 if (ixlv_setup_queue(sc, que)) {
1830                         error = ENOMEM;
1831                         goto err_free_queues;
1832                 }
1833         }
1834         sysctl_ctx_init(&vsi->sysctl_ctx);
1835
1836         return (0);
1837
1838 err_free_queues:
1839         while (i--)
1840                 ixlv_free_queue(sc, &vsi->queues[i]);
1841
1842         free(vsi->queues, M_DEVBUF);
1843
1844         return (error);
1845 }
1846
1847 /*
1848 ** This routine is run via a VLAN config EVENT;
1849 ** it enables us to use the HW filter table, since
1850 ** we can get the VLAN id. This just creates the
1851 ** entry in the soft version of the VFTA; init will
1852 ** repopulate the real table.
1853 */
1854 static void
1855 ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1856 {
1857         struct ixl_vsi          *vsi = arg;
1858         struct ixlv_sc          *sc = vsi->back;
1859         struct ixlv_vlan_filter *v;
1860
1861
1862         if (ifp->if_softc != arg)   /* Not our event */
1863                 return;
1864
1865         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1866                 return;
1867
1868         /* Sanity check - make sure it doesn't already exist */
1869         SLIST_FOREACH(v, sc->vlan_filters, next) {
1870                 if (v->vlan == vtag)
1871                         return;
1872         }
1873
        mtx_lock(&sc->mtx);
        v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (v == NULL) {
                /* An M_NOWAIT allocation can fail; drop the request */
                mtx_unlock(&sc->mtx);
                return;
        }
        ++vsi->num_vlans;
        v->vlan = vtag;
        v->flags = IXL_FILTER_ADD;
        SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
        ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
            IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
1882         mtx_unlock(&sc->mtx);
1883         return;
1884 }
1885
1886 /*
1887 ** This routine is run via a VLAN
1888 ** unconfig EVENT; it removes our entry
1889 ** from the soft VFTA.
1890 */
1891 static void
1892 ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1893 {
1894         struct ixl_vsi          *vsi = arg;
1895         struct ixlv_sc          *sc = vsi->back;
1896         struct ixlv_vlan_filter *v;
1897         int                     i = 0;
1898         
1899         if (ifp->if_softc != arg)
1900                 return;
1901
1902         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1903                 return;
1904
1905         mtx_lock(&sc->mtx);
1906         SLIST_FOREACH(v, sc->vlan_filters, next) {
1907                 if (v->vlan == vtag) {
1908                         v->flags = IXL_FILTER_DEL;
1909                         ++i;
1910                         --vsi->num_vlans;
1911                 }
1912         }
1913         if (i)
1914                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
1915                     IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
1916         mtx_unlock(&sc->mtx);
1917         return;
1918 }
1919
1920 /*
1921 ** Get a new filter and add it to the mac filter list.
1922 */
1923 static struct ixlv_mac_filter *
1924 ixlv_get_mac_filter(struct ixlv_sc *sc)
1925 {
1926         struct ixlv_mac_filter  *f;
1927
1928         f = malloc(sizeof(struct ixlv_mac_filter),
1929             M_DEVBUF, M_NOWAIT | M_ZERO);
1930         if (f)
1931                 SLIST_INSERT_HEAD(sc->mac_filters, f, next);
1932
1933         return (f);
1934 }
1935
1936 /*
1937 ** Find the filter with matching MAC address
1938 */
1939 static struct ixlv_mac_filter *
1940 ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
1941 {
1942         struct ixlv_mac_filter  *f;
1943         bool                            match = FALSE;
1944
1945         SLIST_FOREACH(f, sc->mac_filters, next) {
1946                 if (cmp_etheraddr(f->macaddr, macaddr)) {
1947                         match = TRUE;
1948                         break;
1949                 }
1950         }       
1951
1952         if (!match)
1953                 f = NULL;
1954         return (f);
1955 }
1956
1957 static int
1958 ixlv_teardown_adminq_msix(struct ixlv_sc *sc)
1959 {
1960         device_t                dev = sc->dev;
1961         int                     error = 0;
1962
1963         if (sc->tag != NULL) {
1964                 error = bus_teardown_intr(dev, sc->res, sc->tag);
1965                 if (error) {
1966                         device_printf(dev, "bus_teardown_intr() for"
1967                             " interrupt 0 failed\n");
1968                         // return (ENXIO);
1969                 }
1970                 sc->tag = NULL;
1971         }
1972         if (sc->res != NULL) {
1973                 error = bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
1974                 if (error) {
1975                         device_printf(dev, "bus_release_resource() for"
1976                             " interrupt 0 failed\n");
1977                         // return (ENXIO);
1978                 }
1979                 sc->res = NULL;
1980         }
1981
1982         return (0);
1983
1984 }
1985
1986 /*
1987 ** Admin Queue interrupt handler
1988 */
1989 static void
1990 ixlv_msix_adminq(void *arg)
1991 {
1992         struct ixlv_sc  *sc = arg;
1993         struct i40e_hw  *hw = &sc->hw;
1994         u32             reg, mask;
1995
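         /*
          * Read the cause and enable registers; reading VFINT_ICR01
          * also acknowledges (clears) the pending AQ interrupt cause.
          */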
1996         reg = rd32(hw, I40E_VFINT_ICR01);
1997         mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
1998
1999         reg = rd32(hw, I40E_VFINT_DYN_CTL01);
2000         reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
2001         wr32(hw, I40E_VFINT_DYN_CTL01, reg);
2002
        /* Count the interrupt, then hand the work to the AQ task */
        ++sc->admin_irq;
        taskqueue_enqueue(sc->tq, &sc->aq_irq);
2005         return;
2006 }
2007
2008 void
2009 ixlv_enable_intr(struct ixl_vsi *vsi)
2010 {
2011         struct i40e_hw          *hw = vsi->hw;
2012         struct ixl_queue        *que = vsi->queues;
2013
2014         ixlv_enable_adminq_irq(hw);
2015         for (int i = 0; i < vsi->num_queues; i++, que++)
2016                 ixlv_enable_queue_irq(hw, que->me);
2017 }
2018
2019 void
2020 ixlv_disable_intr(struct ixl_vsi *vsi)
2021 {
2022         struct i40e_hw          *hw = vsi->hw;
2023         struct ixl_queue       *que = vsi->queues;
2024
2025         ixlv_disable_adminq_irq(hw);
2026         for (int i = 0; i < vsi->num_queues; i++, que++)
2027                 ixlv_disable_queue_irq(hw, que->me);
2028 }
2029
2030
2031 static void
2032 ixlv_disable_adminq_irq(struct i40e_hw *hw)
2033 {
2034         wr32(hw, I40E_VFINT_DYN_CTL01, 0);
2035         wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
2036         /* flush */
2037         rd32(hw, I40E_VFGEN_RSTAT);
2038         return;
2039 }
2040
2041 static void
2042 ixlv_enable_adminq_irq(struct i40e_hw *hw)
2043 {
2044         wr32(hw, I40E_VFINT_DYN_CTL01,
2045             I40E_VFINT_DYN_CTL01_INTENA_MASK |
2046             I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
2047         wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
2048         /* flush */
2049         rd32(hw, I40E_VFGEN_RSTAT);
2050         return;
2051 }
2052
2053 static void
2054 ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
2055 {
2056         u32             reg;
2057
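         /* An ITR_INDX of 3 (the full mask value) tells the HW to leave
          * the current throttling interval unchanged while enabling. */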
2058         reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
2059             I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
2060             I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
2061         wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
2062 }
2063
2064 static void
2065 ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
2066 {
2067         wr32(hw, I40E_VFINT_DYN_CTLN1(id),
2068             I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
2069         rd32(hw, I40E_VFGEN_RSTAT);
2070         return;
2071 }
2072
2073 /*
2074  * Get initial ITR values from tunable values.
2075  */
2076 static void
2077 ixlv_configure_itr(struct ixlv_sc *sc)
2078 {
2079         struct i40e_hw          *hw = &sc->hw;
2080         struct ixl_vsi          *vsi = &sc->vsi;
2081         struct ixl_queue        *que = vsi->queues;
2082
2083         vsi->rx_itr_setting = ixlv_rx_itr;
2084         vsi->tx_itr_setting = ixlv_tx_itr;
2085
2086         for (int i = 0; i < vsi->num_queues; i++, que++) {
2087                 struct tx_ring  *txr = &que->txr;
2088                 struct rx_ring  *rxr = &que->rxr;
2089
2090                 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i),
2091                     vsi->rx_itr_setting);
2092                 rxr->itr = vsi->rx_itr_setting;
2093                 rxr->latency = IXL_AVE_LATENCY;
2094
2095                 wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i),
2096                     vsi->tx_itr_setting);
2097                 txr->itr = vsi->tx_itr_setting;
2098                 txr->latency = IXL_AVE_LATENCY;
2099         }
2100 }
2101
2102 /*
2103 ** Provide an update to the queue RX
2104 ** interrupt moderation value.
2105 */
2106 static void
2107 ixlv_set_queue_rx_itr(struct ixl_queue *que)
2108 {
2109         struct ixl_vsi  *vsi = que->vsi;
2110         struct i40e_hw  *hw = vsi->hw;
2111         struct rx_ring  *rxr = &que->rxr;
2112         u16             rx_itr;
2113         u16             rx_latency = 0;
2114         int             rx_bytes;
2115
2116
2117         /* Idle, do nothing */
2118         if (rxr->bytes == 0)
2119                 return;
2120
2121         if (ixlv_dynamic_rx_itr) {
2122                 rx_bytes = rxr->bytes/rxr->itr;
2123                 rx_itr = rxr->itr;
2124
2125                 /* Adjust latency range */
2126                 switch (rxr->latency) {
2127                 case IXL_LOW_LATENCY:
2128                         if (rx_bytes > 10) {
2129                                 rx_latency = IXL_AVE_LATENCY;
2130                                 rx_itr = IXL_ITR_20K;
2131                         }
2132                         break;
2133                 case IXL_AVE_LATENCY:
2134                         if (rx_bytes > 20) {
2135                                 rx_latency = IXL_BULK_LATENCY;
2136                                 rx_itr = IXL_ITR_8K;
2137                         } else if (rx_bytes <= 10) {
2138                                 rx_latency = IXL_LOW_LATENCY;
2139                                 rx_itr = IXL_ITR_100K;
2140                         }
2141                         break;
2142                 case IXL_BULK_LATENCY:
2143                         if (rx_bytes <= 20) {
2144                                 rx_latency = IXL_AVE_LATENCY;
2145                                 rx_itr = IXL_ITR_20K;
2146                         }
2147                         break;
2148                  }
2149
2150                 rxr->latency = rx_latency;
2151
2152                 if (rx_itr != rxr->itr) {
2153                         /* Smooth: weighted harmonic mean, 9 parts old ITR to 1 part new */
2154                         rx_itr = (10 * rx_itr * rxr->itr) /
2155                             ((9 * rx_itr) + rxr->itr);
2156                         rxr->itr = min(rx_itr, IXL_MAX_ITR);
2157                         wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
2158                             que->me), rxr->itr);
2159                 }
2160                 } else { /* We may have toggled to non-dynamic */
2161                 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2162                         vsi->rx_itr_setting = ixlv_rx_itr;
2163                 /* Update the hardware if needed */
2164                 if (rxr->itr != vsi->rx_itr_setting) {
2165                         rxr->itr = vsi->rx_itr_setting;
2166                         wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
2167                             que->me), rxr->itr);
2168                 }
2169         }
2170         rxr->bytes = 0;
2171         rxr->packets = 0;
2172         return;
2173 }
2174
2175
2176 /*
2177 ** Provide an update to the queue TX
2178 ** interrupt moderation value.
2179 */
2180 static void
2181 ixlv_set_queue_tx_itr(struct ixl_queue *que)
2182 {
2183         struct ixl_vsi  *vsi = que->vsi;
2184         struct i40e_hw  *hw = vsi->hw;
2185         struct tx_ring  *txr = &que->txr;
2186         u16             tx_itr;
2187         u16             tx_latency = 0;
2188         int             tx_bytes;
2189
2190
2191         /* Idle, do nothing */
2192         if (txr->bytes == 0)
2193                 return;
2194
2195         if (ixlv_dynamic_tx_itr) {
2196                 tx_bytes = txr->bytes/txr->itr;
2197                 tx_itr = txr->itr;
2198
2199                 switch (txr->latency) {
2200                 case IXL_LOW_LATENCY:
2201                         if (tx_bytes > 10) {
2202                                 tx_latency = IXL_AVE_LATENCY;
2203                                 tx_itr = IXL_ITR_20K;
2204                         }
2205                         break;
2206                 case IXL_AVE_LATENCY:
2207                         if (tx_bytes > 20) {
2208                                 tx_latency = IXL_BULK_LATENCY;
2209                                 tx_itr = IXL_ITR_8K;
2210                         } else if (tx_bytes <= 10) {
2211                                 tx_latency = IXL_LOW_LATENCY;
2212                                 tx_itr = IXL_ITR_100K;
2213                         }
2214                         break;
2215                 case IXL_BULK_LATENCY:
2216                         if (tx_bytes <= 20) {
2217                                 tx_latency = IXL_AVE_LATENCY;
2218                                 tx_itr = IXL_ITR_20K;
2219                         }
2220                         break;
2221                 }
2222
2223                 txr->latency = tx_latency;
2224
2225                 if (tx_itr != txr->itr) {
2226                         /* Smooth: weighted harmonic mean, 9 parts old ITR to 1 part new */
2227                         tx_itr = (10 * tx_itr * txr->itr) /
2228                             ((9 * tx_itr) + txr->itr);
2229                         txr->itr = min(tx_itr, IXL_MAX_ITR);
2230                         wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2231                             que->me), txr->itr);
2232                 }
2233
2234         } else { /* We may have toggled to non-dynamic */
2235                 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2236                         vsi->tx_itr_setting = ixlv_tx_itr;
2237                 /* Update the hardware if needed */
2238                 if (txr->itr != vsi->tx_itr_setting) {
2239                         txr->itr = vsi->tx_itr_setting;
2240                         wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2241                             que->me), txr->itr);
2242                 }
2243         }
2244         txr->bytes = 0;
2245         txr->packets = 0;
2246         return;
2247 }
2248
2249
2250 /*
2251 **
2252 ** MSIX Interrupt Handlers and Tasklets
2253 **
2254 */
2255 static void
2256 ixlv_handle_que(void *context, int pending)
2257 {
2258         struct ixl_queue *que = context;
2259         struct ixl_vsi *vsi = que->vsi;
2260         struct i40e_hw  *hw = vsi->hw;
2261         struct tx_ring  *txr = &que->txr;
2262         struct ifnet    *ifp = vsi->ifp;
2263         bool            more;
2264
2265         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2266                 more = ixl_rxeof(que, IXL_RX_LIMIT);
2267                 mtx_lock(&txr->mtx);
2268                 ixl_txeof(que);
2269                 if (!drbr_empty(ifp, txr->br))
2270                         ixl_mq_start_locked(ifp, txr);
2271                 mtx_unlock(&txr->mtx);
2272                 if (more) {
2273                         taskqueue_enqueue(que->tq, &que->task);
2274                         return;
2275                 }
2276         }
2277
2278         /* Re-enable this interrupt */
2279         ixlv_enable_queue_irq(hw, que->me);
2280         return;
2281 }
2282
2283
2284 /*********************************************************************
2285  *
2286  *  MSIX Queue Interrupt Service routine
2287  *
2288  **********************************************************************/
2289 static void
2290 ixlv_msix_que(void *arg)
2291 {
2292         struct ixl_queue        *que = arg;
2293         struct ixl_vsi  *vsi = que->vsi;
2294         struct i40e_hw  *hw = vsi->hw;
2295         struct tx_ring  *txr = &que->txr;
2296         bool            more_tx, more_rx;
2297
2298         /* Spurious interrupts are ignored */
2299         if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
2300                 return;
2301
2302         /* Some drivers disable auto-masking of interrupts, which is a
2303          * global setting for all ports. Mask the queue interrupt here
2304          * to make sure we do not lose any IRQs */
2305         ixlv_disable_queue_irq(hw, que->me);
2306
2307         ++que->irqs;
2308
2309         more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
2310
2311         mtx_lock(&txr->mtx);
2312         more_tx = ixl_txeof(que);
2313         /*
2314         ** Make certain that if the stack 
2315         ** has anything queued the task gets
2316         ** scheduled to handle it.
2317         */
2318         if (!drbr_empty(vsi->ifp, txr->br))
2319                 more_tx = 1;
2320         mtx_unlock(&txr->mtx);
2321
2322         ixlv_set_queue_rx_itr(que);
2323         ixlv_set_queue_tx_itr(que);
2324
2325         if (more_tx || more_rx)
2326                 taskqueue_enqueue(que->tq, &que->task);
2327         else
2328                 ixlv_enable_queue_irq(hw, que->me);
2329
2330         return;
2331 }
2332
2333
2334 /*********************************************************************
2335  *
2336  *  Media Ioctl callback
2337  *
2338  *  This routine is called whenever the user queries the status of
2339  *  the interface using ifconfig.
2340  *
2341  **********************************************************************/
2342 static void
2343 ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
2344 {
2345         struct ixl_vsi          *vsi = ifp->if_softc;
2346         struct ixlv_sc  *sc = vsi->back;
2347
2348         INIT_DBG_IF(ifp, "begin");
2349
2350         mtx_lock(&sc->mtx);
2351
2352         ixlv_update_link_status(sc);
2353
2354         ifmr->ifm_status = IFM_AVALID;
2355         ifmr->ifm_active = IFM_ETHER;
2356
2357         if (!sc->link_up) {
2358                 mtx_unlock(&sc->mtx);
2359                 INIT_DBG_IF(ifp, "end: link not up");
2360                 return;
2361         }
2362
2363         ifmr->ifm_status |= IFM_ACTIVE;
2364         /* Hardware is always full-duplex */
2365         ifmr->ifm_active |= IFM_FDX;
2366
2367         /* Based on the link speed reported by the PF over the AdminQ, choose a
2368          * PHY type to report. This isn't 100% correct since we don't really
2369          * know the underlying PHY type of the PF, but at least we can report
2370          * a valid link speed...
2371          */
2372         switch (sc->link_speed) {
2373         case VIRTCHNL_LINK_SPEED_100MB:
2374                 ifmr->ifm_active |= IFM_100_TX;
2375                 break;
2376         case VIRTCHNL_LINK_SPEED_1GB:
2377                 ifmr->ifm_active |= IFM_1000_T;
2378                 break;
2379         case VIRTCHNL_LINK_SPEED_10GB:
2380                 ifmr->ifm_active |= IFM_10G_SR;
2381                 break;
2382         case VIRTCHNL_LINK_SPEED_20GB:
2383         case VIRTCHNL_LINK_SPEED_25GB:
2384                 ifmr->ifm_active |= IFM_25G_SR;
2385                 break;
2386         case VIRTCHNL_LINK_SPEED_40GB:
2387                 ifmr->ifm_active |= IFM_40G_SR4;
2388                 break;
2389         default:
2390                 ifmr->ifm_active |= IFM_UNKNOWN;
2391                 break;
2392         }
2393
2394         mtx_unlock(&sc->mtx);
2395         INIT_DBG_IF(ifp, "end");
2396         return;
2397 }
2398
2399 /*********************************************************************
2400  *
2401  *  Media Ioctl callback
2402  *
2403  *  This routine is called when the user changes speed/duplex using
2404  *  media/mediaopt option with ifconfig.
2405  *
2406  **********************************************************************/
2407 static int
2408 ixlv_media_change(struct ifnet * ifp)
2409 {
        struct ixl_vsi *vsi = ifp->if_softc;
        struct ixlv_sc *sc = vsi->back;
        /* Use the ifmedia instance registered in ixlv_setup_interface() */
        struct ifmedia *ifm = &sc->media;
2412
2413         INIT_DBG_IF(ifp, "begin");
2414
2415         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2416                 return (EINVAL);
2417
2418         if_printf(ifp, "Changing speed is not supported\n");
2419
2420         INIT_DBG_IF(ifp, "end");
2421         return (ENODEV);
2422 }
2423
2424
2425 /*********************************************************************
2426  *  Multicast Initialization
2427  *
2428  *  This routine is called by init to reset a fresh state.
2429  *
2430  **********************************************************************/
2431
2432 static void
2433 ixlv_init_multi(struct ixl_vsi *vsi)
2434 {
2435         struct ixlv_mac_filter *f;
2436         struct ixlv_sc  *sc = vsi->back;
2437         int                     mcnt = 0;
2438
2439         IOCTL_DBG_IF(vsi->ifp, "begin");
2440
2441         /* First clear any multicast filters */
2442         SLIST_FOREACH(f, sc->mac_filters, next) {
2443                 if ((f->flags & IXL_FILTER_USED)
2444                     && (f->flags & IXL_FILTER_MC)) {
2445                         f->flags |= IXL_FILTER_DEL;
2446                         mcnt++;
2447                 }
2448         }
2449         if (mcnt > 0)
2450                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2451                     IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2452                     sc);
2453
2454         IOCTL_DBG_IF(vsi->ifp, "end");
2455 }
2456
2457 static void
2458 ixlv_add_multi(struct ixl_vsi *vsi)
2459 {
2460         struct ifmultiaddr      *ifma;
2461         struct ifnet            *ifp = vsi->ifp;
2462         struct ixlv_sc  *sc = vsi->back;
2463         int                     mcnt = 0;
2464
2465         IOCTL_DBG_IF(ifp, "begin");
2466
2467         if_maddr_rlock(ifp);
2468         /*
2469         ** Get a count, to decide if we
2470         ** simply use multicast promiscuous.
2471         */
2472         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2473                 if (ifma->ifma_addr->sa_family != AF_LINK)
2474                         continue;
2475                 mcnt++;
2476         }
2477         if_maddr_runlock(ifp);
2478
2479         /* TODO: Remove -- cannot set promiscuous mode in a VF */
2480         if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
2481                 /* delete all multicast filters */
2482                 ixlv_init_multi(vsi);
2483                 sc->promiscuous_flags |= FLAG_VF_MULTICAST_PROMISC;
2484                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2485                     IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
2486                     sc);
2487                 IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
2488                 return;
2489         }
2490
2491         mcnt = 0;
2492         if_maddr_rlock(ifp);
2493         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2494                 if (ifma->ifma_addr->sa_family != AF_LINK)
2495                         continue;
2496                 if (!ixlv_add_mac_filter(sc,
2497                     (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2498                     IXL_FILTER_MC))
2499                         mcnt++;
2500         }
2501         if_maddr_runlock(ifp);
2502         /*
2503         ** Notify AQ task that sw filters need to be
2504         ** added to hw list
2505         */
2506         if (mcnt > 0)
2507                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2508                     IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
2509                     sc);
2510
2511         IOCTL_DBG_IF(ifp, "end");
2512 }
2513
2514 static void
2515 ixlv_del_multi(struct ixl_vsi *vsi)
2516 {
2517         struct ixlv_mac_filter *f;
2518         struct ifmultiaddr      *ifma;
2519         struct ifnet            *ifp = vsi->ifp;
2520         struct ixlv_sc  *sc = vsi->back;
2521         int                     mcnt = 0;
2522         bool            match = FALSE;
2523
2524         IOCTL_DBG_IF(ifp, "begin");
2525
2526         /* Search for removed multicast addresses */
2527         if_maddr_rlock(ifp);
2528         SLIST_FOREACH(f, sc->mac_filters, next) {
2529                 if ((f->flags & IXL_FILTER_USED)
2530                     && (f->flags & IXL_FILTER_MC)) {
2531                         /* check if mac address in filter is in sc's list */
2532                         match = FALSE;
2533                         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2534                                 if (ifma->ifma_addr->sa_family != AF_LINK)
2535                                         continue;
2536                                 u8 *mc_addr =
2537                                     (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2538                                 if (cmp_etheraddr(f->macaddr, mc_addr)) {
2539                                         match = TRUE;
2540                                         break;
2541                                 }
2542                         }
2543                         /* if this filter is not in the sc's list, remove it */
2544                         if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
2545                                 f->flags |= IXL_FILTER_DEL;
2546                                 mcnt++;
2547                                 IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
2548                                     MAC_FORMAT_ARGS(f->macaddr));
2549                         }
2550                         else if (match == FALSE)
2551                                 IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
2552                                     MAC_FORMAT_ARGS(f->macaddr));
2553                 }
2554         }
2555         if_maddr_runlock(ifp);
2556
2557         if (mcnt > 0)
2558                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2559                     IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2560                     sc);
2561
2562         IOCTL_DBG_IF(ifp, "end");
2563 }
2564
2565 /*********************************************************************
2566  *  Timer routine
2567  *
2568  *  This routine checks link status, updates statistics,
2569  *  and runs the watchdog check.
2570  *
2571  **********************************************************************/
2572
2573 static void
2574 ixlv_local_timer(void *arg)
2575 {
2576         struct ixlv_sc          *sc = arg;
2577         struct i40e_hw          *hw = &sc->hw;
2578         struct ixl_vsi          *vsi = &sc->vsi;
2579         u32                     val;
2580
2581         IXLV_CORE_LOCK_ASSERT(sc);
2582
2583         /* If Reset is in progress just bail */
2584         if (sc->init_state == IXLV_RESET_PENDING)
2585                 return;
2586
2587         /* Check for when PF triggers a VF reset */
2588         val = rd32(hw, I40E_VFGEN_RSTAT) &
2589             I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2590
2591         if (val != VIRTCHNL_VFR_VFACTIVE
2592             && val != VIRTCHNL_VFR_COMPLETED) {
2593                 DDPRINTF(sc->dev, "reset in progress! (%d)", val);
2594                 return;
2595         }
2596
2597         ixlv_request_stats(sc);
2598
2599         /* clean and process any events */
2600         taskqueue_enqueue(sc->tq, &sc->aq_irq);
2601
2602         /* Increment stat when a queue shows hung */
2603         if (ixl_queue_hang_check(vsi))
2604                 sc->watchdog_events++;
2605
2606         callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
2607 }
2608
2609 /*
2610 ** Note: this routine updates the OS on the link state
2611 **      the real check of the hardware only happens with
2612 **      a link interrupt.
2613 */
2614 void
2615 ixlv_update_link_status(struct ixlv_sc *sc)
2616 {
2617         struct ixl_vsi          *vsi = &sc->vsi;
2618         struct ifnet            *ifp = vsi->ifp;
2619
2620         if (sc->link_up) {
2621                 if (vsi->link_active == FALSE) {
2622                         if (bootverbose)
2623                                 if_printf(ifp,"Link is Up, %s\n",
2624                                     ixlv_vc_speed_to_string(sc->link_speed));
2625                         vsi->link_active = TRUE;
2626                         if_link_state_change(ifp, LINK_STATE_UP);
2627                 }
2628         } else { /* Link down */
2629                 if (vsi->link_active == TRUE) {
2630                         if (bootverbose)
2631                                 if_printf(ifp,"Link is Down\n");
2632                         if_link_state_change(ifp, LINK_STATE_DOWN);
2633                         vsi->link_active = FALSE;
2634                 }
2635         }
2636
2637         return;
2638 }
2639
2640 /*********************************************************************
2641  *
2642  *  This routine disables all traffic on the adapter by issuing a
2643  *  global reset on the MAC and deallocates TX/RX buffers.
2644  *
2645  **********************************************************************/
2646
2647 static void
2648 ixlv_stop(struct ixlv_sc *sc)
2649 {
2650         struct ifnet *ifp;
2651         int start;
2652
2653         ifp = sc->vsi.ifp;
2654         INIT_DBG_IF(ifp, "begin");
2655
2656         IXLV_CORE_LOCK_ASSERT(sc);
2657
2658         ixl_vc_flush(&sc->vc_mgr);
2659         ixlv_disable_queues(sc);
2660
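         /*
          * Let the AQ run for up to hz/10 ticks (~100 ms) so the
          * queue-disable confirmation from the PF can be processed.
          */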
2661         start = ticks;
2662         while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
2663             ((ticks - start) < hz/10))
2664                 ixlv_do_adminq_locked(sc);
2665
2666         /* Stop the local timer */
2667         callout_stop(&sc->timer);
2668
2669         INIT_DBG_IF(ifp, "end");
2670 }
2671
2672 /* Free a single queue struct */
2673 static void
2674 ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que)
2675 {
2676         struct tx_ring *txr = &que->txr;
2677         struct rx_ring *rxr = &que->rxr;
2678
2679         if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2680                 return;
2681         IXL_TX_LOCK(txr);
2682         if (txr->br)
2683                 buf_ring_free(txr->br, M_DEVBUF);
2684         ixl_free_que_tx(que);
2685         if (txr->base)
2686                 i40e_free_dma_mem(&sc->hw, &txr->dma);
2687         IXL_TX_UNLOCK(txr);
2688         IXL_TX_LOCK_DESTROY(txr);
2689
2690         if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2691                 return;
2692         IXL_RX_LOCK(rxr);
2693         ixl_free_que_rx(que);
2694         if (rxr->base)
2695                 i40e_free_dma_mem(&sc->hw, &rxr->dma);
2696         IXL_RX_UNLOCK(rxr);
2697         IXL_RX_LOCK_DESTROY(rxr);
2698 }
2699
2700 /*********************************************************************
2701  *
2702  *  Free all station queue structs.
2703  *
2704  **********************************************************************/
2705 static void
2706 ixlv_free_queues(struct ixl_vsi *vsi)
2707 {
2708         struct ixlv_sc  *sc = (struct ixlv_sc *)vsi->back;
2709         struct ixl_queue        *que = vsi->queues;
2710
2711         for (int i = 0; i < vsi->num_queues; i++, que++) {
2712                 /* First, free the MSI-X resources */
2713                 ixlv_free_msix_resources(sc, que);
2714                 /* Then free other queue data */
2715                 ixlv_free_queue(sc, que);
2716         }
2717
2718         sysctl_ctx_free(&vsi->sysctl_ctx);
2719         free(vsi->queues, M_DEVBUF);
2720 }
2721
2722 static void
2723 ixlv_config_rss_reg(struct ixlv_sc *sc)
2724 {
2725         struct i40e_hw  *hw = &sc->hw;
2726         struct ixl_vsi  *vsi = &sc->vsi;
2727         u32             lut = 0;
2728         u64             set_hena = 0, hena;
2729         int             i, j, que_id;
2730         u32             rss_seed[IXL_RSS_KEY_SIZE_REG];
2731 #ifdef RSS
2732         u32             rss_hash_config;
2733 #endif
2734         
2735         /* Don't set up RSS if using a single queue */
2736         if (vsi->num_queues == 1) {
2737                 wr32(hw, I40E_VFQF_HENA(0), 0);
2738                 wr32(hw, I40E_VFQF_HENA(1), 0);
2739                 ixl_flush(hw);
2740                 return;
2741         }
2742
2743 #ifdef RSS
2744         /* Fetch the configured RSS key */
2745         rss_getkey((uint8_t *) &rss_seed);
2746 #else
2747         ixl_get_default_rss_key(rss_seed);
2748 #endif
2749
2750         /* Fill out hash function seed */
2751         for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
2752                 wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
2753
2754         /* Enable PCTYPES for RSS: */
2755 #ifdef RSS
2756         rss_hash_config = rss_gethashconfig();
2757         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2758                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2759         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2760                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2761         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2762                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2763         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2764                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2765         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2766                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2767         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2768                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2769         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2770                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
2771 #else
2772         set_hena = IXL_DEFAULT_RSS_HENA_XL710;
2773 #endif
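         /* HENA is a 64-bit PCTYPE enable bitmask split across two
          * 32-bit registers: merge, OR in the new types, write back. */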
2774         hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
2775             ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
2776         hena |= set_hena;
2777         wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
2778         wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
2779
2780         /* Populate the LUT with max no. of queues in round robin fashion */
2781         for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
2782                 if (j == vsi->num_queues)
2783                         j = 0;
2784 #ifdef RSS
2785                 /*
2786                  * Fetch the RSS bucket id for the given indirection entry.
2787                  * Cap it at the number of configured buckets (which is
2788                  * num_queues.)
2789                  */
2790                 que_id = rss_get_indirection_to_bucket(i);
2791                 que_id = que_id % vsi->num_queues;
2792 #else
2793                 que_id = j;
2794 #endif
2795                 /* lut = 4-byte sliding window of 4 lut entries */
2796                 lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
2797                 /* On i = 3, we have 4 entries in lut; write to the register */
2798                 if ((i & 3) == 3) {
2799                         wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
2800                         DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
2801                 }
2802         }
2803         ixl_flush(hw);
2804 }
2805
2806 static void
2807 ixlv_config_rss_pf(struct ixlv_sc *sc)
2808 {
2809         ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_key_cmd,
2810             IXLV_FLAG_AQ_CONFIG_RSS_KEY, ixl_init_cmd_complete, sc);
2811
2812         ixl_vc_enqueue(&sc->vc_mgr, &sc->set_rss_hena_cmd,
2813             IXLV_FLAG_AQ_SET_RSS_HENA, ixl_init_cmd_complete, sc);
2814
2815         ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_lut_cmd,
2816             IXLV_FLAG_AQ_CONFIG_RSS_LUT, ixl_init_cmd_complete, sc);
2817 }
2818
2819 /*
2820 ** ixlv_config_rss - setup RSS 
2821 **
2822 ** RSS keys and table are cleared on VF reset.
2823 */
2824 static void
2825 ixlv_config_rss(struct ixlv_sc *sc)
2826 {
2827         if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) {
2828                 DDPRINTF(sc->dev, "Setting up RSS using VF registers...");
2829                 ixlv_config_rss_reg(sc);
2830         } else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2831                 DDPRINTF(sc->dev, "Setting up RSS using messages to PF...");
2832                 ixlv_config_rss_pf(sc);
2833         } else
2834                 device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
2835 }
2836
2837 /*
2838 ** This routine refreshes vlan filters, called by init
2839 ** it scans the filter table and then updates the AQ
2840 */
2841 static void
2842 ixlv_setup_vlan_filters(struct ixlv_sc *sc)
2843 {
2844         struct ixl_vsi                  *vsi = &sc->vsi;
2845         struct ixlv_vlan_filter *f;
2846         int                             cnt = 0;
2847
2848         if (vsi->num_vlans == 0)
2849                 return;
2850         /*
2851         ** Scan the filter table for vlan entries,
2852         ** and if found call for the AQ update.
2853         */
2854         SLIST_FOREACH(f, sc->vlan_filters, next)
2855                 if (f->flags & IXL_FILTER_ADD)
2856                         cnt++;
2857         if (cnt > 0)
2858                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
2859                     IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
2860 }
2861
2862
2863 /*
2864 ** This routine adds new MAC filters to the sc's list;
2865 ** these are later added in hardware by sending a virtual
2866 ** channel message.
2867 */
2868 static int
2869 ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
2870 {
2871         struct ixlv_mac_filter  *f;
2872
2873         /* Does one already exist? */
2874         f = ixlv_find_mac_filter(sc, macaddr);
2875         if (f != NULL) {
2876                 IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
2877                     MAC_FORMAT_ARGS(macaddr));
2878                 return (EEXIST);
2879         }
2880
2881         /* If not, get a new empty filter */
2882         f = ixlv_get_mac_filter(sc);
2883         if (f == NULL) {
2884                 if_printf(sc->vsi.ifp, "%s: no filters available!!\n",
2885                     __func__);
2886                 return (ENOMEM);
2887         }
2888
2889         IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
2890             MAC_FORMAT_ARGS(macaddr));
2891
2892         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2893         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
2894         f->flags |= flags;
2895         return (0);
2896 }
2897
2898 /*
2899 ** Marks a MAC filter for deletion.
2900 */
2901 static int
2902 ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
2903 {
2904         struct ixlv_mac_filter  *f;
2905
2906         f = ixlv_find_mac_filter(sc, macaddr);
2907         if (f == NULL)
2908                 return (ENOENT);
2909
2910         f->flags |= IXL_FILTER_DEL;
2911         return (0);
2912 }
2913
2914 /*
2915 ** Tasklet handler for MSIX Adminq interrupts
2916 **  - done outside interrupt context since it might sleep
2917 */
2918 static void
2919 ixlv_do_adminq(void *context, int pending)
2920 {
2921         struct ixlv_sc          *sc = context;
2922
2923         mtx_lock(&sc->mtx);
2924         ixlv_do_adminq_locked(sc);
2925         mtx_unlock(&sc->mtx);
2926         return;
2927 }
2928
2929 static void
2930 ixlv_do_adminq_locked(struct ixlv_sc *sc)
2931 {
2932         struct i40e_hw                  *hw = &sc->hw;
2933         struct i40e_arq_event_info      event;
2934         struct virtchnl_msg     *v_msg;
2935         device_t                        dev = sc->dev;
2936         u16                             result = 0;
2937         u32                             reg, oldreg;
2938         i40e_status                     ret;
2939         bool                            aq_error = false;
2940
2941         IXLV_CORE_LOCK_ASSERT(sc);
2942
2943         event.buf_len = IXL_AQ_BUF_SZ;
2944         event.msg_buf = sc->aq_buffer;
2945         v_msg = (struct virtchnl_msg *)&event.desc;
2946
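         /*
          * Drain the admin receive queue, dispatching each virtchnl
          * message from the PF to the completion handler.
          */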
2947         do {
2948                 ret = i40e_clean_arq_element(hw, &event, &result);
2949                 if (ret)
2950                         break;
2951                 ixlv_vc_completion(sc, v_msg->v_opcode,
2952                     v_msg->v_retval, event.msg_buf, event.msg_len);
2953                 if (result != 0)
2954                         bzero(event.msg_buf, IXL_AQ_BUF_SZ);
2955         } while (result);
2956
2957         /* check for Admin queue errors */
2958         oldreg = reg = rd32(hw, hw->aq.arq.len);
2959         if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
2960                 device_printf(dev, "ARQ VF Error detected\n");
2961                 reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
2962                 aq_error = true;
2963         }
2964         if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
2965                 device_printf(dev, "ARQ Overflow Error detected\n");
2966                 reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
2967                 aq_error = true;
2968         }
2969         if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
2970                 device_printf(dev, "ARQ Critical Error detected\n");
2971                 reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
2972                 aq_error = true;
2973         }
2974         if (oldreg != reg)
2975                 wr32(hw, hw->aq.arq.len, reg);
2976
2977         oldreg = reg = rd32(hw, hw->aq.asq.len);
2978         if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
2979                 device_printf(dev, "ASQ VF Error detected\n");
2980                 reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
2981                 aq_error = true;
2982         }
2983         if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
2984                 device_printf(dev, "ASQ Overflow Error detected\n");
2985                 reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
2986                 aq_error = true;
2987         }
2988         if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
2989                 device_printf(dev, "ASQ Critical Error detected\n");
2990                 reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
2991                 aq_error = true;
2992         }
2993         if (oldreg != reg)
2994                 wr32(hw, hw->aq.asq.len, reg);
2995
2996         if (aq_error) {
2997                 /* Need to reset adapter */
2998                 device_printf(dev, "WARNING: Resetting!\n");
2999                 sc->init_state = IXLV_RESET_REQUIRED;
3000                 ixlv_stop(sc);
3001                 ixlv_init_locked(sc);
3002         }
3003         ixlv_enable_adminq_irq(hw);
3004 }
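
/*
** Example (sketch, not from the original driver): each of the six
** checks above is the same read-modify-write sequence -- read the
** queue LEN register, report and clear any latched error bit, and
** write the register back. A hypothetical helper condensing one check:
*/
static bool
ixlv_clear_aq_error(struct i40e_hw *hw, device_t dev, u32 len_reg,
    u32 err_mask, const char *msg)
{
	u32 reg = rd32(hw, len_reg);

	if ((reg & err_mask) == 0)
		return (false);
	device_printf(dev, "%s Error detected\n", msg);
	wr32(hw, len_reg, reg & ~err_mask);
	return (true);
}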

static void
ixlv_add_sysctls(struct ixlv_sc *sc)
{
	device_t dev = sc->dev;
	struct ixl_vsi *vsi = &sc->vsi;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);

	/* Driver statistics sysctls */
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
			CTLFLAG_RD, &sc->watchdog_events,
			"Watchdog timeouts");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
			CTLFLAG_RD, &sc->admin_irq,
			"Admin Queue IRQs handled");

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_size",
			CTLFLAG_RD, &vsi->num_tx_desc, 0,
			"TX ring size");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_size",
			CTLFLAG_RD, &vsi->num_rx_desc, 0,
			"RX ring size");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "current_speed",
			CTLTYPE_STRING | CTLFLAG_RD,
			sc, 0, ixlv_sysctl_current_speed,
			"A", "Current Port Speed");

	ixl_add_sysctls_eth_stats(ctx, child, &vsi->eth_stats);

	/* VSI statistics sysctls */
	vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
	    CTLFLAG_RD, NULL, "VSI-specific statistics");
	ixl_vsi_add_queues_stats(vsi);
}
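
/*
** Example (note, not from the original driver): the nodes registered
** above live under the device's sysctl tree, so assuming the VF
** attaches as ixlv0, they would be read from userland as:
**
**	sysctl dev.ixlv.0.current_speed
**	sysctl dev.ixlv.0.tx_ring_size
**	sysctl dev.ixlv.0.vsi
*/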

static void
ixlv_init_filters(struct ixlv_sc *sc)
{
	/*
	** Allocate the list heads (not whole filter entries) and use
	** M_WAITOK: this runs from attach context where sleeping is
	** allowed, so the allocations cannot fail.
	*/
	sc->mac_filters = malloc(sizeof(*sc->mac_filters),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	SLIST_INIT(sc->mac_filters);
	sc->vlan_filters = malloc(sizeof(*sc->vlan_filters),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	SLIST_INIT(sc->vlan_filters);
}

static void
ixlv_free_filters(struct ixlv_sc *sc)
{
	struct ixlv_mac_filter *f;
	struct ixlv_vlan_filter *v;

	while (!SLIST_EMPTY(sc->mac_filters)) {
		f = SLIST_FIRST(sc->mac_filters);
		SLIST_REMOVE_HEAD(sc->mac_filters, next);
		free(f, M_DEVBUF);
	}
	free(sc->mac_filters, M_DEVBUF);
	while (!SLIST_EMPTY(sc->vlan_filters)) {
		v = SLIST_FIRST(sc->vlan_filters);
		SLIST_REMOVE_HEAD(sc->vlan_filters, next);
		free(v, M_DEVBUF);
	}
	free(sc->vlan_filters, M_DEVBUF);
}

static char *
ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed)
{
	int index;

	/* Order mirrors the VIRTCHNL_LINK_SPEED_* shift values. */
	static char *speeds[] = {
		"Unknown",
		"100 Mbps",
		"1 Gbps",
		"10 Gbps",
		"40 Gbps",
		"20 Gbps",
		"25 Gbps",
	};

	switch (link_speed) {
	case VIRTCHNL_LINK_SPEED_100MB:
		index = 1;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		index = 2;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		index = 3;
		break;
	case VIRTCHNL_LINK_SPEED_40GB:
		index = 4;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
		index = 5;
		break;
	case VIRTCHNL_LINK_SPEED_25GB:
		index = 6;
		break;
	case VIRTCHNL_LINK_SPEED_UNKNOWN:
	default:
		index = 0;
		break;
	}

	return (speeds[index]);
}
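
/*
** Example (note, not from the original driver): a typical use of the
** helper above, e.g. when logging a link-state change reported by
** the PF:
**
**	device_printf(sc->dev, "Link is up, %s\n",
**	    ixlv_vc_speed_to_string(sc->link_speed));
*/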

static int
ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
{
	struct ixlv_sc *sc = (struct ixlv_sc *)arg1;
	int error = 0;

	/*
	** Pass 0 for the length so sysctl_handle_string() sizes the
	** read-only string itself; a fixed length of 8 would truncate
	** "100 Mbps", which needs 9 bytes including the terminator.
	*/
	error = sysctl_handle_string(oidp,
	    ixlv_vc_speed_to_string(sc->link_speed),
	    0, req);
	return (error);
}

#ifdef IXL_DEBUG
/**
 * ixlv_sysctl_qtx_tail_handler
 * Retrieves I40E_QTX_TAIL1 value from hardware
 * for a sysctl.
 */
static int
ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_queue *que;
	int error;
	u32 val;

	que = ((struct ixl_queue *)oidp->oid_arg1);
	if (!que)
		return (0);

	val = rd32(que->vsi->hw, que->txr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}

/**
 * ixlv_sysctl_qrx_tail_handler
 * Retrieves I40E_QRX_TAIL1 value from hardware
 * for a sysctl.
 */
static int
ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_queue *que;
	int error;
	u32 val;

	que = ((struct ixl_queue *)oidp->oid_arg1);
	if (!que)
		return (0);

	val = rd32(que->vsi->hw, que->rxr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}
#endif
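
/*
** Example (note, not from the original driver): under IXL_DEBUG the
** two handlers above would typically be registered once per queue,
** along the lines of (per-queue node "queue_node" and sysctl context
** "ctx" assumed):
**
**	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(queue_node), OID_AUTO,
**	    "qtx_tail", CTLTYPE_UINT | CTLFLAG_RD, que, 0,
**	    ixlv_sysctl_qtx_tail_handler, "IU",
**	    "Queue Transmit Descriptor Tail");
*/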