/******************************************************************************

  Copyright (c) 2013-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixl.h"
#include "ixlv.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
#define IXLV_DRIVER_VERSION_MAJOR       1
#define IXLV_DRIVER_VERSION_MINOR       5
#define IXLV_DRIVER_VERSION_BUILD       4

char ixlv_driver_version[] = __XSTRING(IXLV_DRIVER_VERSION_MAJOR) "."
                             __XSTRING(IXLV_DRIVER_VERSION_MINOR) "."
                             __XSTRING(IXLV_DRIVER_VERSION_BUILD) "-k";
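/* With the values above, __XSTRING expands this to "1.5.4-k". */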

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixlv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, 0, 0, 0},
        /* required last entry */
        {0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixlv_strings[] = {
        "Intel(R) Ethernet Connection 700 Series VF Driver"
};


/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixlv_probe(device_t);
static int      ixlv_attach(device_t);
static int      ixlv_detach(device_t);
static int      ixlv_shutdown(device_t);
static void     ixlv_init_locked(struct ixlv_sc *);
static int      ixlv_allocate_pci_resources(struct ixlv_sc *);
static void     ixlv_free_pci_resources(struct ixlv_sc *);
static int      ixlv_assign_msix(struct ixlv_sc *);
static int      ixlv_init_msix(struct ixlv_sc *);
static int      ixlv_init_taskqueue(struct ixlv_sc *);
static int      ixlv_setup_queues(struct ixlv_sc *);
static void     ixlv_config_rss(struct ixlv_sc *);
static void     ixlv_stop(struct ixlv_sc *);
static void     ixlv_add_multi(struct ixl_vsi *);
static void     ixlv_del_multi(struct ixl_vsi *);
static void     ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que);
static void     ixlv_free_queues(struct ixl_vsi *);
static int      ixlv_setup_interface(device_t, struct ixlv_sc *);
static int      ixlv_teardown_adminq_msix(struct ixlv_sc *);

static int      ixlv_media_change(struct ifnet *);
static void     ixlv_media_status(struct ifnet *, struct ifmediareq *);

static void     ixlv_local_timer(void *);

static int      ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
static int      ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void     ixlv_init_filters(struct ixlv_sc *);
static void     ixlv_free_filters(struct ixlv_sc *);

static void     ixlv_msix_que(void *);
static void     ixlv_msix_adminq(void *);
static void     ixlv_do_adminq(void *, int);
static void     ixlv_do_adminq_locked(struct ixlv_sc *sc);
static void     ixlv_handle_que(void *, int);
static int      ixlv_reset(struct ixlv_sc *);
static int      ixlv_reset_complete(struct i40e_hw *);
static void     ixlv_set_queue_rx_itr(struct ixl_queue *);
static void     ixlv_set_queue_tx_itr(struct ixl_queue *);
static void     ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
                    enum i40e_status_code);
static void     ixlv_configure_itr(struct ixlv_sc *);

static void     ixlv_enable_adminq_irq(struct i40e_hw *);
static void     ixlv_disable_adminq_irq(struct i40e_hw *);
static void     ixlv_enable_queue_irq(struct i40e_hw *, int);
static void     ixlv_disable_queue_irq(struct i40e_hw *, int);

static void     ixlv_setup_vlan_filters(struct ixlv_sc *);
static void     ixlv_register_vlan(void *, struct ifnet *, u16);
static void     ixlv_unregister_vlan(void *, struct ifnet *, u16);

static void     ixlv_init_hw(struct ixlv_sc *);
static int      ixlv_setup_vc(struct ixlv_sc *);
static int      ixlv_vf_config(struct ixlv_sc *);

static void     ixlv_cap_txcsum_tso(struct ixl_vsi *,
                    struct ifnet *, int);

static char *ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed);
static int ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS);

static void     ixlv_add_sysctls(struct ixlv_sc *);
#ifdef IXL_DEBUG
static int      ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int      ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixlv_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, ixlv_probe),
        DEVMETHOD(device_attach, ixlv_attach),
        DEVMETHOD(device_detach, ixlv_detach),
        DEVMETHOD(device_shutdown, ixlv_shutdown),
        {0, 0}
};

static driver_t ixlv_driver = {
        "ixlv", ixlv_methods, sizeof(struct ixlv_sc),
};

devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);

MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
                   "IXLV driver parameters");

/*
** Number of descriptors per ring:
** - TX and RX sizes are independently configurable
*/
static int ixlv_tx_ring_size = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixlv.tx_ring_size", &ixlv_tx_ring_size);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_ring_size, CTLFLAG_RDTUN,
    &ixlv_tx_ring_size, 0, "TX Descriptor Ring Size");

static int ixlv_rx_ring_size = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixlv.rx_ring_size", &ixlv_rx_ring_size);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_ring_size, CTLFLAG_RDTUN,
    &ixlv_rx_ring_size, 0, "RX Descriptor Ring Size");

/* Set to zero to auto calculate */
int ixlv_max_queues = 0;
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixlv_max_queues, 0, "Number of Queues");

/*
** Number of entries in Tx queue buf_ring.
** Increasing this will reduce the number of
** errors when transmitting fragmented UDP
** packets.
*/
static int ixlv_txbrsz = DEFAULT_TXBRSZ;
TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
    &ixlv_txbrsz, 0, "TX Buf Ring Size");

/*
 * Different method for processing TX descriptor
 * completion.
 */
static int ixlv_enable_head_writeback = 0;
TUNABLE_INT("hw.ixlv.enable_head_writeback",
    &ixlv_enable_head_writeback);
SYSCTL_INT(_hw_ixlv, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &ixlv_enable_head_writeback, 0,
    "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");
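/*
 * Background (general i40e behavior, not specific to this file): with head
 * writeback enabled, the hardware periodically writes the index of the last
 * completed TX descriptor back to host memory, so the driver polls that
 * single value instead of scanning the done bits in the descriptors.
 */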

/*
** Controls for Interrupt Throttling
**      - true/false for dynamic adjustment
**      - default values for static ITR
*/
int ixlv_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixlv_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixlv_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixlv_rx_itr, 0, "RX Interrupt Rate");

int ixlv_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixlv_tx_itr, 0, "TX Interrupt Rate");
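/*
 * All of the above are standard FreeBSD loader tunables (CTLFLAG_RDTUN),
 * so they can be set at boot time, e.g. in /boot/loader.conf (values below
 * are illustrative only):
 *
 *   hw.ixlv.tx_ring_size="2048"
 *   hw.ixlv.rx_ring_size="2048"
 *   hw.ixlv.max_queues="4"
 *   hw.ixlv.dynamic_rx_itr="1"
 */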

/*********************************************************************
 *  Device identification routine
 *
 *  ixlv_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixlv_probe(device_t dev)
{
        ixl_vendor_info_t *ent;

        u16     pci_vendor_id, pci_device_id;
        u16     pci_subvendor_id, pci_subdevice_id;
        char    device_name[256];

#if 0
        INIT_DEBUGOUT("ixlv_probe: begin");
#endif

        pci_vendor_id = pci_get_vendor(dev);
        if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
                return (ENXIO);

        pci_device_id = pci_get_device(dev);
        pci_subvendor_id = pci_get_subvendor(dev);
        pci_subdevice_id = pci_get_subdevice(dev);

        ent = ixlv_vendor_info_array;
        while (ent->vendor_id != 0) {
                if ((pci_vendor_id == ent->vendor_id) &&
                    (pci_device_id == ent->device_id) &&

                    ((pci_subvendor_id == ent->subvendor_id) ||
                     (ent->subvendor_id == 0)) &&

                    ((pci_subdevice_id == ent->subdevice_id) ||
                     (ent->subdevice_id == 0))) {
                        sprintf(device_name, "%s, Version - %s",
                                ixlv_strings[ent->index],
                                ixlv_driver_version);
                        device_set_desc_copy(dev, device_name);
                        return (BUS_PROBE_DEFAULT);
                }
                ent++;
        }
        return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_attach(device_t dev)
{
        struct ixlv_sc  *sc;
        struct i40e_hw  *hw;
        struct ixl_vsi  *vsi;
        int             error = 0;

        INIT_DBG_DEV(dev, "begin");

        /* Allocate, clear, and link in our primary soft structure */
        sc = device_get_softc(dev);
        sc->dev = sc->osdep.dev = dev;
        hw = &sc->hw;
        vsi = &sc->vsi;
        vsi->dev = dev;

        /* Initialize hw struct */
        ixlv_init_hw(sc);

        /* Allocate filter lists */
        ixlv_init_filters(sc);

        /* Save this tunable */
        vsi->enable_head_writeback = ixlv_enable_head_writeback;

        /* Core Lock Init */
        mtx_init(&sc->mtx, device_get_nameunit(dev),
            "IXL SC Lock", MTX_DEF);

        /* Set up the timer callout */
        callout_init_mtx(&sc->timer, &sc->mtx, 0);

        /* Do PCI setup - map BAR0, etc */
        if (ixlv_allocate_pci_resources(sc)) {
                device_printf(dev, "%s: Allocation of PCI resources failed\n",
                    __func__);
                error = ENXIO;
                goto err_early;
        }

        INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");

        error = i40e_set_mac_type(hw);
        if (error) {
                device_printf(dev, "%s: set_mac_type failed: %d\n",
                    __func__, error);
                goto err_pci_res;
        }

        error = ixlv_reset_complete(hw);
        if (error) {
                device_printf(dev, "%s: Device is still being reset\n",
                    __func__);
                goto err_pci_res;
        }

        INIT_DBG_DEV(dev, "VF Device is ready for configuration");

        error = ixlv_setup_vc(sc);
        if (error) {
                device_printf(dev, "%s: Error setting up PF comms, %d\n",
                    __func__, error);
                goto err_pci_res;
        }

        INIT_DBG_DEV(dev, "PF API version verified");

        /* Need API version before sending reset message */
        error = ixlv_reset(sc);
        if (error) {
                device_printf(dev, "VF reset failed; reload the driver\n");
                goto err_aq;
        }

        INIT_DBG_DEV(dev, "VF reset complete");

        /* Ask for VF config from PF */
        error = ixlv_vf_config(sc);
        if (error) {
                device_printf(dev, "Error getting configuration from PF: %d\n",
                    error);
                goto err_aq;
        }

        device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n",
            sc->vf_res->num_vsis,
            sc->vf_res->num_queue_pairs,
            sc->vf_res->max_vectors,
            sc->vf_res->rss_key_size,
            sc->vf_res->rss_lut_size);
#ifdef IXL_DEBUG
        device_printf(dev, "Offload flags: 0x%b\n",
            sc->vf_res->vf_offload_flags, IXLV_PRINTF_VF_OFFLOAD_FLAGS);
#endif

        /* got VF config message back from PF, now we can parse it */
        for (int i = 0; i < sc->vf_res->num_vsis; i++) {
                if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
                        sc->vsi_res = &sc->vf_res->vsi_res[i];
        }
        if (!sc->vsi_res) {
                device_printf(dev, "%s: no LAN VSI found\n", __func__);
                error = EIO;
                goto err_res_buf;
        }

        INIT_DBG_DEV(dev, "Resource Acquisition complete");

        /* If no mac address was assigned just make a random one */
        if (!ixlv_check_ether_addr(hw->mac.addr)) {
                u8 addr[ETHER_ADDR_LEN];
                arc4rand(&addr, sizeof(addr), 0);
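                /*
                 * Clear the I/G (multicast) bit and set the U/L
                 * (locally administered) bit: the usual convention
                 * for software-generated MAC addresses.
                 */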
                addr[0] &= 0xFE;
                addr[0] |= 0x02;
                bcopy(addr, hw->mac.addr, sizeof(addr));
        }

        /* Now that the number of queues for this VF is known, set up interrupts */
        sc->msix = ixlv_init_msix(sc);
        /* We fail without MSIX support */
        if (sc->msix == 0) {
                error = ENXIO;
                goto err_res_buf;
        }

        vsi->id = sc->vsi_res->vsi_id;
        vsi->back = (void *)sc;
        vsi->flags |= IXL_FLAGS_IS_VF | IXL_FLAGS_USES_MSIX;

        ixl_vsi_setup_rings_size(vsi, ixlv_tx_ring_size, ixlv_rx_ring_size);

        /* This allocates the memory and early settings */
        if (ixlv_setup_queues(sc) != 0) {
                device_printf(dev, "%s: setup queues failed!\n",
                    __func__);
                error = EIO;
                goto out;
        }

        /* Do queue interrupt setup */
        if (ixlv_assign_msix(sc) != 0) {
                device_printf(dev, "%s: allocating queue interrupts failed!\n",
                    __func__);
                error = ENXIO;
                goto out;
        }

        INIT_DBG_DEV(dev, "Queue memory and interrupts setup");

        /* Setup the stack interface */
        if (ixlv_setup_interface(dev, sc) != 0) {
                device_printf(dev, "%s: setup interface failed!\n",
                    __func__);
                error = EIO;
                goto out;
        }

        INIT_DBG_DEV(dev, "Interface setup complete");

        /* Start AdminQ taskqueue */
        ixlv_init_taskqueue(sc);

        /* We expect a link state message, so schedule the AdminQ task now */
        taskqueue_enqueue(sc->tq, &sc->aq_irq);

        /* Initialize stats */
        bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
        ixlv_add_sysctls(sc);

        /* Register for VLAN events */
        vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
            ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
        vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
            ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

        /* We want AQ enabled early */
        ixlv_enable_adminq_irq(hw);

        /* Set things up to run init */
        sc->init_state = IXLV_INIT_READY;

        ixl_vc_init_mgr(sc, &sc->vc_mgr);

        INIT_DBG_DEV(dev, "end");
        return (error);

out:
        ixlv_free_queues(vsi);
        ixlv_teardown_adminq_msix(sc);
err_res_buf:
        free(sc->vf_res, M_DEVBUF);
err_aq:
        i40e_shutdown_adminq(hw);
err_pci_res:
        ixlv_free_pci_resources(sc);
err_early:
        mtx_destroy(&sc->mtx);
        ixlv_free_filters(sc);
        INIT_DBG_DEV(dev, "end: error %d", error);
        return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_detach(device_t dev)
{
        struct ixlv_sc  *sc = device_get_softc(dev);
        struct ixl_vsi  *vsi = &sc->vsi;
        struct i40e_hw  *hw = &sc->hw;
        enum i40e_status_code   status;

        INIT_DBG_DEV(dev, "begin");

        /* Make sure VLANS are not using driver */
        if (vsi->ifp->if_vlantrunk != NULL) {
                if_printf(vsi->ifp, "Vlan in use, detach first\n");
                return (EBUSY);
        }

        /* Remove all the media and link information */
        ifmedia_removeall(&sc->media);

        /* Stop driver */
        ether_ifdetach(vsi->ifp);
        if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
                mtx_lock(&sc->mtx);
                ixlv_stop(sc);
                mtx_unlock(&sc->mtx);
        }

        /* Unregister VLAN events */
        if (vsi->vlan_attach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
        if (vsi->vlan_detach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

        /* Drain VC mgr */
        callout_drain(&sc->vc_mgr.callout);

        ixlv_disable_adminq_irq(hw);
        ixlv_teardown_adminq_msix(sc);
        /* Drain admin queue taskqueue */
        taskqueue_free(sc->tq);
        status = i40e_shutdown_adminq(&sc->hw);
        if (status != I40E_SUCCESS) {
                device_printf(dev,
                    "i40e_shutdown_adminq() failed with status %s\n",
                    i40e_stat_str(hw, status));
        }

        if_free(vsi->ifp);
        free(sc->vf_res, M_DEVBUF);
        ixlv_free_queues(vsi);
        ixlv_free_pci_resources(sc);
        ixlv_free_filters(sc);

        bus_generic_detach(dev);
        mtx_destroy(&sc->mtx);
        INIT_DBG_DEV(dev, "end");
        return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixlv_shutdown(device_t dev)
{
        struct ixlv_sc  *sc = device_get_softc(dev);

        INIT_DBG_DEV(dev, "begin");

        mtx_lock(&sc->mtx);
        ixlv_stop(sc);
        mtx_unlock(&sc->mtx);

        INIT_DBG_DEV(dev, "end");
        return (0);
}

/*
 * Configure TXCSUM(IPV6) and TSO(4/6)
 *      - the hardware handles these together so we
 *        need to tweak them
 */
static void
ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
        /* Enable/disable TXCSUM/TSO4 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable |= IFCAP_TXCSUM;
                        /* enable TXCSUM, restore TSO if previously enabled */
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                                ifp->if_capenable |= IFCAP_TSO4;
                        }
                }
                else if (mask & IFCAP_TSO4) {
                        ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                        if_printf(ifp,
                            "TSO4 requires txcsum, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM)
                        ifp->if_capenable &= ~IFCAP_TXCSUM;
                else if (mask & IFCAP_TSO4)
                        ifp->if_capenable |= IFCAP_TSO4;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && (ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO4;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
                        if_printf(ifp,
                            "TSO4 requires txcsum, disabling both...\n");
                } else if (mask & IFCAP_TSO4)
                        ifp->if_capenable &= ~IFCAP_TSO4;
        }

        /* Enable/disable TXCSUM_IPV6/TSO6 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                                ifp->if_capenable |= IFCAP_TSO6;
                        }
                } else if (mask & IFCAP_TSO6) {
                        ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                        if_printf(ifp,
                            "TSO6 requires txcsum6, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6)
                        ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
                else if (mask & IFCAP_TSO6)
                        ifp->if_capenable |= IFCAP_TSO6;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && (ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO6;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        if_printf(ifp,
                            "TSO6 requires txcsum6, disabling both...\n");
                } else if (mask & IFCAP_TSO6)
                        ifp->if_capenable &= ~IFCAP_TSO6;
        }
}
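/*
 * Illustration of the coupling above (a sketch using standard ifconfig(8)
 * capability flags; "ixlv0" is a hypothetical interface name): with both
 * txcsum and tso4 enabled,
 *
 *   # ifconfig ixlv0 -txcsum
 *
 * disables both and records IXL_FLAGS_KEEP_TSO4, so a later
 *
 *   # ifconfig ixlv0 txcsum
 *
 * re-enables TSO4 along with the checksum offload. The IPv6 capabilities
 * behave the same way.
 */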

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixlv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        struct ixl_vsi          *vsi = ifp->if_softc;
        struct ixlv_sc  *sc = vsi->back;
        struct ifreq            *ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
        struct ifaddr           *ifa = (struct ifaddr *)data;
        bool                    avoid_reset = FALSE;
#endif
        int                     error = 0;

        switch (command) {

        case SIOCSIFADDR:
#ifdef INET
                if (ifa->ifa_addr->sa_family == AF_INET)
                        avoid_reset = TRUE;
#endif
#ifdef INET6
                if (ifa->ifa_addr->sa_family == AF_INET6)
                        avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
                /*
                ** Calling init results in link renegotiation,
                ** so we avoid doing it when possible.
                */
                if (avoid_reset) {
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                                ixlv_init(vsi);
#ifdef INET
                        if (!(ifp->if_flags & IFF_NOARP))
                                arp_ifinit(ifp, ifa);
#endif
                } else
                        error = ether_ioctl(ifp, command, data);
                break;
#endif
        case SIOCSIFMTU:
                IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
                mtx_lock(&sc->mtx);
                if (ifr->ifr_mtu > IXL_MAX_FRAME -
                    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
                        error = EINVAL;
                        IOCTL_DBG_IF(ifp, "mtu too large");
                } else {
                        IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", (u_long)ifp->if_mtu, ifr->ifr_mtu);
                        // ERJ: Interestingly enough, these types don't match
                        ifp->if_mtu = (u_long)ifr->ifr_mtu;
                        vsi->max_frame_size =
                            ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
                            + ETHER_VLAN_ENCAP_LEN;
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                ixlv_init_locked(sc);
                }
                mtx_unlock(&sc->mtx);
                break;
        case SIOCSIFFLAGS:
                IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
                mtx_lock(&sc->mtx);
                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                                ixlv_init_locked(sc);
                } else
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                ixlv_stop(sc);
                sc->if_flags = ifp->if_flags;
                mtx_unlock(&sc->mtx);
                break;
        case SIOCADDMULTI:
                IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        mtx_lock(&sc->mtx);
                        ixlv_disable_intr(vsi);
                        ixlv_add_multi(vsi);
                        ixlv_enable_intr(vsi);
                        mtx_unlock(&sc->mtx);
                }
                break;
        case SIOCDELMULTI:
                IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
                if (sc->init_state == IXLV_RUNNING) {
                        mtx_lock(&sc->mtx);
                        ixlv_disable_intr(vsi);
                        ixlv_del_multi(vsi);
                        ixlv_enable_intr(vsi);
                        mtx_unlock(&sc->mtx);
                }
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
                error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
                break;
        case SIOCSIFCAP:
        {
                int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");

                ixlv_cap_txcsum_tso(vsi, ifp, mask);

                if (mask & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                if (mask & IFCAP_RXCSUM_IPV6)
                        ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
                if (mask & IFCAP_LRO)
                        ifp->if_capenable ^= IFCAP_LRO;
                if (mask & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                if (mask & IFCAP_VLAN_HWFILTER)
                        ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        ixlv_init(vsi);
                }
                VLAN_CAPABILITIES(ifp);

                break;
        }

        default:
                IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}

/*
** To do a reinit on the VF is unfortunately more complicated
** than on a physical device: we must have the PF more or less
** completely recreate our memory, so many things that were
** done only once at attach in traditional drivers now must be
** redone at each reinitialization. This function does that
** 'prelude' so we can then call the normal locked init code.
*/
int
ixlv_reinit_locked(struct ixlv_sc *sc)
{
        struct i40e_hw          *hw = &sc->hw;
        struct ixl_vsi          *vsi = &sc->vsi;
        struct ifnet            *ifp = vsi->ifp;
        struct ixlv_mac_filter  *mf, *mf_temp;
        struct ixlv_vlan_filter *vf;
        int                     error = 0;

        INIT_DBG_IF(ifp, "begin");

        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                ixlv_stop(sc);

        error = ixlv_reset(sc);

        INIT_DBG_IF(ifp, "VF was reset");

        /* set the state in case we went thru RESET */
        sc->init_state = IXLV_RUNNING;

        /*
        ** Resetting the VF drops all filters from hardware;
        ** we need to mark them to be re-added in init.
        */
        SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
                if (mf->flags & IXL_FILTER_DEL) {
                        SLIST_REMOVE(sc->mac_filters, mf,
                            ixlv_mac_filter, next);
                        free(mf, M_DEVBUF);
                } else
                        mf->flags |= IXL_FILTER_ADD;
        }
        if (vsi->num_vlans != 0)
                SLIST_FOREACH(vf, sc->vlan_filters, next)
                        vf->flags = IXL_FILTER_ADD;
        else { /* clean any stale filters */
                while (!SLIST_EMPTY(sc->vlan_filters)) {
                        vf = SLIST_FIRST(sc->vlan_filters);
                        SLIST_REMOVE_HEAD(sc->vlan_filters, next);
                        free(vf, M_DEVBUF);
                }
        }

        ixlv_enable_adminq_irq(hw);
        ixl_vc_flush(&sc->vc_mgr);

        INIT_DBG_IF(ifp, "end");
        return (error);
}

static void
ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
        enum i40e_status_code code)
{
        struct ixlv_sc *sc;

        sc = arg;

        /*
         * Ignore the "Adapter Stopped" message, as it occurs when an
         * ifconfig down is issued while a command is in progress; we
         * don't print an error in that case.
         */
        if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
                if_printf(sc->vsi.ifp,
                    "Error %s waiting for PF to complete operation %d\n",
                    i40e_stat_str(&sc->hw, code), cmd->request);
        }
}

static void
ixlv_init_locked(struct ixlv_sc *sc)
{
        struct i40e_hw          *hw = &sc->hw;
        struct ixl_vsi          *vsi = &sc->vsi;
        struct ixl_queue        *que = vsi->queues;
        struct ifnet            *ifp = vsi->ifp;
        int                      error = 0;

        INIT_DBG_IF(ifp, "begin");

        IXLV_CORE_LOCK_ASSERT(sc);

        /* Do a reinit first if an init has already been done */
        if ((sc->init_state == IXLV_RUNNING) ||
            (sc->init_state == IXLV_RESET_REQUIRED) ||
            (sc->init_state == IXLV_RESET_PENDING))
                error = ixlv_reinit_locked(sc);
        /* Don't bother with init if we failed reinit */
        if (error)
                goto init_done;

        /* Remove existing MAC filter if new MAC addr is set */
        if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
                error = ixlv_del_mac_filter(sc, hw->mac.addr);
                if (error == 0)
                        ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
                            IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
                            sc);
        }

        /* Check for an LAA mac address... */
        bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);

        ifp->if_hwassist = 0;
        if (ifp->if_capenable & IFCAP_TSO)
                ifp->if_hwassist |= CSUM_TSO;
        if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
        if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
                ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;

        /* Add mac filter for this VF to PF */
        if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
                error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
                if (!error || error == EEXIST)
                        ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
                            IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
                            sc);
        }

        /* Setup vlan's if needed */
        ixlv_setup_vlan_filters(sc);

        /* Prepare the queues for operation */
        for (int i = 0; i < vsi->num_queues; i++, que++) {
                struct  rx_ring *rxr = &que->rxr;

                ixl_init_tx_ring(que);

                if (vsi->max_frame_size <= MCLBYTES)
                        rxr->mbuf_sz = MCLBYTES;
                else
                        rxr->mbuf_sz = MJUMPAGESIZE;
                ixl_init_rx_ring(que);
        }

        /* Set initial ITR values */
        ixlv_configure_itr(sc);

        /* Configure queues */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
            IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);

        /* Set up RSS */
        ixlv_config_rss(sc);

        /* Map vectors */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
            IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);

        /* Enable queues */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
            IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);

        /* Start the local timer */
        callout_reset(&sc->timer, hz, ixlv_local_timer, sc);

        sc->init_state = IXLV_RUNNING;

init_done:
        INIT_DBG_IF(ifp, "end");
        return;
}

/*
**  Init entry point for the stack
*/
void
ixlv_init(void *arg)
{
        struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
        struct ixlv_sc *sc = vsi->back;
        int retries = 0;

        /* Prevent init from running again while waiting for AQ calls
         * made in init_locked() to complete. */
        mtx_lock(&sc->mtx);
        if (sc->init_in_progress) {
                mtx_unlock(&sc->mtx);
                return;
        } else
                sc->init_in_progress = true;

        ixlv_init_locked(sc);
        mtx_unlock(&sc->mtx);

        /* Wait for init_locked to finish */
        while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
            && ++retries < IXLV_MAX_INIT_WAIT) {
                i40e_msec_pause(25);
        }
        if (retries >= IXLV_MAX_INIT_WAIT) {
                if_printf(vsi->ifp,
                    "Init failed to complete in allotted time!\n");
        }

        mtx_lock(&sc->mtx);
        sc->init_in_progress = false;
        mtx_unlock(&sc->mtx);
}

/*
 * ixlv_attach() helper function; gathers information about
 * the (virtual) hardware for use elsewhere in the driver.
 */
static void
ixlv_init_hw(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;

        /* Save off the information about this board */
        hw->vendor_id = pci_get_vendor(dev);
        hw->device_id = pci_get_device(dev);
        hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
        hw->subsystem_vendor_id =
            pci_read_config(dev, PCIR_SUBVEND_0, 2);
        hw->subsystem_device_id =
            pci_read_config(dev, PCIR_SUBDEV_0, 2);

        hw->bus.device = pci_get_slot(dev);
        hw->bus.func = pci_get_function(dev);
}

/*
 * ixlv_attach() helper function; initializes the admin queue
 * and attempts to establish contact with the PF by
 * retrying the initial "API version" message several times
 * or until the PF responds.
 */
static int
ixlv_setup_vc(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;
        int error = 0, ret_error = 0, asq_retries = 0;
        bool send_api_ver_retried = false;

        /* Need to set these AQ parameters before initializing AQ */
        hw->aq.num_arq_entries = IXL_AQ_LEN;
        hw->aq.num_asq_entries = IXL_AQ_LEN;
        hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
        hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

        for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
                /* Initialize admin queue */
                error = i40e_init_adminq(hw);
                if (error) {
                        device_printf(dev, "%s: init_adminq failed: %d\n",
                            __func__, error);
                        ret_error = 1;
                        continue;
                }

                INIT_DBG_DEV(dev, "Initialized Admin Queue; starting"
                    " send_api_ver attempt %d", i+1);

retry_send:
                /* Send VF's API version */
                error = ixlv_send_api_ver(sc);
                if (error) {
                        i40e_shutdown_adminq(hw);
                        ret_error = 2;
                        device_printf(dev, "%s: unable to send api"
                            " version to PF on attempt %d, error %d\n",
                            __func__, i+1, error);
                }

                asq_retries = 0;
                while (!i40e_asq_done(hw)) {
                        if (++asq_retries > IXLV_AQ_MAX_ERR) {
                                i40e_shutdown_adminq(hw);
                                device_printf(dev, "Admin Queue timeout "
                                    "(waiting for send_api_ver), %d more tries...\n",
                                    IXLV_AQ_MAX_ERR - (i + 1));
                                ret_error = 3;
                                break;
                        }
                        i40e_msec_pause(10);
                }
                if (asq_retries > IXLV_AQ_MAX_ERR)
                        continue;

                INIT_DBG_DEV(dev, "Sent API version message to PF");

                /* Verify that the VF accepts the PF's API version */
                error = ixlv_verify_api_ver(sc);
                if (error == ETIMEDOUT) {
                        if (!send_api_ver_retried) {
                                /* Resend message, one more time */
                                send_api_ver_retried = true;
                                device_printf(dev,
                                    "%s: Timeout while verifying API version on first"
                                    " try!\n", __func__);
                                goto retry_send;
                        } else {
                                device_printf(dev,
                                    "%s: Timeout while verifying API version on second"
                                    " try!\n", __func__);
                                ret_error = 4;
                                break;
                        }
                }
                if (error) {
                        device_printf(dev,
                            "%s: Unable to verify API version,"
                            " error %s\n", __func__, i40e_stat_str(hw, error));
                        ret_error = 5;
                }
                break;
        }

        if (ret_error >= 4)
                i40e_shutdown_adminq(hw);
        return (ret_error);
}

/*
 * ixlv_attach() helper function; asks the PF for this VF's
 * configuration, and saves the information if it receives it.
 */
static int
ixlv_vf_config(struct ixlv_sc *sc)
{
        struct i40e_hw *hw = &sc->hw;
        device_t dev = sc->dev;
        int bufsz, error = 0, ret_error = 0;
        int asq_retries, retried = 0;

retry_config:
        error = ixlv_send_vf_config_msg(sc);
        if (error) {
                device_printf(dev,
                    "%s: Unable to send VF config request, attempt %d,"
                    " error %d\n", __func__, retried + 1, error);
                ret_error = 2;
        }

        asq_retries = 0;
        while (!i40e_asq_done(hw)) {
                if (++asq_retries > IXLV_AQ_MAX_ERR) {
                        device_printf(dev, "%s: Admin Queue timeout "
                            "(waiting for send_vf_config_msg), attempt %d\n",
                            __func__, retried + 1);
                        ret_error = 3;
                        goto fail;
                }
                i40e_msec_pause(10);
        }

        INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
            retried + 1);

        if (!sc->vf_res) {
                bufsz = sizeof(struct virtchnl_vf_resource) +
                    (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
                sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
                if (!sc->vf_res) {
                        device_printf(dev,
                            "%s: Unable to allocate memory for VF configuration"
                            " message from PF on attempt %d\n", __func__, retried + 1);
                        ret_error = 1;
                        goto fail;
                }
        }

        /* Check for VF config response */
        error = ixlv_get_vf_config(sc);
        if (error == ETIMEDOUT) {
                /* The 1st time we timeout, send the configuration message again */
                if (!retried) {
                        retried++;
                        goto retry_config;
                }
                device_printf(dev,
                    "%s: ixlv_get_vf_config() timed out waiting for a response\n",
                    __func__);
        }
        if (error) {
                device_printf(dev,
                    "%s: Unable to get VF configuration from PF after %d tries!\n",
                    __func__, retried + 1);
                ret_error = 4;
        }
        goto done;

fail:
        free(sc->vf_res, M_DEVBUF);
done:
        return (ret_error);
}

/*
 * Allocate MSI/X vectors, setup the AQ vector early
 */
static int
ixlv_init_msix(struct ixlv_sc *sc)
{
        device_t dev = sc->dev;
        int rid, want, vectors, queues, available;
        int auto_max_queues;

        rid = PCIR_BAR(IXL_MSIX_BAR);
        sc->msix_mem = bus_alloc_resource_any(dev,
            SYS_RES_MEMORY, &rid, RF_ACTIVE);
        if (!sc->msix_mem) {
                /* May not be enabled */
                device_printf(sc->dev,
                    "Unable to map MSIX table\n");
                goto fail;
        }

        available = pci_msix_count(dev);
        if (available == 0) { /* system has msix disabled */
                bus_release_resource(dev, SYS_RES_MEMORY,
                    rid, sc->msix_mem);
                sc->msix_mem = NULL;
                goto fail;
        }

        /* Clamp queues to number of CPUs and # of MSI-X vectors available */
        auto_max_queues = min(mp_ncpus, available - 1);
        /* Clamp queues to # assigned to VF by PF */
        auto_max_queues = min(auto_max_queues, sc->vf_res->num_queue_pairs);

        /* Override with tunable value if tunable is less than autoconfig count */
        if ((ixlv_max_queues != 0) && (ixlv_max_queues <= auto_max_queues))
                queues = ixlv_max_queues;
        /* Use autoconfig amount if that's lower */
        else if ((ixlv_max_queues != 0) && (ixlv_max_queues > auto_max_queues)) {
                device_printf(dev, "ixlv_max_queues (%d) is too large, using "
                    "autoconfig amount (%d)...\n",
                    ixlv_max_queues, auto_max_queues);
                queues = auto_max_queues;
        }
        /* Limit maximum auto-configured queues to 8 if no user value is set */
        else
                queues = min(auto_max_queues, 8);

#ifdef  RSS
        /* If we're doing RSS, clamp at the number of RSS buckets */
        if (queues > rss_getnumbuckets())
                queues = rss_getnumbuckets();
#endif

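        /*
         * Worked example (illustrative numbers only): with 8 CPUs, 16
         * MSI-X vectors available and 4 queue pairs granted by the PF,
         * auto_max_queues = min(min(8, 15), 4) = 4; with no tunable
         * set, queues = min(4, 8) = 4 and want = 4 + 1 = 5 (one vector
         * per queue pair plus one for the admin queue).
         */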
1272         /*
1273         ** Want one vector (RX/TX pair) per queue
1274         ** plus an additional for the admin queue.
1275         */
1276         want = queues + 1;
1277         if (want <= available)  /* Have enough */
1278                 vectors = want;
1279         else {
1280                 device_printf(sc->dev,
1281                     "MSIX Configuration Problem, "
1282                     "%d vectors available but %d wanted!\n",
1283                     available, want);
1284                 goto fail;
1285         }
1286
1287 #ifdef RSS
1288         /*
1289         * If we're doing RSS, the number of queues needs to
1290         * match the number of RSS buckets that are configured.
1291         *
1292         * + If there's more queues than RSS buckets, we'll end
1293         *   up with queues that get no traffic.
1294         *
1295         * + If there's more RSS buckets than queues, we'll end
1296         *   up having multiple RSS buckets map to the same queue,
1297         *   so there'll be some contention.
1298         */
1299         if (queues != rss_getnumbuckets()) {
1300                 device_printf(dev,
1301                     "%s: queues (%d) != RSS buckets (%d)"
1302                     "; performance will be impacted.\n",
1303                      __func__, queues, rss_getnumbuckets());
1304         }
1305 #endif
1306
1307         if (pci_alloc_msix(dev, &vectors) == 0) {
1308                 device_printf(sc->dev,
1309                     "Using MSIX interrupts with %d vectors\n", vectors);
1310                 sc->msix = vectors;
1311                 sc->vsi.num_queues = queues;
1312         }
1313
1314         /* Next we need to setup the vector for the Admin Queue */
1315         rid = 1;        /* zero vector + 1 */
1316         sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1317             &rid, RF_SHAREABLE | RF_ACTIVE);
1318         if (sc->res == NULL) {
1319                 device_printf(dev, "Unable to allocate"
1320                     " bus resource: AQ interrupt \n");
1321                 goto fail;
1322         }
1323         if (bus_setup_intr(dev, sc->res,
1324             INTR_TYPE_NET | INTR_MPSAFE, NULL,
1325             ixlv_msix_adminq, sc, &sc->tag)) {
1326                 sc->res = NULL;
1327                 device_printf(dev, "Failed to register AQ handler");
1328                 goto fail;
1329         }
1330         bus_describe_intr(dev, sc->res, sc->tag, "adminq");
1331
1332         return (vectors);
1333
1334 fail:
1335         /* The VF driver MUST use MSIX */
1336         return (0);
1337 }
1338
1339 static int
1340 ixlv_allocate_pci_resources(struct ixlv_sc *sc)
1341 {
1342         int             rid;
1343         device_t        dev = sc->dev;
1344
1345         rid = PCIR_BAR(0);
1346         sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1347             &rid, RF_ACTIVE);
1348
1349         if (!(sc->pci_mem)) {
1350                 device_printf(dev, "Unable to allocate bus resource: memory\n");
1351                 return (ENXIO);
1352         }
1353
1354         sc->osdep.mem_bus_space_tag =
1355                 rman_get_bustag(sc->pci_mem);
1356         sc->osdep.mem_bus_space_handle =
1357                 rman_get_bushandle(sc->pci_mem);
1358         sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
1359         sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
1360         sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
1361         sc->hw.back = &sc->osdep;
1362
1363         ixl_set_busmaster(dev);
1364         ixl_set_msix_enable(dev);
1365
1366         /* Disable adminq interrupts (just in case) */
1367         ixlv_disable_adminq_irq(&sc->hw);
1368
1369         return (0);
1370 }
1371
1372 /*
1373  * Free MSI-X related resources for a single queue
1374  */
1375 static void
1376 ixlv_free_msix_resources(struct ixlv_sc *sc, struct ixl_queue *que)
1377 {
1378         device_t                dev = sc->dev;
1379
1380         /*
1381         **  Release all msix queue resources:
1382         */
1383         if (que->tag != NULL) {
1384                 bus_teardown_intr(dev, que->res, que->tag);
1385                 que->tag = NULL;
1386         }
1387         if (que->res != NULL) {
1388                 int rid = que->msix + 1;
1389                 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1390                 que->res = NULL;
1391         }
1392         if (que->tq != NULL) {
1393                 taskqueue_free(que->tq);
1394                 que->tq = NULL;
1395         }
1396 }
1397
1398 static void
1399 ixlv_free_pci_resources(struct ixlv_sc *sc)
1400 {
1401         device_t                dev = sc->dev;
1402
1403         pci_release_msi(dev);
1404
1405         if (sc->msix_mem != NULL)
1406                 bus_release_resource(dev, SYS_RES_MEMORY,
1407                     PCIR_BAR(IXL_MSIX_BAR), sc->msix_mem);
1408
1409         if (sc->pci_mem != NULL)
1410                 bus_release_resource(dev, SYS_RES_MEMORY,
1411                     PCIR_BAR(0), sc->pci_mem);
1412 }
1413
1414 /*
1415  * Create taskqueue and tasklet for Admin Queue interrupts.
1416  */
1417 static int
1418 ixlv_init_taskqueue(struct ixlv_sc *sc)
1419 {
1420         int error = 0;
1421
1422         TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);
1423
1424         sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1425             taskqueue_thread_enqueue, &sc->tq);
1426         taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
1427             device_get_nameunit(sc->dev));
1428
1429         return (error);
1430 }
1431
1432 /*********************************************************************
1433  *
1434  *  Setup MSIX Interrupt resources and handlers for the VSI queues
1435  *
1436  **********************************************************************/
1437 static int
1438 ixlv_assign_msix(struct ixlv_sc *sc)
1439 {
1440         device_t        dev = sc->dev;
1441         struct          ixl_vsi *vsi = &sc->vsi;
1442         struct          ixl_queue *que = vsi->queues;
1443         struct          tx_ring  *txr;
1444         int             error, rid, vector = 1;
1445 #ifdef  RSS
1446         cpuset_t        cpu_mask;
1447 #endif
1448
1449         for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1450                 int cpu_id = i;
1451                 rid = vector + 1;
1452                 txr = &que->txr;
1453                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1454                     RF_SHAREABLE | RF_ACTIVE);
1455                 if (que->res == NULL) {
                        device_printf(dev, "Unable to allocate"
                            " bus resource: que interrupt [%d]\n", vector);
1458                         return (ENXIO);
1459                 }
1460                 /* Set the handler function */
1461                 error = bus_setup_intr(dev, que->res,
1462                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
1463                     ixlv_msix_que, que, &que->tag);
1464                 if (error) {
1465                         que->tag = NULL;
                        device_printf(dev, "Failed to register que handler\n");
1467                         return (error);
1468                 }
1469                 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1470                 /* Bind the vector to a CPU */
1471 #ifdef RSS
1472                 cpu_id = rss_getcpu(i % rss_getnumbuckets());
1473 #endif
1474                 bus_bind_intr(dev, que->res, cpu_id);
1475                 que->msix = vector;
1476                 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1477                 TASK_INIT(&que->task, 0, ixlv_handle_que, que);
1478                 que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
1479                     taskqueue_thread_enqueue, &que->tq);
1480 #ifdef RSS
1481                 CPU_SETOF(cpu_id, &cpu_mask);
1482                 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
1483                     &cpu_mask, "%s (bucket %d)",
1484                     device_get_nameunit(dev), cpu_id);
1485 #else
1486                 taskqueue_start_threads(&que->tq, 1, PI_NET,
1487                     "%s que", device_get_nameunit(dev));
1488 #endif
1489
1490         }
1491
1492         return (0);
1493 }
1494
1495 /*
1496 ** Requests a VF reset from the PF.
1497 **
1498 ** Requires the VF's Admin Queue to be initialized.
1499 */
1500 static int
1501 ixlv_reset(struct ixlv_sc *sc)
1502 {
1503         struct i40e_hw  *hw = &sc->hw;
1504         device_t        dev = sc->dev;
1505         int             error = 0;
1506
1507         /* Ask the PF to reset us if we are initiating */
1508         if (sc->init_state != IXLV_RESET_PENDING)
1509                 ixlv_request_reset(sc);
1510
1511         i40e_msec_pause(100);
1512         error = ixlv_reset_complete(hw);
1513         if (error) {
1514                 device_printf(dev, "%s: VF reset failed\n",
1515                     __func__);
1516                 return (error);
1517         }
1518
1519         error = i40e_shutdown_adminq(hw);
1520         if (error) {
1521                 device_printf(dev, "%s: shutdown_adminq failed: %d\n",
1522                     __func__, error);
1523                 return (error);
1524         }
1525
1526         error = i40e_init_adminq(hw);
1527         if (error) {
1528                 device_printf(dev, "%s: init_adminq failed: %d\n",
1529                     __func__, error);
                return (error);
1531         }
1532
1533         return (0);
1534 }
1535
1536 static int
1537 ixlv_reset_complete(struct i40e_hw *hw)
1538 {
1539         u32 reg;
1540
1541         /* Wait up to ~10 seconds */
1542         for (int i = 0; i < 100; i++) {
1543                 reg = rd32(hw, I40E_VFGEN_RSTAT) &
1544                     I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1545
1546                 if ((reg == VIRTCHNL_VFR_VFACTIVE) ||
1547                     (reg == VIRTCHNL_VFR_COMPLETED))
1548                         return (0);
1549                 i40e_msec_pause(100);
1550         }
1551
1552         return (EBUSY);
1553 }
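
/*
 * Worst case, ixlv_reset_complete() polls 100 times with a 100 ms
 * pause between reads, i.e. roughly 100 * 100 ms = 10 s before giving
 * up with EBUSY (hence the "~10 seconds" comment above).
 */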
1554
1555
1556 /*********************************************************************
1557  *
1558  *  Setup networking device structure and register an interface.
1559  *
1560  **********************************************************************/
1561 static int
1562 ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
1563 {
1564         struct ifnet            *ifp;
1565         struct ixl_vsi          *vsi = &sc->vsi;
1566         struct ixl_queue        *que = vsi->queues;
1567
1568         INIT_DBG_DEV(dev, "begin");
1569
1570         ifp = vsi->ifp = if_alloc(IFT_ETHER);
1571         if (ifp == NULL) {
1572                 device_printf(dev, "%s: could not allocate ifnet"
1573                     " structure!\n", __func__);
1574                 return (-1);
1575         }
1576
1577         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1578
1579         ifp->if_mtu = ETHERMTU;
1580 #if __FreeBSD_version >= 1100000
1581         ifp->if_baudrate = IF_Gbps(40);
1582 #else
1583         if_initbaudrate(ifp, IF_Gbps(40));
1584 #endif
1585         ifp->if_init = ixlv_init;
1586         ifp->if_softc = vsi;
1587         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1588         ifp->if_ioctl = ixlv_ioctl;
1589
1590 #if __FreeBSD_version >= 1100000
1591         if_setgetcounterfn(ifp, ixl_get_counter);
1592 #endif
1593
1594         ifp->if_transmit = ixl_mq_start;
1595
1596         ifp->if_qflush = ixl_qflush;
1597         ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2;
1598
1599         ether_ifattach(ifp, sc->hw.mac.addr);
1600
1601         vsi->max_frame_size =
1602             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1603             + ETHER_VLAN_ENCAP_LEN;
1604
1605         ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
1606         ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
1607         ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE;
1608
1609         /*
1610          * Tell the upper layer(s) we support long frames.
1611          */
1612         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1613
1614         ifp->if_capabilities |= IFCAP_HWCSUM;
1615         ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1616         ifp->if_capabilities |= IFCAP_TSO;
1617         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1618
1619         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1620                              |  IFCAP_VLAN_HWTSO
1621                              |  IFCAP_VLAN_MTU
1622                              |  IFCAP_VLAN_HWCSUM
1623                              |  IFCAP_LRO;
1624         ifp->if_capenable = ifp->if_capabilities;
1625
        /*
        ** Don't turn this on by default; if vlans are
        ** created on another pseudo device (e.g. lagg),
        ** then vlan events are not passed through, breaking
        ** operation, but with HW FILTER off it works. If
        ** using vlans directly on the ixl driver you can
        ** enable this and get full hardware tag filtering.
        */
1634         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1635
1636         /*
1637          * Specify the media types supported by this adapter and register
1638          * callbacks to update media and link information
1639          */
1640         ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
1641                      ixlv_media_status);
1642
1643         /* Media types based on reported link speed over AdminQ */
1644         ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1645         ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1646         ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1647         ifmedia_add(&sc->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
1648         ifmedia_add(&sc->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
1649
1650         ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1651         ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1652
1653         INIT_DBG_DEV(dev, "end");
1654         return (0);
1655 }
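
/*
 * For reference, the max_frame_size set above works out, at the
 * default MTU, to 1500 (ETHERMTU) + 14 (ETHER_HDR_LEN) +
 * 4 (ETHER_CRC_LEN) + 4 (ETHER_VLAN_ENCAP_LEN) = 1522 bytes.
 */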
1656
1657 /*
1658 ** Allocate and setup a single queue
1659 */
1660 static int
1661 ixlv_setup_queue(struct ixlv_sc *sc, struct ixl_queue *que)
1662 {
1663         device_t                dev = sc->dev;
1664         struct tx_ring          *txr;
1665         struct rx_ring          *rxr;
1666         int                     rsize, tsize;
1667         int                     error = I40E_SUCCESS;
1668
1669         txr = &que->txr;
1670         txr->que = que;
1671         txr->tail = I40E_QTX_TAIL1(que->me);
1672         /* Initialize the TX lock */
1673         snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
1674             device_get_nameunit(dev), que->me);
1675         mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
1676         /*
1677          * Create the TX descriptor ring
1678          *
1679          * In Head Writeback mode, the descriptor ring is one bigger
1680          * than the number of descriptors for space for the HW to
1681          * write back index of last completed descriptor.
1682          */
1683         if (sc->vsi.enable_head_writeback) {
1684                 tsize = roundup2((que->num_tx_desc *
1685                     sizeof(struct i40e_tx_desc)) +
1686                     sizeof(u32), DBA_ALIGN);
1687         } else {
1688                 tsize = roundup2((que->num_tx_desc *
1689                     sizeof(struct i40e_tx_desc)), DBA_ALIGN);
1690         }
1691         if (i40e_allocate_dma_mem(&sc->hw,
1692             &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
1693                 device_printf(dev,
1694                     "Unable to allocate TX Descriptor memory\n");
1695                 error = ENOMEM;
1696                 goto err_destroy_tx_mtx;
1697         }
1698         txr->base = (struct i40e_tx_desc *)txr->dma.va;
1699         bzero((void *)txr->base, tsize);
1700         /* Now allocate transmit soft structs for the ring */
1701         if (ixl_allocate_tx_data(que)) {
1702                 device_printf(dev,
1703                     "Critical Failure setting up TX structures\n");
1704                 error = ENOMEM;
1705                 goto err_free_tx_dma;
1706         }
1707         /* Allocate a buf ring */
1708         txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
1709             M_WAITOK, &txr->mtx);
1710         if (txr->br == NULL) {
1711                 device_printf(dev,
1712                     "Critical Failure setting up TX buf ring\n");
1713                 error = ENOMEM;
1714                 goto err_free_tx_data;
1715         }
1716
1717         /*
1718          * Next the RX queues...
1719          */
1720         rsize = roundup2(que->num_rx_desc *
1721             sizeof(union i40e_rx_desc), DBA_ALIGN);
1722         rxr = &que->rxr;
1723         rxr->que = que;
1724         rxr->tail = I40E_QRX_TAIL1(que->me);
1725
1726         /* Initialize the RX side lock */
1727         snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
1728             device_get_nameunit(dev), que->me);
1729         mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
1730
1731         if (i40e_allocate_dma_mem(&sc->hw,
            &rxr->dma, i40e_mem_reserved, rsize, 4096)) { /* XXX: should this be DBA_ALIGN? */
1733                 device_printf(dev,
1734                     "Unable to allocate RX Descriptor memory\n");
1735                 error = ENOMEM;
1736                 goto err_destroy_rx_mtx;
1737         }
1738         rxr->base = (union i40e_rx_desc *)rxr->dma.va;
1739         bzero((void *)rxr->base, rsize);
1740
1741         /* Allocate receive soft structs for the ring */
1742         if (ixl_allocate_rx_data(que)) {
1743                 device_printf(dev,
1744                     "Critical Failure setting up receive structs\n");
1745                 error = ENOMEM;
1746                 goto err_free_rx_dma;
1747         }
1748
1749         return (0);
1750
1751 err_free_rx_dma:
1752         i40e_free_dma_mem(&sc->hw, &rxr->dma);
1753 err_destroy_rx_mtx:
1754         mtx_destroy(&rxr->mtx);
1755         /* err_free_tx_buf_ring */
1756         buf_ring_free(txr->br, M_DEVBUF);
1757 err_free_tx_data:
1758         ixl_free_que_tx(que);
1759 err_free_tx_dma:
1760         i40e_free_dma_mem(&sc->hw, &txr->dma);
1761 err_destroy_tx_mtx:
1762         mtx_destroy(&txr->mtx);
1763
1764         return (error);
1765 }
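
/*
 * Example of the TX ring sizing above, assuming the 16-byte
 * i40e_tx_desc and a 1024-descriptor ring: 1024 * 16 = 16384 bytes,
 * plus sizeof(u32) for the head-writeback index when that mode is
 * enabled, with the sum rounded up to DBA_ALIGN.
 */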
1766
1767 /*
1768 ** Allocate and setup the interface queues
1769 */
1770 static int
1771 ixlv_setup_queues(struct ixlv_sc *sc)
1772 {
1773         device_t                dev = sc->dev;
1774         struct ixl_vsi          *vsi;
1775         struct ixl_queue        *que;
1776         int                     i;
1777         int                     error = I40E_SUCCESS;
1778
1779         vsi = &sc->vsi;
1780         vsi->back = (void *)sc;
1781         vsi->hw = &sc->hw;
1782         vsi->num_vlans = 0;
1783
1784         /* Get memory for the station queues */
1785         if (!(vsi->queues =
1786                 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
1787                 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1788                         device_printf(dev, "Unable to allocate queue memory\n");
                        return (ENOMEM);
1790         }
1791
1792         for (i = 0; i < vsi->num_queues; i++) {
1793                 que = &vsi->queues[i];
1794                 que->num_tx_desc = vsi->num_tx_desc;
1795                 que->num_rx_desc = vsi->num_rx_desc;
1796                 que->me = i;
1797                 que->vsi = vsi;
1798
1799                 if (ixlv_setup_queue(sc, que)) {
1800                         error = ENOMEM;
1801                         goto err_free_queues;
1802                 }
1803         }
1804
1805         return (0);
1806
1807 err_free_queues:
1808         while (i--)
1809                 ixlv_free_queue(sc, &vsi->queues[i]);
1810
1811         free(vsi->queues, M_DEVBUF);
1812
1813         return (error);
1814 }
1815
/*
** This routine is run via a vlan config EVENT; it
** enables us to use the HW Filter table since we can
** get the vlan id. This just creates the entry in
** the soft version of the VFTA; init will repopulate
** the real table.
*/
1823 static void
1824 ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1825 {
1826         struct ixl_vsi          *vsi = arg;
1827         struct ixlv_sc          *sc = vsi->back;
1828         struct ixlv_vlan_filter *v;
1829
1831         if (ifp->if_softc != arg)   /* Not our event */
1832                 return;
1833
1834         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1835                 return;
1836
        mtx_lock(&sc->mtx);
        /* Sanity check - make sure it doesn't already exist */
        SLIST_FOREACH(v, sc->vlan_filters, next) {
                if (v->vlan == vtag) {
                        mtx_unlock(&sc->mtx);
                        return;
                }
        }

        v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (v == NULL) {
                device_printf(sc->dev, "Unable to allocate VLAN filter!\n");
                mtx_unlock(&sc->mtx);
                return;
        }
        ++vsi->num_vlans;
        v->vlan = vtag;
        v->flags = IXL_FILTER_ADD;
        SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
        ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
            IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
        mtx_unlock(&sc->mtx);
1852         return;
1853 }
1854
/*
** This routine is run via a vlan
** unconfig EVENT; it removes our
** entry from the soft VFTA.
*/
1860 static void
1861 ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1862 {
1863         struct ixl_vsi          *vsi = arg;
1864         struct ixlv_sc          *sc = vsi->back;
1865         struct ixlv_vlan_filter *v;
1866         int                     i = 0;
1867         
1868         if (ifp->if_softc != arg)
1869                 return;
1870
1871         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1872                 return;
1873
1874         mtx_lock(&sc->mtx);
1875         SLIST_FOREACH(v, sc->vlan_filters, next) {
1876                 if (v->vlan == vtag) {
1877                         v->flags = IXL_FILTER_DEL;
1878                         ++i;
1879                         --vsi->num_vlans;
1880                 }
1881         }
1882         if (i)
1883                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
1884                     IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
1885         mtx_unlock(&sc->mtx);
1886         return;
1887 }
1888
1889 /*
1890 ** Get a new filter and add it to the mac filter list.
1891 */
1892 static struct ixlv_mac_filter *
1893 ixlv_get_mac_filter(struct ixlv_sc *sc)
1894 {
1895         struct ixlv_mac_filter  *f;
1896
1897         f = malloc(sizeof(struct ixlv_mac_filter),
1898             M_DEVBUF, M_NOWAIT | M_ZERO);
1899         if (f)
1900                 SLIST_INSERT_HEAD(sc->mac_filters, f, next);
1901
1902         return (f);
1903 }
1904
1905 /*
1906 ** Find the filter with matching MAC address
1907 */
1908 static struct ixlv_mac_filter *
1909 ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
1910 {
1911         struct ixlv_mac_filter  *f;
1912         bool                            match = FALSE;
1913
1914         SLIST_FOREACH(f, sc->mac_filters, next) {
1915                 if (cmp_etheraddr(f->macaddr, macaddr)) {
1916                         match = TRUE;
1917                         break;
1918                 }
1919         }       
1920
1921         if (!match)
1922                 f = NULL;
1923         return (f);
1924 }
1925
1926 static int
1927 ixlv_teardown_adminq_msix(struct ixlv_sc *sc)
1928 {
1929         device_t                dev = sc->dev;
1930         int                     error = 0;
1931
        if (sc->tag != NULL) {
                error = bus_teardown_intr(dev, sc->res, sc->tag);
                if (error) {
                        device_printf(dev, "bus_teardown_intr() for"
                            " interrupt 0 failed\n");
                        // return (ENXIO);
                }
                sc->tag = NULL;
        }
        if (sc->res != NULL) {
                error = bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
                if (error) {
                        device_printf(dev, "bus_release_resource() for"
                            " interrupt 0 failed\n");
                        // return (ENXIO);
                }
                sc->res = NULL;
        }
1950
        return (0);
}
1954
1955 /*
1956 ** Admin Queue interrupt handler
1957 */
1958 static void
1959 ixlv_msix_adminq(void *arg)
1960 {
1961         struct ixlv_sc  *sc = arg;
1962         struct i40e_hw  *hw = &sc->hw;
        u32             reg;

        /* Reading ICR01 acknowledges (clears) the pending causes */
        (void)rd32(hw, I40E_VFINT_ICR01);
        (void)rd32(hw, I40E_VFINT_ICR0_ENA1);

        reg = rd32(hw, I40E_VFINT_DYN_CTL01);
        reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
        wr32(hw, I40E_VFINT_DYN_CTL01, reg);

        ++sc->admin_irq;

        /* schedule task */
        taskqueue_enqueue(sc->tq, &sc->aq_irq);
1974         return;
1975 }
1976
1977 void
1978 ixlv_enable_intr(struct ixl_vsi *vsi)
1979 {
1980         struct i40e_hw          *hw = vsi->hw;
1981         struct ixl_queue        *que = vsi->queues;
1982
1983         ixlv_enable_adminq_irq(hw);
1984         for (int i = 0; i < vsi->num_queues; i++, que++)
1985                 ixlv_enable_queue_irq(hw, que->me);
1986 }
1987
1988 void
1989 ixlv_disable_intr(struct ixl_vsi *vsi)
1990 {
1991         struct i40e_hw          *hw = vsi->hw;
1992         struct ixl_queue       *que = vsi->queues;
1993
1994         ixlv_disable_adminq_irq(hw);
1995         for (int i = 0; i < vsi->num_queues; i++, que++)
1996                 ixlv_disable_queue_irq(hw, que->me);
1997 }
1998
1999
2000 static void
2001 ixlv_disable_adminq_irq(struct i40e_hw *hw)
2002 {
2003         wr32(hw, I40E_VFINT_DYN_CTL01, 0);
2004         wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
2005         /* flush */
2006         rd32(hw, I40E_VFGEN_RSTAT);
2007         return;
2008 }
2009
2010 static void
2011 ixlv_enable_adminq_irq(struct i40e_hw *hw)
2012 {
2013         wr32(hw, I40E_VFINT_DYN_CTL01,
2014             I40E_VFINT_DYN_CTL01_INTENA_MASK |
2015             I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
2016         wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
2017         /* flush */
2018         rd32(hw, I40E_VFGEN_RSTAT);
2019         return;
2020 }
2021
2022 static void
2023 ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
2024 {
2025         u32             reg;
2026
2027         reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
2028             I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
2029             I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
2030         wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
2031 }
2032
2033 static void
2034 ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
2035 {
2036         wr32(hw, I40E_VFINT_DYN_CTLN1(id),
2037             I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
2038         rd32(hw, I40E_VFGEN_RSTAT);
2039         return;
2040 }
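
/*
 * A note on the DYN_CTL register writes above (an interpretation of
 * the mask names used here, not a quote from the datasheet): the
 * enable paths set INTENA together with an ITR index of "no ITR
 * update", so enabling an interrupt does not disturb the current
 * moderation value; the disable paths write the register without
 * INTENA set, and the VFGEN_RSTAT read serves only to flush the write.
 */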
2041
2042 /*
2043  * Get initial ITR values from tunable values.
2044  */
2045 static void
2046 ixlv_configure_itr(struct ixlv_sc *sc)
2047 {
2048         struct i40e_hw          *hw = &sc->hw;
2049         struct ixl_vsi          *vsi = &sc->vsi;
2050         struct ixl_queue        *que = vsi->queues;
2051
2052         vsi->rx_itr_setting = ixlv_rx_itr;
2053         vsi->tx_itr_setting = ixlv_tx_itr;
2054
2055         for (int i = 0; i < vsi->num_queues; i++, que++) {
2056                 struct tx_ring  *txr = &que->txr;
2057                 struct rx_ring  *rxr = &que->rxr;
2058
2059                 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i),
2060                     vsi->rx_itr_setting);
2061                 rxr->itr = vsi->rx_itr_setting;
2062                 rxr->latency = IXL_AVE_LATENCY;
2063
2064                 wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i),
2065                     vsi->tx_itr_setting);
2066                 txr->itr = vsi->tx_itr_setting;
2067                 txr->latency = IXL_AVE_LATENCY;
2068         }
2069 }
2070
2071 /*
** Provide an update to the queue RX
2073 ** interrupt moderation value.
2074 */
2075 static void
2076 ixlv_set_queue_rx_itr(struct ixl_queue *que)
2077 {
2078         struct ixl_vsi  *vsi = que->vsi;
2079         struct i40e_hw  *hw = vsi->hw;
2080         struct rx_ring  *rxr = &que->rxr;
2081         u16             rx_itr;
2082         u16             rx_latency = 0;
2083         int             rx_bytes;
2084
2086         /* Idle, do nothing */
2087         if (rxr->bytes == 0)
2088                 return;
2089
2090         if (ixlv_dynamic_rx_itr) {
2091                 rx_bytes = rxr->bytes/rxr->itr;
2092                 rx_itr = rxr->itr;
2093
2094                 /* Adjust latency range */
2095                 switch (rxr->latency) {
2096                 case IXL_LOW_LATENCY:
2097                         if (rx_bytes > 10) {
2098                                 rx_latency = IXL_AVE_LATENCY;
2099                                 rx_itr = IXL_ITR_20K;
2100                         }
2101                         break;
2102                 case IXL_AVE_LATENCY:
2103                         if (rx_bytes > 20) {
2104                                 rx_latency = IXL_BULK_LATENCY;
2105                                 rx_itr = IXL_ITR_8K;
2106                         } else if (rx_bytes <= 10) {
2107                                 rx_latency = IXL_LOW_LATENCY;
2108                                 rx_itr = IXL_ITR_100K;
2109                         }
2110                         break;
2111                 case IXL_BULK_LATENCY:
2112                         if (rx_bytes <= 20) {
2113                                 rx_latency = IXL_AVE_LATENCY;
2114                                 rx_itr = IXL_ITR_20K;
2115                         }
2116                         break;
                }
2118
2119                 rxr->latency = rx_latency;
2120
2121                 if (rx_itr != rxr->itr) {
2122                         /* do an exponential smoothing */
2123                         rx_itr = (10 * rx_itr * rxr->itr) /
2124                             ((9 * rx_itr) + rxr->itr);
2125                         rxr->itr = min(rx_itr, IXL_MAX_ITR);
2126                         wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
2127                             que->me), rxr->itr);
2128                 }
        } else { /* We may have toggled to non-dynamic */
2130                 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2131                         vsi->rx_itr_setting = ixlv_rx_itr;
2132                 /* Update the hardware if needed */
2133                 if (rxr->itr != vsi->rx_itr_setting) {
2134                         rxr->itr = vsi->rx_itr_setting;
2135                         wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
2136                             que->me), rxr->itr);
2137                 }
2138         }
2139         rxr->bytes = 0;
2140         rxr->packets = 0;
2141         return;
2142 }
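
/*
 * Worked example of the smoothing step above (illustrative numbers
 * only): with a current itr of 122 and a computed target of 62, the
 * update is (10 * 62 * 122) / ((9 * 62) + 122) = 75640 / 680 ~= 111,
 * so the moderation value converges on the target over several
 * intervals instead of jumping. The TX path below uses the same
 * formula.
 */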
2143
2144
2145 /*
** Provide an update to the queue TX
2147 ** interrupt moderation value.
2148 */
2149 static void
2150 ixlv_set_queue_tx_itr(struct ixl_queue *que)
2151 {
2152         struct ixl_vsi  *vsi = que->vsi;
2153         struct i40e_hw  *hw = vsi->hw;
2154         struct tx_ring  *txr = &que->txr;
2155         u16             tx_itr;
2156         u16             tx_latency = 0;
2157         int             tx_bytes;
2158
2160         /* Idle, do nothing */
2161         if (txr->bytes == 0)
2162                 return;
2163
2164         if (ixlv_dynamic_tx_itr) {
2165                 tx_bytes = txr->bytes/txr->itr;
2166                 tx_itr = txr->itr;
2167
2168                 switch (txr->latency) {
2169                 case IXL_LOW_LATENCY:
2170                         if (tx_bytes > 10) {
2171                                 tx_latency = IXL_AVE_LATENCY;
2172                                 tx_itr = IXL_ITR_20K;
2173                         }
2174                         break;
2175                 case IXL_AVE_LATENCY:
2176                         if (tx_bytes > 20) {
2177                                 tx_latency = IXL_BULK_LATENCY;
2178                                 tx_itr = IXL_ITR_8K;
2179                         } else if (tx_bytes <= 10) {
2180                                 tx_latency = IXL_LOW_LATENCY;
2181                                 tx_itr = IXL_ITR_100K;
2182                         }
2183                         break;
2184                 case IXL_BULK_LATENCY:
2185                         if (tx_bytes <= 20) {
2186                                 tx_latency = IXL_AVE_LATENCY;
2187                                 tx_itr = IXL_ITR_20K;
2188                         }
2189                         break;
2190                 }
2191
2192                 txr->latency = tx_latency;
2193
2194                 if (tx_itr != txr->itr) {
                        /* do an exponential smoothing */
2196                         tx_itr = (10 * tx_itr * txr->itr) /
2197                             ((9 * tx_itr) + txr->itr);
2198                         txr->itr = min(tx_itr, IXL_MAX_ITR);
2199                         wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2200                             que->me), txr->itr);
2201                 }
2202
        } else { /* We may have toggled to non-dynamic */
2204                 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2205                         vsi->tx_itr_setting = ixlv_tx_itr;
2206                 /* Update the hardware if needed */
2207                 if (txr->itr != vsi->tx_itr_setting) {
2208                         txr->itr = vsi->tx_itr_setting;
2209                         wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2210                             que->me), txr->itr);
2211                 }
2212         }
2213         txr->bytes = 0;
2214         txr->packets = 0;
2215         return;
2216 }
2217
2218
2219 /*
2220 **
2221 ** MSIX Interrupt Handlers and Tasklets
2222 **
2223 */
2224 static void
2225 ixlv_handle_que(void *context, int pending)
2226 {
2227         struct ixl_queue *que = context;
2228         struct ixl_vsi *vsi = que->vsi;
2229         struct i40e_hw  *hw = vsi->hw;
2230         struct tx_ring  *txr = &que->txr;
2231         struct ifnet    *ifp = vsi->ifp;
2232         bool            more;
2233
2234         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2235                 more = ixl_rxeof(que, IXL_RX_LIMIT);
2236                 mtx_lock(&txr->mtx);
2237                 ixl_txeof(que);
2238                 if (!drbr_empty(ifp, txr->br))
2239                         ixl_mq_start_locked(ifp, txr);
2240                 mtx_unlock(&txr->mtx);
2241                 if (more) {
2242                         taskqueue_enqueue(que->tq, &que->task);
2243                         return;
2244                 }
2245         }
2246
        /* Re-enable this interrupt */
2248         ixlv_enable_queue_irq(hw, que->me);
2249         return;
2250 }
2251
2252
2253 /*********************************************************************
2254  *
2255  *  MSIX Queue Interrupt Service routine
2256  *
2257  **********************************************************************/
2258 static void
2259 ixlv_msix_que(void *arg)
2260 {
2261         struct ixl_queue        *que = arg;
2262         struct ixl_vsi  *vsi = que->vsi;
2263         struct i40e_hw  *hw = vsi->hw;
2264         struct tx_ring  *txr = &que->txr;
2265         bool            more_tx, more_rx;
2266
2267         /* Spurious interrupts are ignored */
2268         if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
2269                 return;
2270
2271         ++que->irqs;
2272
2273         more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
2274
2275         mtx_lock(&txr->mtx);
2276         more_tx = ixl_txeof(que);
2277         /*
2278         ** Make certain that if the stack 
2279         ** has anything queued the task gets
2280         ** scheduled to handle it.
2281         */
2282         if (!drbr_empty(vsi->ifp, txr->br))
2283                 more_tx = 1;
2284         mtx_unlock(&txr->mtx);
2285
2286         ixlv_set_queue_rx_itr(que);
2287         ixlv_set_queue_tx_itr(que);
2288
2289         if (more_tx || more_rx)
2290                 taskqueue_enqueue(que->tq, &que->task);
2291         else
2292                 ixlv_enable_queue_irq(hw, que->me);
2293
2294         return;
2295 }
2296
2297
2298 /*********************************************************************
2299  *
2300  *  Media Ioctl callback
2301  *
2302  *  This routine is called whenever the user queries the status of
2303  *  the interface using ifconfig.
2304  *
2305  **********************************************************************/
2306 static void
2307 ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
2308 {
2309         struct ixl_vsi          *vsi = ifp->if_softc;
2310         struct ixlv_sc  *sc = vsi->back;
2311
2312         INIT_DBG_IF(ifp, "begin");
2313
2314         mtx_lock(&sc->mtx);
2315
2316         ixlv_update_link_status(sc);
2317
2318         ifmr->ifm_status = IFM_AVALID;
2319         ifmr->ifm_active = IFM_ETHER;
2320
2321         if (!sc->link_up) {
2322                 mtx_unlock(&sc->mtx);
2323                 INIT_DBG_IF(ifp, "end: link not up");
2324                 return;
2325         }
2326
2327         ifmr->ifm_status |= IFM_ACTIVE;
2328         /* Hardware is always full-duplex */
2329         ifmr->ifm_active |= IFM_FDX;
2330
2331         /* Based on the link speed reported by the PF over the AdminQ, choose a
2332          * PHY type to report. This isn't 100% correct since we don't really
2333          * know the underlying PHY type of the PF, but at least we can report
2334          * a valid link speed...
2335          */
2336         switch (sc->link_speed) {
2337         case VIRTCHNL_LINK_SPEED_100MB:
2338                 ifmr->ifm_active |= IFM_100_TX;
2339                 break;
2340         case VIRTCHNL_LINK_SPEED_1GB:
2341                 ifmr->ifm_active |= IFM_1000_T;
2342                 break;
2343         case VIRTCHNL_LINK_SPEED_10GB:
2344                 ifmr->ifm_active |= IFM_10G_SR;
2345                 break;
2346         case VIRTCHNL_LINK_SPEED_20GB:
2347         case VIRTCHNL_LINK_SPEED_25GB:
2348                 ifmr->ifm_active |= IFM_25G_SR;
2349                 break;
2350         case VIRTCHNL_LINK_SPEED_40GB:
2351                 ifmr->ifm_active |= IFM_40G_SR4;
2352                 break;
2353         default:
2354                 ifmr->ifm_active |= IFM_UNKNOWN;
2355                 break;
2356         }
2357
2358         mtx_unlock(&sc->mtx);
2359         INIT_DBG_IF(ifp, "end");
2360         return;
2361 }
2362
2363 /*********************************************************************
2364  *
2365  *  Media Ioctl callback
2366  *
2367  *  This routine is called when the user changes speed/duplex using
 *  media/mediaopt options with ifconfig.
2369  *
2370  **********************************************************************/
2371 static int
2372 ixlv_media_change(struct ifnet * ifp)
2373 {
2374         struct ixl_vsi *vsi = ifp->if_softc;
2375         struct ifmedia *ifm = &vsi->media;
2376
2377         INIT_DBG_IF(ifp, "begin");
2378
2379         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2380                 return (EINVAL);
2381
2382         if_printf(ifp, "Changing speed is not supported\n");
2383
2384         INIT_DBG_IF(ifp, "end");
2385         return (ENODEV);
2386 }
2387
2388
2389 /*********************************************************************
2390  *  Multicast Initialization
2391  *
 *  This routine is called by init to reset to a fresh state.
2393  *
2394  **********************************************************************/
2395
2396 static void
2397 ixlv_init_multi(struct ixl_vsi *vsi)
2398 {
2399         struct ixlv_mac_filter *f;
2400         struct ixlv_sc  *sc = vsi->back;
2401         int                     mcnt = 0;
2402
2403         IOCTL_DBG_IF(vsi->ifp, "begin");
2404
2405         /* First clear any multicast filters */
2406         SLIST_FOREACH(f, sc->mac_filters, next) {
2407                 if ((f->flags & IXL_FILTER_USED)
2408                     && (f->flags & IXL_FILTER_MC)) {
2409                         f->flags |= IXL_FILTER_DEL;
2410                         mcnt++;
2411                 }
2412         }
2413         if (mcnt > 0)
2414                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2415                     IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2416                     sc);
2417
2418         IOCTL_DBG_IF(vsi->ifp, "end");
2419 }
2420
2421 static void
2422 ixlv_add_multi(struct ixl_vsi *vsi)
2423 {
2424         struct ifmultiaddr      *ifma;
2425         struct ifnet            *ifp = vsi->ifp;
2426         struct ixlv_sc  *sc = vsi->back;
2427         int                     mcnt = 0;
2428
2429         IOCTL_DBG_IF(ifp, "begin");
2430
2431         if_maddr_rlock(ifp);
2432         /*
2433         ** Get a count, to decide if we
2434         ** simply use multicast promiscuous.
2435         */
2436         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2437                 if (ifma->ifma_addr->sa_family != AF_LINK)
2438                         continue;
2439                 mcnt++;
2440         }
2441         if_maddr_runlock(ifp);
2442
2443         /* TODO: Remove -- cannot set promiscuous mode in a VF */
2444         if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
2445                 /* delete all multicast filters */
2446                 ixlv_init_multi(vsi);
2447                 sc->promiscuous_flags |= FLAG_VF_MULTICAST_PROMISC;
2448                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2449                     IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
2450                     sc);
2451                 IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
2452                 return;
2453         }
2454
2455         mcnt = 0;
2456         if_maddr_rlock(ifp);
2457         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2458                 if (ifma->ifma_addr->sa_family != AF_LINK)
2459                         continue;
2460                 if (!ixlv_add_mac_filter(sc,
2461                     (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2462                     IXL_FILTER_MC))
2463                         mcnt++;
2464         }
2465         if_maddr_runlock(ifp);
2466         /*
2467         ** Notify AQ task that sw filters need to be
2468         ** added to hw list
2469         */
2470         if (mcnt > 0)
2471                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2472                     IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
2473                     sc);
2474
2475         IOCTL_DBG_IF(ifp, "end");
2476 }
2477
2478 static void
2479 ixlv_del_multi(struct ixl_vsi *vsi)
2480 {
2481         struct ixlv_mac_filter *f;
2482         struct ifmultiaddr      *ifma;
2483         struct ifnet            *ifp = vsi->ifp;
2484         struct ixlv_sc  *sc = vsi->back;
2485         int                     mcnt = 0;
2486         bool            match = FALSE;
2487
2488         IOCTL_DBG_IF(ifp, "begin");
2489
2490         /* Search for removed multicast addresses */
2491         if_maddr_rlock(ifp);
2492         SLIST_FOREACH(f, sc->mac_filters, next) {
2493                 if ((f->flags & IXL_FILTER_USED)
2494                     && (f->flags & IXL_FILTER_MC)) {
2495                         /* check if mac address in filter is in sc's list */
2496                         match = FALSE;
2497                         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2498                                 if (ifma->ifma_addr->sa_family != AF_LINK)
2499                                         continue;
2500                                 u8 *mc_addr =
2501                                     (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2502                                 if (cmp_etheraddr(f->macaddr, mc_addr)) {
2503                                         match = TRUE;
2504                                         break;
2505                                 }
2506                         }
2507                         /* if this filter is not in the sc's list, remove it */
2508                         if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
2509                                 f->flags |= IXL_FILTER_DEL;
2510                                 mcnt++;
2511                                 IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
2512                                     MAC_FORMAT_ARGS(f->macaddr));
2513                         }
2514                         else if (match == FALSE)
2515                                 IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
2516                                     MAC_FORMAT_ARGS(f->macaddr));
2517                 }
2518         }
2519         if_maddr_runlock(ifp);
2520
2521         if (mcnt > 0)
2522                 ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2523                     IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2524                     sc);
2525
2526         IOCTL_DBG_IF(ifp, "end");
2527 }
2528
2529 /*********************************************************************
2530  *  Timer routine
2531  *
 *  This routine checks for link status, updates statistics,
2533  *  and runs the watchdog check.
2534  *
2535  **********************************************************************/
2536
2537 static void
2538 ixlv_local_timer(void *arg)
2539 {
2540         struct ixlv_sc          *sc = arg;
2541         struct i40e_hw          *hw = &sc->hw;
2542         struct ixl_vsi          *vsi = &sc->vsi;
2543         u32                     val;
2544
2545         IXLV_CORE_LOCK_ASSERT(sc);
2546
2547         /* If Reset is in progress just bail */
2548         if (sc->init_state == IXLV_RESET_PENDING)
2549                 return;
2550
2551         /* Check for when PF triggers a VF reset */
2552         val = rd32(hw, I40E_VFGEN_RSTAT) &
2553             I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2554
2555         if (val != VIRTCHNL_VFR_VFACTIVE
2556             && val != VIRTCHNL_VFR_COMPLETED) {
2557                 DDPRINTF(sc->dev, "reset in progress! (%d)", val);
2558                 return;
2559         }
2560
2561         ixlv_request_stats(sc);
2562
2563         /* clean and process any events */
2564         taskqueue_enqueue(sc->tq, &sc->aq_irq);
2565
2566         /* Increment stat when a queue shows hung */
2567         if (ixl_queue_hang_check(vsi))
2568                 sc->watchdog_events++;
2569
2570         callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
2571 }
2572
2573 /*
** Note: this routine updates the OS on the link state;
2575 **      the real check of the hardware only happens with
2576 **      a link interrupt.
2577 */
2578 void
2579 ixlv_update_link_status(struct ixlv_sc *sc)
2580 {
2581         struct ixl_vsi          *vsi = &sc->vsi;
2582         struct ifnet            *ifp = vsi->ifp;
2583
        if (sc->link_up) {
                if (vsi->link_active == FALSE) {
                        if (bootverbose)
                                if_printf(ifp, "Link is Up, %s\n",
                                    ixlv_vc_speed_to_string(sc->link_speed));
                        vsi->link_active = TRUE;
                        if_link_state_change(ifp, LINK_STATE_UP);
                }
        } else { /* Link down */
                if (vsi->link_active == TRUE) {
                        if (bootverbose)
                                if_printf(ifp, "Link is Down\n");
2596                         if_link_state_change(ifp, LINK_STATE_DOWN);
2597                         vsi->link_active = FALSE;
2598                 }
2599         }
2600
2601         return;
2602 }
2603
2604 /*********************************************************************
2605  *
 *  This routine stops all traffic on the adapter: it disables the
 *  VSI queues and drains any pending admin queue operations.
2608  *
2609  **********************************************************************/
2610
2611 static void
2612 ixlv_stop(struct ixlv_sc *sc)
2613 {
2614         struct ifnet *ifp;
2615         int start;
2616
2617         ifp = sc->vsi.ifp;
2618         INIT_DBG_IF(ifp, "begin");
2619
2620         IXLV_CORE_LOCK_ASSERT(sc);
2621
2622         ixl_vc_flush(&sc->vc_mgr);
2623         ixlv_disable_queues(sc);
2624
2625         start = ticks;
2626         while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
2627             ((ticks - start) < hz/10))
2628                 ixlv_do_adminq_locked(sc);
2629
2630         /* Stop the local timer */
2631         callout_stop(&sc->timer);
2632
2633         INIT_DBG_IF(ifp, "end");
2634 }
2635
2636 /* Free a single queue struct */
2637 static void
2638 ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que)
2639 {
2640         struct tx_ring *txr = &que->txr;
2641         struct rx_ring *rxr = &que->rxr;
2642
2643         if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2644                 return;
2645         IXL_TX_LOCK(txr);
2646         if (txr->br)
2647                 buf_ring_free(txr->br, M_DEVBUF);
2648         ixl_free_que_tx(que);
2649         if (txr->base)
2650                 i40e_free_dma_mem(&sc->hw, &txr->dma);
2651         IXL_TX_UNLOCK(txr);
2652         IXL_TX_LOCK_DESTROY(txr);
2653
2654         if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2655                 return;
2656         IXL_RX_LOCK(rxr);
2657         ixl_free_que_rx(que);
2658         if (rxr->base)
2659                 i40e_free_dma_mem(&sc->hw, &rxr->dma);
2660         IXL_RX_UNLOCK(rxr);
2661         IXL_RX_LOCK_DESTROY(rxr);
2662 }
2663
2664 /*********************************************************************
2665  *
2666  *  Free all station queue structs.
2667  *
2668  **********************************************************************/
2669 static void
2670 ixlv_free_queues(struct ixl_vsi *vsi)
2671 {
2672         struct ixlv_sc  *sc = (struct ixlv_sc *)vsi->back;
2673         struct ixl_queue        *que = vsi->queues;
2674
2675         for (int i = 0; i < vsi->num_queues; i++, que++) {
2676                 /* First, free the MSI-X resources */
2677                 ixlv_free_msix_resources(sc, que);
2678                 /* Then free other queue data */
2679                 ixlv_free_queue(sc, que);
2680         }
2681
2682         free(vsi->queues, M_DEVBUF);
2683 }
2684
2685 static void
2686 ixlv_config_rss_reg(struct ixlv_sc *sc)
2687 {
2688         struct i40e_hw  *hw = &sc->hw;
2689         struct ixl_vsi  *vsi = &sc->vsi;
2690         u32             lut = 0;
2691         u64             set_hena = 0, hena;
2692         int             i, j, que_id;
2693         u32             rss_seed[IXL_RSS_KEY_SIZE_REG];
2694 #ifdef RSS
2695         u32             rss_hash_config;
2696 #endif
2697         
2698         /* Don't set up RSS if using a single queue */
2699         if (vsi->num_queues == 1) {
2700                 wr32(hw, I40E_VFQF_HENA(0), 0);
2701                 wr32(hw, I40E_VFQF_HENA(1), 0);
2702                 ixl_flush(hw);
2703                 return;
2704         }
2705
2706 #ifdef RSS
2707         /* Fetch the configured RSS key */
2708         rss_getkey((uint8_t *) &rss_seed);
2709 #else
2710         ixl_get_default_rss_key(rss_seed);
2711 #endif
2712
2713         /* Fill out hash function seed */
2714         for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
2715                 wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
2716
2717         /* Enable PCTYPES for RSS: */
2718 #ifdef RSS
2719         rss_hash_config = rss_gethashconfig();
2720         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2721                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2722         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2723                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2724         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2725                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2726         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2727                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2728         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2729                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2730         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2731                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2732         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2733                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
2734 #else
2735         set_hena = IXL_DEFAULT_RSS_HENA_XL710;
2736 #endif
2737         hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
2738             ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
2739         hena |= set_hena;
2740         wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
2741         wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
2742
        /* Populate the LUT with the available queues in round-robin fashion */
2744         for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
2745                 if (j == vsi->num_queues)
2746                         j = 0;
2747 #ifdef RSS
2748                 /*
2749                  * Fetch the RSS bucket id for the given indirection entry.
2750                  * Cap it at the number of configured buckets (which is
2751                  * num_queues.)
2752                  */
2753                 que_id = rss_get_indirection_to_bucket(i);
2754                 que_id = que_id % vsi->num_queues;
2755 #else
2756                 que_id = j;
2757 #endif
2758                 /* lut = 4-byte sliding window of 4 lut entries */
2759                 lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
2760                 /* On i = 3, we have 4 entries in lut; write to the register */
2761                 if ((i & 3) == 3) {
2762                         wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
2763                         DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
2764                 }
2765         }
2766         ixl_flush(hw);
2767 }
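
/*
 * Example of the LUT packing above: each 32-bit HLUT register holds
 * four one-byte entries, so with 4 queues the first write (at i == 3)
 * is HLUT(0) = 0x00010203, and queue ids 0..3 then repeat across all
 * IXL_RSS_VSI_LUT_SIZE entries.
 */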
2768
2769 static void
2770 ixlv_config_rss_pf(struct ixlv_sc *sc)
2771 {
2772         ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_key_cmd,
2773             IXLV_FLAG_AQ_CONFIG_RSS_KEY, ixl_init_cmd_complete, sc);
2774
2775         ixl_vc_enqueue(&sc->vc_mgr, &sc->set_rss_hena_cmd,
2776             IXLV_FLAG_AQ_SET_RSS_HENA, ixl_init_cmd_complete, sc);
2777
2778         ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_lut_cmd,
2779             IXLV_FLAG_AQ_CONFIG_RSS_LUT, ixl_init_cmd_complete, sc);
2780 }
2781
2782 /*
2783 ** ixlv_config_rss - setup RSS 
2784 **
2785 ** RSS keys and table are cleared on VF reset.
2786 */
2787 static void
2788 ixlv_config_rss(struct ixlv_sc *sc)
2789 {
2790         if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) {
2791                 DDPRINTF(sc->dev, "Setting up RSS using VF registers...");
2792                 ixlv_config_rss_reg(sc);
2793         } else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2794                 DDPRINTF(sc->dev, "Setting up RSS using messages to PF...");
2795                 ixlv_config_rss_pf(sc);
2796         } else
2797                 device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
2798 }
2799
/*
** This routine refreshes the vlan filters; called by init,
** it scans the filter table and then updates the AQ.
*/
2804 static void
2805 ixlv_setup_vlan_filters(struct ixlv_sc *sc)
2806 {
2807         struct ixl_vsi                  *vsi = &sc->vsi;
2808         struct ixlv_vlan_filter *f;
2809         int                             cnt = 0;
2810
2811         if (vsi->num_vlans == 0)
2812                 return;
2813         /*
2814         ** Scan the filter table for vlan entries,
2815         ** and if found call for the AQ update.
2816         */
2817         SLIST_FOREACH(f, sc->vlan_filters, next)
2818                 if (f->flags & IXL_FILTER_ADD)
2819                         cnt++;
2820         if (cnt > 0)
2821                 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
2822                     IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
2823 }
2824
2825
2826 /*
2827 ** This routine adds new MAC filters to the sc's list;
2828 ** these are later added in hardware by sending a virtual
2829 ** channel message.
2830 */
2831 static int
2832 ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
2833 {
2834         struct ixlv_mac_filter  *f;
2835
2836         /* Does one already exist? */
2837         f = ixlv_find_mac_filter(sc, macaddr);
2838         if (f != NULL) {
2839                 IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
2840                     MAC_FORMAT_ARGS(macaddr));
2841                 return (EEXIST);
2842         }
2843
2844         /* If not, get a new empty filter */
2845         f = ixlv_get_mac_filter(sc);
2846         if (f == NULL) {
2847                 if_printf(sc->vsi.ifp, "%s: no filters available!!\n",
2848                     __func__);
2849                 return (ENOMEM);
2850         }
2851
2852         IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
2853             MAC_FORMAT_ARGS(macaddr));
2854
2855         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2856         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
2857         f->flags |= flags;
2858         return (0);
2859 }
2860
2861 /*
2862 ** Marks a MAC filter for deletion.
2863 */
2864 static int
2865 ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
2866 {
2867         struct ixlv_mac_filter  *f;
2868
2869         f = ixlv_find_mac_filter(sc, macaddr);
2870         if (f == NULL)
2871                 return (ENOENT);
2872
2873         f->flags |= IXL_FILTER_DEL;
2874         return (0);
2875 }
2876
2877 /*
2878 ** Tasklet handler for MSIX Adminq interrupts
2879 **  - done outside interrupt context since it might sleep
2880 */
2881 static void
2882 ixlv_do_adminq(void *context, int pending)
2883 {
2884         struct ixlv_sc          *sc = context;
2885
2886         mtx_lock(&sc->mtx);
2887         ixlv_do_adminq_locked(sc);
2888         mtx_unlock(&sc->mtx);
2889         return;
2890 }
2891
2892 static void
2893 ixlv_do_adminq_locked(struct ixlv_sc *sc)
2894 {
2895         struct i40e_hw                  *hw = &sc->hw;
2896         struct i40e_arq_event_info      event;
2897         struct virtchnl_msg     *v_msg;
2898         device_t                        dev = sc->dev;
2899         u16                             result = 0;
2900         u32                             reg, oldreg;
2901         i40e_status                     ret;
2902         bool                            aq_error = false;
2903
2904         IXLV_CORE_LOCK_ASSERT(sc);
2905
2906         event.buf_len = IXL_AQ_BUF_SZ;
2907         event.msg_buf = sc->aq_buffer;
2908         v_msg = (struct virtchnl_msg *)&event.desc;
2909
2910         do {
2911                 ret = i40e_clean_arq_element(hw, &event, &result);
2912                 if (ret)
2913                         break;
2914                 ixlv_vc_completion(sc, v_msg->v_opcode,
2915                     v_msg->v_retval, event.msg_buf, event.msg_len);
2916                 if (result != 0)
2917                         bzero(event.msg_buf, IXL_AQ_BUF_SZ);
2918         } while (result);
2919
2920         /* check for Admin queue errors */
2921         oldreg = reg = rd32(hw, hw->aq.arq.len);
2922         if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
2923                 device_printf(dev, "ARQ VF Error detected\n");
2924                 reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
2925                 aq_error = true;
2926         }
2927         if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
2928                 device_printf(dev, "ARQ Overflow Error detected\n");
2929                 reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
2930                 aq_error = true;
2931         }
2932         if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
2933                 device_printf(dev, "ARQ Critical Error detected\n");
2934                 reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
2935                 aq_error = true;
2936         }
2937         if (oldreg != reg)
2938                 wr32(hw, hw->aq.arq.len, reg);
2939
2940         oldreg = reg = rd32(hw, hw->aq.asq.len);
2941         if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
2942                 device_printf(dev, "ASQ VF Error detected\n");
2943                 reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
2944                 aq_error = true;
2945         }
2946         if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
2947                 device_printf(dev, "ASQ Overflow Error detected\n");
2948                 reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
2949                 aq_error = true;
2950         }
2951         if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
2952                 device_printf(dev, "ASQ Critical Error detected\n");
2953                 reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
2954                 aq_error = true;
2955         }
2956         if (oldreg != reg)
2957                 wr32(hw, hw->aq.asq.len, reg);
2958
2959         if (aq_error) {
2960                 /* Need to reset adapter */
2961                 device_printf(dev, "WARNING: Resetting!\n");
2962                 sc->init_state = IXLV_RESET_REQUIRED;
2963                 ixlv_stop(sc);
2964                 ixlv_init_locked(sc);
2965         }
2966         ixlv_enable_adminq_irq(hw);
2967 }

static void
ixlv_add_sysctls(struct ixlv_sc *sc)
{
        device_t dev = sc->dev;
        struct ixl_vsi *vsi = &sc->vsi;
        struct i40e_eth_stats *es = &vsi->eth_stats;

        struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
        struct sysctl_oid *tree = device_get_sysctl_tree(dev);
        struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);

        struct sysctl_oid *vsi_node, *queue_node;
        struct sysctl_oid_list *vsi_list, *queue_list;

#define QUEUE_NAME_LEN 32
        char queue_namebuf[QUEUE_NAME_LEN];

        struct ixl_queue *queues = vsi->queues;
        struct tx_ring *txr;
        struct rx_ring *rxr;

        /* Driver statistics sysctls */
        SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
                        CTLFLAG_RD, &sc->watchdog_events,
                        "Watchdog timeouts");
        SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
                        CTLFLAG_RD, &sc->admin_irq,
                        "Admin Queue IRQs handled");

        SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_size",
                        CTLFLAG_RD, &vsi->num_tx_desc, 0,
                        "TX ring size");
        SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_size",
                        CTLFLAG_RD, &vsi->num_rx_desc, 0,
                        "RX ring size");

        SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "current_speed",
                        CTLTYPE_STRING | CTLFLAG_RD,
                        sc, 0, ixlv_sysctl_current_speed,
                        "A", "Current Port Speed");

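        /*
         * These OIDs appear under the device's sysctl tree; for example
         * (hypothetical output, assuming the VF attaches as ixlv0):
         *
         *   # sysctl dev.ixlv.0.current_speed
         *   dev.ixlv.0.current_speed: 10 Gbps
         */
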
        /* VSI statistics sysctls */
        vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
                                   CTLFLAG_RD, NULL, "VSI-specific statistics");
        vsi_list = SYSCTL_CHILDREN(vsi_node);

        struct ixl_sysctl_info ctls[] =
        {
                {&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
                {&es->rx_unicast, "ucast_pkts_rcvd",
                        "Unicast Packets Received"},
                {&es->rx_multicast, "mcast_pkts_rcvd",
                        "Multicast Packets Received"},
                {&es->rx_broadcast, "bcast_pkts_rcvd",
                        "Broadcast Packets Received"},
                {&es->rx_discards, "rx_discards", "Discarded RX packets"},
                {&es->rx_unknown_protocol, "rx_unknown_proto",
                        "RX unknown protocol packets"},
                {&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
                {&es->tx_unicast, "ucast_pkts_txd",
                        "Unicast Packets Transmitted"},
                {&es->tx_multicast, "mcast_pkts_txd",
                        "Multicast Packets Transmitted"},
                {&es->tx_broadcast, "bcast_pkts_txd",
                        "Broadcast Packets Transmitted"},
                {&es->tx_errors, "tx_errors", "TX packet errors"},
                /* required last entry */
                {0, 0, 0}
        };
        struct ixl_sysctl_info *entry = ctls;
        while (entry->stat != NULL) {
                SYSCTL_ADD_QUAD(ctx, vsi_list, OID_AUTO, entry->name,
                                CTLFLAG_RD, entry->stat,
                                entry->description);
                entry++;
        }

        /* Queue sysctls */
        for (int q = 0; q < vsi->num_queues; q++) {
                snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
                queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO,
                    queue_namebuf, CTLFLAG_RD, NULL, "Per-queue statistics");
                queue_list = SYSCTL_CHILDREN(queue_node);

                txr = &(queues[q].txr);
                rxr = &(queues[q].rxr);

                SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
                                CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
                                "m_defrag() failures");
                SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
                                CTLFLAG_RD, &(queues[q].dropped_pkts),
                                "Driver dropped packets");
                SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
                                CTLFLAG_RD, &(queues[q].irqs),
                                "IRQs on this queue");
                SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
                                CTLFLAG_RD, &(queues[q].tso),
                                "TSO packets transmitted");
                SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
                                CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
                                "TX DMA mapping failures");
                SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
                                CTLFLAG_RD, &(txr->no_desc),
                                "Times no TX descriptors were available");
                SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
                                CTLFLAG_RD, &(txr->total_packets),
                                "Queue Packets Transmitted");
                SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
                                CTLFLAG_RD, &(txr->tx_bytes),
                                "Queue Bytes Transmitted");
                SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
                                CTLFLAG_RD, &(rxr->rx_packets),
                                "Queue Packets Received");
                SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
                                CTLFLAG_RD, &(rxr->rx_bytes),
                                "Queue Bytes Received");
                SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
                                CTLFLAG_RD, &(rxr->itr), 0,
                                "Queue Rx ITR Interval");
                SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
                                CTLFLAG_RD, &(txr->itr), 0,
                                "Queue Tx ITR Interval");

#ifdef IXL_DEBUG
                /* Examine queue state */
                SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
                                CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
                                sizeof(struct ixl_queue),
                                ixlv_sysctl_qtx_tail_handler, "IU",
                                "Queue Transmit Descriptor Tail");
                SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
                                CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
                                sizeof(struct ixl_queue),
                                ixlv_sysctl_qrx_tail_handler, "IU",
                                "Queue Receive Descriptor Tail");
                SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "watchdog_timer",
                                CTLFLAG_RD, &(txr->watchdog_timer), 0,
                                "Ticks before watchdog event is triggered");
#endif
        }
}

static void
ixlv_init_filters(struct ixlv_sc *sc)
{
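        /*
         * Allocate the SLIST heads that anchor the MAC and VLAN filter
         * lists.  Note these M_NOWAIT allocations are not checked; a
         * failure here would fault in SLIST_INIT().
         */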
        sc->mac_filters = malloc(sizeof(*sc->mac_filters),
            M_DEVBUF, M_NOWAIT | M_ZERO);
        SLIST_INIT(sc->mac_filters);
        sc->vlan_filters = malloc(sizeof(*sc->vlan_filters),
            M_DEVBUF, M_NOWAIT | M_ZERO);
        SLIST_INIT(sc->vlan_filters);
}

static void
ixlv_free_filters(struct ixlv_sc *sc)
{
        struct ixlv_mac_filter *f;
        struct ixlv_vlan_filter *v;

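        /* Free every list element, then the list head itself */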
        while (!SLIST_EMPTY(sc->mac_filters)) {
                f = SLIST_FIRST(sc->mac_filters);
                SLIST_REMOVE_HEAD(sc->mac_filters, next);
                free(f, M_DEVBUF);
        }
        free(sc->mac_filters, M_DEVBUF);
        while (!SLIST_EMPTY(sc->vlan_filters)) {
                v = SLIST_FIRST(sc->vlan_filters);
                SLIST_REMOVE_HEAD(sc->vlan_filters, next);
                free(v, M_DEVBUF);
        }
        free(sc->vlan_filters, M_DEVBUF);
}

static char *
ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed)
{
        int index;

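        /*
         * Table order matches the index assignments in the switch
         * below, not the numeric order of the virtchnl enum values.
         */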
        char *speeds[] = {
                "Unknown",
                "100 Mbps",
                "1 Gbps",
                "10 Gbps",
                "40 Gbps",
                "20 Gbps",
                "25 Gbps",
        };

        switch (link_speed) {
        case VIRTCHNL_LINK_SPEED_100MB:
                index = 1;
                break;
        case VIRTCHNL_LINK_SPEED_1GB:
                index = 2;
                break;
        case VIRTCHNL_LINK_SPEED_10GB:
                index = 3;
                break;
        case VIRTCHNL_LINK_SPEED_40GB:
                index = 4;
                break;
        case VIRTCHNL_LINK_SPEED_20GB:
                index = 5;
                break;
        case VIRTCHNL_LINK_SPEED_25GB:
                index = 6;
                break;
        case VIRTCHNL_LINK_SPEED_UNKNOWN:
        default:
                index = 0;
                break;
        }

        return (speeds[index]);
}

static int
ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
{
        struct ixlv_sc *sc = (struct ixlv_sc *)arg1;
        char *speed;
        int error = 0;

        speed = ixlv_vc_speed_to_string(sc->link_speed);
        error = sysctl_handle_string(oidp, speed, strlen(speed) + 1, req);
        return (error);
}

#ifdef IXL_DEBUG
/**
 * ixlv_sysctl_qtx_tail_handler
 * Retrieves I40E_QTX_TAIL1 value from hardware
 * for a sysctl.
 */
static int
ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
        struct ixl_queue *que;
        int error;
        u32 val;

        que = ((struct ixl_queue *)oidp->oid_arg1);
        if (!que)
                return (0);

        val = rd32(que->vsi->hw, que->txr.tail);
        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error || !req->newptr)
                return (error);
        return (0);
}

/**
 * ixlv_sysctl_qrx_tail_handler
 * Retrieves I40E_QRX_TAIL1 value from hardware
 * for a sysctl.
 */
static int
ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
{
        struct ixl_queue *que;
        int error;
        u32 val;

        que = ((struct ixl_queue *)oidp->oid_arg1);
        if (!que)
                return (0);

        val = rd32(que->vsi->hw, que->rxr.tail);
        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error || !req->newptr)
                return (error);
        return (0);
}
#endif
