/******************************************************************************

  Copyright (c) 2013-2014, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "ixl.h"
#include "ixl_pf.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixl_driver_version[] = "1.2.2";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select which devices to attach to
 *  Last field stores an index into ixl_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

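/* A subvendor or subdevice id of 0 in an entry acts as a wildcard in ixl_probe(). */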
static ixl_vendor_info_t ixl_vendor_info_array[] =
{
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
        /* required last entry */
        {0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixl_strings[] = {
        "Intel(R) Ethernet Connection XL710 Driver"
};


/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixl_probe(device_t);
static int      ixl_attach(device_t);
static int      ixl_detach(device_t);
static int      ixl_shutdown(device_t);
static int      ixl_get_hw_capabilities(struct ixl_pf *);
static void     ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
static int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void     ixl_init(void *);
static void     ixl_init_locked(struct ixl_pf *);
static void     ixl_stop(struct ixl_pf *);
static void     ixl_media_status(struct ifnet *, struct ifmediareq *);
static int      ixl_media_change(struct ifnet *);
static void     ixl_update_link_status(struct ixl_pf *);
static int      ixl_allocate_pci_resources(struct ixl_pf *);
static u16      ixl_get_bus_info(struct i40e_hw *, device_t);
static int      ixl_setup_stations(struct ixl_pf *);
static int      ixl_setup_vsi(struct ixl_vsi *);
static int      ixl_initialize_vsi(struct ixl_vsi *);
static int      ixl_assign_vsi_msix(struct ixl_pf *);
static int      ixl_assign_vsi_legacy(struct ixl_pf *);
static int      ixl_init_msix(struct ixl_pf *);
static void     ixl_configure_msix(struct ixl_pf *);
static void     ixl_configure_itr(struct ixl_pf *);
static void     ixl_configure_legacy(struct ixl_pf *);
static void     ixl_free_pci_resources(struct ixl_pf *);
static void     ixl_local_timer(void *);
static int      ixl_setup_interface(device_t, struct ixl_vsi *);
static bool     ixl_config_link(struct i40e_hw *);
static void     ixl_config_rss(struct ixl_vsi *);
static void     ixl_set_queue_rx_itr(struct ixl_queue *);
static void     ixl_set_queue_tx_itr(struct ixl_queue *);

static void     ixl_enable_rings(struct ixl_vsi *);
static void     ixl_disable_rings(struct ixl_vsi *);
static void     ixl_enable_intr(struct ixl_vsi *);
static void     ixl_disable_intr(struct ixl_vsi *);

static void     ixl_enable_adminq(struct i40e_hw *);
static void     ixl_disable_adminq(struct i40e_hw *);
static void     ixl_enable_queue(struct i40e_hw *, int);
static void     ixl_disable_queue(struct i40e_hw *, int);
static void     ixl_enable_legacy(struct i40e_hw *);
static void     ixl_disable_legacy(struct i40e_hw *);

static void     ixl_set_promisc(struct ixl_vsi *);
static void     ixl_add_multi(struct ixl_vsi *);
static void     ixl_del_multi(struct ixl_vsi *);
static void     ixl_register_vlan(void *, struct ifnet *, u16);
static void     ixl_unregister_vlan(void *, struct ifnet *, u16);
static void     ixl_setup_vlan_filters(struct ixl_vsi *);

static void     ixl_init_filters(struct ixl_vsi *);
static void     ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void     ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void     ixl_add_hw_filters(struct ixl_vsi *, int, int);
static void     ixl_del_hw_filters(struct ixl_vsi *, int);
static struct ixl_mac_filter *
                ixl_find_filter(struct ixl_vsi *, u8 *, s16);
static void     ixl_add_mc_filter(struct ixl_vsi *, u8 *);

/* Sysctl debug interface */
static int      ixl_debug_info(SYSCTL_HANDLER_ARGS);
static void     ixl_print_debug_info(struct ixl_pf *);

/* The MSI/X Interrupt handlers */
static void     ixl_intr(void *);
static void     ixl_msix_que(void *);
static void     ixl_msix_adminq(void *);
static void     ixl_handle_mdd_event(struct ixl_pf *);

/* Deferred interrupt tasklets */
static void     ixl_do_adminq(void *, int);

/* Sysctl handlers */
static int      ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int      ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int      ixl_current_speed(SYSCTL_HANDLER_ARGS);

/* Statistics */
static void     ixl_add_hw_stats(struct ixl_pf *);
static void     ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
                    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
static void     ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
                    struct sysctl_oid_list *,
                    struct i40e_eth_stats *);
static void     ixl_update_stats_counters(struct ixl_pf *);
static void     ixl_update_eth_stats(struct ixl_vsi *);
static void     ixl_pf_reset_stats(struct ixl_pf *);
static void     ixl_vsi_reset_stats(struct ixl_vsi *);
static void     ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
                    u64 *, u64 *);
static void     ixl_stat_update32(struct i40e_hw *, u32, bool,
                    u64 *, u64 *);

#ifdef IXL_DEBUG
static int      ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, ixl_probe),
        DEVMETHOD(device_attach, ixl_attach),
        DEVMETHOD(device_detach, ixl_detach),
        DEVMETHOD(device_shutdown, ixl_shutdown),
        {0, 0}
};

static driver_t ixl_driver = {
        "ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);

/*
** Global reset mutex
*/
static struct mtx ixl_reset_mtx;

/*
** TUNABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
                   "IXL driver parameters");

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixl_enable_msix = 1;
TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
    "Enable MSI-X interrupts");
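
/*
 * Example (illustrative values only): these tunables can be set at
 * boot time from /boot/loader.conf, e.g.:
 *
 *   hw.ixl.enable_msix=0
 *   hw.ixl.ringsz=1024
 *   hw.ixl.max_queues=4
 *
 * The same pattern applies to the other hw.ixl.* tunables below.
 */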

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixl_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixl_ringsz, 0, "Descriptor Ring Size");

/*
** This can be set manually; if left at 0 the
** number of queues will be calculated based
** on the CPUs and MSI-X vectors available.
*/
int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixl_max_queues, 0, "Number of Queues");

/*
** Controls for Interrupt Throttling
**      - true/false for dynamic adjustment
**      - default values for static ITR
*/
int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

#ifdef IXL_FDIR
static int ixl_enable_fdir = 1;
TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
/* Rate at which we sample */
int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
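/*
 * With the default atr_rate of 20, roughly every 20th packet of a TCP
 * flow is sampled; SYN/FIN packets are always sampled (see ixl_atr()).
 */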
#endif

static char *ixl_fc_string[6] = {
        "None",
        "Rx",
        "Tx",
        "Full",
        "Priority",
        "Default"
};
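
/*
 * These strings label the flow control modes reported and set via the
 * per-device "fc" sysctl registered in ixl_attach(), presumably in
 * enum i40e_fc_mode order (0=None, 1=Rx, 2=Tx, 3=Full, ...). A sketch
 * of runtime usage, assuming that numbering:
 *
 *   sysctl dev.ixl.0.fc=3    # request full flow control
 */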


/*********************************************************************
 *  Device identification routine
 *
 *  ixl_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixl_probe(device_t dev)
{
        ixl_vendor_info_t *ent;

        u16     pci_vendor_id, pci_device_id;
        u16     pci_subvendor_id, pci_subdevice_id;
        char    device_name[256];
        static bool lock_init = FALSE;

        INIT_DEBUGOUT("ixl_probe: begin");

        pci_vendor_id = pci_get_vendor(dev);
        if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
                return (ENXIO);

        pci_device_id = pci_get_device(dev);
        pci_subvendor_id = pci_get_subvendor(dev);
        pci_subdevice_id = pci_get_subdevice(dev);

        ent = ixl_vendor_info_array;
        while (ent->vendor_id != 0) {
                if ((pci_vendor_id == ent->vendor_id) &&
                    (pci_device_id == ent->device_id) &&

                    ((pci_subvendor_id == ent->subvendor_id) ||
                     (ent->subvendor_id == 0)) &&

                    ((pci_subdevice_id == ent->subdevice_id) ||
                     (ent->subdevice_id == 0))) {
                        sprintf(device_name, "%s, Version - %s",
                                ixl_strings[ent->index],
                                ixl_driver_version);
                        device_set_desc_copy(dev, device_name);
                        /* One shot mutex init */
                        if (lock_init == FALSE) {
                                lock_init = TRUE;
                                mtx_init(&ixl_reset_mtx,
                                    "ixl_reset",
                                    "IXL RESET Lock", MTX_DEF);
                        }
                        return (BUS_PROBE_DEFAULT);
                }
                ent++;
        }
        return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_attach(device_t dev)
{
        struct ixl_pf   *pf;
        struct i40e_hw  *hw;
        struct ixl_vsi *vsi;
        u16             bus;
        int             error = 0;

        INIT_DEBUGOUT("ixl_attach: begin");

        /* Allocate, clear, and link in our primary soft structure */
        pf = device_get_softc(dev);
        pf->dev = pf->osdep.dev = dev;
        hw = &pf->hw;

        /*
        ** Note this assumes we have a single embedded VSI,
        ** this could be enhanced later to allocate multiple
        */
        vsi = &pf->vsi;
        vsi->dev = pf->dev;

        /* Core Lock Init */
        IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));

        /* Set up the timer callout */
        callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);

        /* Set up sysctls */
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
            pf, 0, ixl_set_flowcntl, "I", "Flow Control");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
            pf, 0, ixl_set_advertise, "I", "Advertised Speed");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
            pf, 0, ixl_current_speed, "A", "Current Port Speed");

        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
            &ixl_rx_itr, IXL_ITR_8K, "RX ITR");

        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "dynamic_rx_itr", CTLTYPE_INT | CTLFLAG_RW,
            &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");

        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
            &ixl_tx_itr, IXL_ITR_4K, "TX ITR");

        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "dynamic_tx_itr", CTLTYPE_INT | CTLFLAG_RW,
            &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");

#ifdef IXL_DEBUG
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
            pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
            pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
            pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "hw_res_info", CTLTYPE_STRING | CTLFLAG_RD,
            pf, 0, ixl_sysctl_hw_res_info, "A", "HW Resource Allocation");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "dump_desc", CTLTYPE_INT | CTLFLAG_WR,
            pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump");
#endif

        /* Save off the information about this board */
        hw->vendor_id = pci_get_vendor(dev);
        hw->device_id = pci_get_device(dev);
        hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
        hw->subsystem_vendor_id =
            pci_read_config(dev, PCIR_SUBVEND_0, 2);
        hw->subsystem_device_id =
            pci_read_config(dev, PCIR_SUBDEV_0, 2);

        hw->bus.device = pci_get_slot(dev);
        hw->bus.func = pci_get_function(dev);

        /* Do PCI setup - map BAR0, etc */
        if (ixl_allocate_pci_resources(pf)) {
                device_printf(dev, "Allocation of PCI resources failed\n");
                error = ENXIO;
                goto err_out;
        }

        /* Create for initial debugging use */
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
            ixl_debug_info, "I", "Debug Information");


        /* Establish a clean starting point */
        i40e_clear_hw(hw);
        error = i40e_pf_reset(hw);
        if (error) {
                device_printf(dev, "PF reset failure %x\n", error);
                error = EIO;
                goto err_out;
        }

        /* For now always do an initial CORE reset on first device */
        {
                static int      ixl_dev_count;
                static int      ixl_dev_track[32];
                u32             my_dev;
                int             i, found = FALSE;
                u16             bus = pci_get_bus(dev);

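                /*
                 * Track bus/slot pairs that have already been reset so
                 * the CORE reset is issued only once per physical
                 * device; ixl_reset_mtx serializes this across driver
                 * instances. Note the static table tracks at most 32
                 * devices.
                 */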
                mtx_lock(&ixl_reset_mtx);
                my_dev = (bus << 8) | hw->bus.device;

                for (i = 0; i < ixl_dev_count; i++) {
                        if (ixl_dev_track[i] == my_dev)
                                found = TRUE;
                }

                if (!found) {
                        u32 reg;

                        ixl_dev_track[ixl_dev_count] = my_dev;
                        ixl_dev_count++;

                        INIT_DEBUGOUT("Initial CORE RESET\n");
                        wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
                        ixl_flush(hw);
                        i = 50;
                        do {
                                i40e_msec_delay(50);
                                reg = rd32(hw, I40E_GLGEN_RSTAT);
                                if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
                                        break;
                        } while (i--);

                        /* paranoia */
                        wr32(hw, I40E_PF_ATQLEN, 0);
                        wr32(hw, I40E_PF_ATQBAL, 0);
                        wr32(hw, I40E_PF_ATQBAH, 0);
                        i40e_clear_pxe_mode(hw);
                }
                mtx_unlock(&ixl_reset_mtx);
        }

        /* Set admin queue parameters */
        hw->aq.num_arq_entries = IXL_AQ_LEN;
        hw->aq.num_asq_entries = IXL_AQ_LEN;
        hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
        hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

        /* Initialize the shared code */
        error = i40e_init_shared_code(hw);
        if (error) {
                device_printf(dev, "Unable to initialize the shared code\n");
                error = EIO;
                goto err_out;
        }

        /* Set up the admin queue */
        error = i40e_init_adminq(hw);
        if (error) {
                device_printf(dev, "The driver for the device stopped "
                    "because the NVM image is newer than expected.\n"
                    "You must install the most recent version of "
                    "the network driver.\n");
                goto err_out;
        }
        device_printf(dev, "%s\n", ixl_fw_version_str(hw));

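        /*
         * Warn if the firmware's AQ API version differs from what the
         * driver was built against: anything newer, or more than one
         * minor revision older, gets flagged.
         */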
        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
            hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
                device_printf(dev, "The driver for the device detected "
                    "a newer version of the NVM image than expected.\n"
                    "Please install the most recent version of the network driver.\n");
        else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
            hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
                device_printf(dev, "The driver for the device detected "
                    "an older version of the NVM image than expected.\n"
                    "Please update the NVM image.\n");

        /* Clear PXE mode */
        i40e_clear_pxe_mode(hw);

        /* Get capabilities from the device */
        error = ixl_get_hw_capabilities(pf);
        if (error) {
                device_printf(dev, "HW capabilities failure!\n");
                goto err_get_cap;
        }

        /* Set up host memory cache */
        error = i40e_init_lan_hmc(hw, vsi->num_queues, vsi->num_queues, 0, 0);
        if (error) {
                device_printf(dev, "init_lan_hmc failed: %d\n", error);
                goto err_get_cap;
        }

        error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
        if (error) {
                device_printf(dev, "configure_lan_hmc failed: %d\n", error);
                goto err_mac_hmc;
        }

        /* Disable LLDP from the firmware */
        i40e_aq_stop_lldp(hw, TRUE, NULL);

        i40e_get_mac_addr(hw, hw->mac.addr);
        error = i40e_validate_mac_addr(hw->mac.addr);
        if (error) {
                device_printf(dev, "validate_mac_addr failed: %d\n", error);
                goto err_mac_hmc;
        }
        bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
        i40e_get_port_mac_addr(hw, hw->mac.port_addr);

        if (ixl_setup_stations(pf) != 0) {
                device_printf(dev, "setup stations failed!\n");
                error = ENOMEM;
                goto err_mac_hmc;
        }

        /* Initialize mac filter list for VSI */
        SLIST_INIT(&vsi->ftl);

        /* Set up interrupt routing here */
        if (pf->msix > 1)
                error = ixl_assign_vsi_msix(pf);
        else
                error = ixl_assign_vsi_legacy(pf);
        if (error)
                goto err_late;

        i40e_msec_delay(75);
        error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
        if (error) {
                device_printf(dev, "link restart failed, aq_err=%d\n",
                    pf->hw.aq.asq_last_status);
        }

        /* Determine link state */
        vsi->link_up = ixl_config_link(hw);

        /* Report if Unqualified modules are found */
        if ((vsi->link_up == FALSE) &&
            (pf->hw.phy.link_info.link_info &
            I40E_AQ_MEDIA_AVAILABLE) &&
            (!(pf->hw.phy.link_info.an_info &
            I40E_AQ_QUALIFIED_MODULE)))
                device_printf(dev, "Link failed because "
                    "an unqualified module was detected\n");

        /* Setup OS specific network interface */
        if (ixl_setup_interface(dev, vsi) != 0)
                goto err_late;

        /* Get the bus configuration and set the shared code */
        bus = ixl_get_bus_info(hw, dev);
        i40e_set_pci_config_data(hw, bus);

        /* Initialize statistics */
        ixl_pf_reset_stats(pf);
        ixl_update_stats_counters(pf);
        ixl_add_hw_stats(pf);

        /* Register for VLAN events */
        vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
            ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
        vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
            ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

        INIT_DEBUGOUT("ixl_attach: end");
        return (0);

err_late:
        ixl_free_vsi(vsi);
err_mac_hmc:
        i40e_shutdown_lan_hmc(hw);
err_get_cap:
        i40e_shutdown_adminq(hw);
err_out:
        if (vsi->ifp != NULL)
                if_free(vsi->ifp);
        ixl_free_pci_resources(pf);
        IXL_PF_LOCK_DESTROY(pf);
        return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_detach(device_t dev)
{
        struct ixl_pf           *pf = device_get_softc(dev);
        struct i40e_hw          *hw = &pf->hw;
        struct ixl_vsi          *vsi = &pf->vsi;
        struct ixl_queue        *que = vsi->queues;
        i40e_status             status;

        INIT_DEBUGOUT("ixl_detach: begin");

        /* Make sure VLANS are not using driver */
        if (vsi->ifp->if_vlantrunk != NULL) {
                device_printf(dev, "Vlan in use, detach first\n");
                return (EBUSY);
        }

        IXL_PF_LOCK(pf);
        ixl_stop(pf);
        IXL_PF_UNLOCK(pf);

        for (int i = 0; i < vsi->num_queues; i++, que++) {
                if (que->tq) {
                        taskqueue_drain(que->tq, &que->task);
                        taskqueue_drain(que->tq, &que->tx_task);
                        taskqueue_free(que->tq);
                }
        }

        /* Shutdown LAN HMC */
        status = i40e_shutdown_lan_hmc(hw);
        if (status)
                device_printf(dev,
                    "Shutdown LAN HMC failed with code %d\n", status);

        /* Shutdown admin queue */
        status = i40e_shutdown_adminq(hw);
        if (status)
                device_printf(dev,
                    "Shutdown Admin queue failed with code %d\n", status);

        /* Unregister VLAN events */
        if (vsi->vlan_attach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
        if (vsi->vlan_detach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

        ether_ifdetach(vsi->ifp);
        callout_drain(&pf->timer);

        ixl_free_pci_resources(pf);
        bus_generic_detach(dev);
        if_free(vsi->ifp);
        ixl_free_vsi(vsi);
        IXL_PF_LOCK_DESTROY(pf);
        return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixl_shutdown(device_t dev)
{
        struct ixl_pf *pf = device_get_softc(dev);
        IXL_PF_LOCK(pf);
        ixl_stop(pf);
        IXL_PF_UNLOCK(pf);
        return (0);
}


/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

static int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
        struct i40e_aqc_list_capabilities_element_resp *buf;
        struct i40e_hw  *hw = &pf->hw;
        device_t        dev = pf->dev;
        int             error, len;
        u16             needed;
        bool            again = TRUE;

        len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
        if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
            malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
                device_printf(dev, "Unable to allocate cap memory\n");
                return (ENOMEM);
        }

        /* This populates the hw struct */
        error = i40e_aq_discover_capabilities(hw, buf, len,
            &needed, i40e_aqc_opc_list_func_capabilities, NULL);
        free(buf, M_DEVBUF);
        if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
            (again == TRUE)) {
                /* retry once with a larger buffer */
                again = FALSE;
                len = needed;
                goto retry;
        } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
                device_printf(dev, "capability discovery failed: %d\n",
                    pf->hw.aq.asq_last_status);
                return (ENODEV);
        }

        /* Capture this PF's starting queue pair */
        pf->qbase = hw->func_caps.base_queue;

#ifdef IXL_DEBUG
        device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
            "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
            hw->pf_id, hw->func_caps.num_vfs,
            hw->func_caps.num_msix_vectors,
            hw->func_caps.num_msix_vectors_vf,
            hw->func_caps.fd_filters_guaranteed,
            hw->func_caps.fd_filters_best_effort,
            hw->func_caps.num_tx_qp,
            hw->func_caps.num_rx_qp,
            hw->func_caps.base_queue);
#endif
        return (error);
}

static void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
        device_t        dev = vsi->dev;

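        /*
         * TSO requires TXCSUM, so walk the three possible states for
         * each address family (neither, csum only, both) and keep the
         * pair consistent; the IXL_FLAGS_KEEP_TSO4/6 flags remember
         * that TSO was on so it can be restored when checksums are
         * re-enabled.
         */
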
        /* Enable/disable TXCSUM/TSO4 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable |= IFCAP_TXCSUM;
                        /* enable TXCSUM, restore TSO if previously enabled */
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                                ifp->if_capenable |= IFCAP_TSO4;
                        }
                } else if (mask & IFCAP_TSO4) {
                        ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                        device_printf(dev,
                            "TSO4 requires txcsum, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM)
                        ifp->if_capenable &= ~IFCAP_TXCSUM;
                else if (mask & IFCAP_TSO4)
                        ifp->if_capenable |= IFCAP_TSO4;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && (ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO4;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
                        device_printf(dev,
                            "TSO4 requires txcsum, disabling both...\n");
                } else if (mask & IFCAP_TSO4)
                        ifp->if_capenable &= ~IFCAP_TSO4;
        }

        /* Enable/disable TXCSUM_IPV6/TSO6 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                                ifp->if_capenable |= IFCAP_TSO6;
                        }
                } else if (mask & IFCAP_TSO6) {
                        ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                        device_printf(dev,
                            "TSO6 requires txcsum6, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6)
                        ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
                else if (mask & IFCAP_TSO6)
                        ifp->if_capenable |= IFCAP_TSO6;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && (ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO6;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        device_printf(dev,
                            "TSO6 requires txcsum6, disabling both...\n");
                } else if (mask & IFCAP_TSO6)
                        ifp->if_capenable &= ~IFCAP_TSO6;
        }
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixl_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
        struct ixl_vsi  *vsi = ifp->if_softc;
        struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
        struct ifreq    *ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
        struct ifaddr *ifa = (struct ifaddr *)data;
        bool            avoid_reset = FALSE;
#endif
        int             error = 0;

        switch (command) {

        case SIOCSIFADDR:
#ifdef INET
                if (ifa->ifa_addr->sa_family == AF_INET)
                        avoid_reset = TRUE;
#endif
#ifdef INET6
                if (ifa->ifa_addr->sa_family == AF_INET6)
                        avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
                /*
                ** Calling init results in link renegotiation,
                ** so we avoid doing it when possible.
                */
                if (avoid_reset) {
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                                ixl_init(pf);
#ifdef INET
                        if (!(ifp->if_flags & IFF_NOARP))
                                arp_ifinit(ifp, ifa);
#endif
                } else
                        error = ether_ioctl(ifp, command, data);
                break;
#endif
        case SIOCSIFMTU:
                IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
                if (ifr->ifr_mtu > IXL_MAX_FRAME -
                   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
                        error = EINVAL;
                } else {
                        IXL_PF_LOCK(pf);
                        ifp->if_mtu = ifr->ifr_mtu;
                        vsi->max_frame_size =
                                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
                            + ETHER_VLAN_ENCAP_LEN;
                        ixl_init_locked(pf);
                        IXL_PF_UNLOCK(pf);
                }
                break;
        case SIOCSIFFLAGS:
                IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
                IXL_PF_LOCK(pf);
                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                if ((ifp->if_flags ^ pf->if_flags) &
                                    (IFF_PROMISC | IFF_ALLMULTI)) {
                                        ixl_set_promisc(vsi);
                                }
                        } else
                                ixl_init_locked(pf);
                } else
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                ixl_stop(pf);
                pf->if_flags = ifp->if_flags;
                IXL_PF_UNLOCK(pf);
                break;
        case SIOCADDMULTI:
                IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        IXL_PF_LOCK(pf);
                        ixl_disable_intr(vsi);
                        ixl_add_multi(vsi);
                        ixl_enable_intr(vsi);
                        IXL_PF_UNLOCK(pf);
                }
                break;
        case SIOCDELMULTI:
                IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        IXL_PF_LOCK(pf);
                        ixl_disable_intr(vsi);
                        ixl_del_multi(vsi);
                        ixl_enable_intr(vsi);
                        IXL_PF_UNLOCK(pf);
                }
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
                error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
                break;
        case SIOCSIFCAP:
        {
                int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

                ixl_cap_txcsum_tso(vsi, ifp, mask);

                if (mask & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                if (mask & IFCAP_RXCSUM_IPV6)
                        ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
                if (mask & IFCAP_LRO)
                        ifp->if_capenable ^= IFCAP_LRO;
                if (mask & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                if (mask & IFCAP_VLAN_HWFILTER)
                        ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        IXL_PF_LOCK(pf);
                        ixl_init_locked(pf);
                        IXL_PF_UNLOCK(pf);
                }
                VLAN_CAPABILITIES(ifp);

                break;
        }

        default:
                IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}


/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  an init entry point in the network interface structure. It is
 *  also used by the driver as a hw/sw initialization routine to
 *  get to a consistent state.
 *
 **********************************************************************/

static void
ixl_init_locked(struct ixl_pf *pf)
{
        struct i40e_hw  *hw = &pf->hw;
        struct ixl_vsi  *vsi = &pf->vsi;
        struct ifnet    *ifp = vsi->ifp;
        device_t        dev = pf->dev;
        struct i40e_filter_control_settings     filter;
        u8              tmpaddr[ETHER_ADDR_LEN];
        int             ret;

        mtx_assert(&pf->pf_mtx, MA_OWNED);
        INIT_DEBUGOUT("ixl_init: begin");
        ixl_stop(pf);

        /* Get the latest mac address... User might use a LAA */
        bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
              I40E_ETH_LENGTH_OF_ADDRESS);
        if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
            i40e_validate_mac_addr(tmpaddr)) {
                bcopy(tmpaddr, hw->mac.addr,
                    I40E_ETH_LENGTH_OF_ADDRESS);
                ret = i40e_aq_mac_address_write(hw,
                    I40E_AQC_WRITE_TYPE_LAA_ONLY,
                    hw->mac.addr, NULL);
                if (ret) {
                        device_printf(dev,
                            "LLA address change failed!\n");
                        return;
                }
        }

        /* Set the various hardware offload abilities */
        ifp->if_hwassist = 0;
        if (ifp->if_capenable & IFCAP_TSO)
                ifp->if_hwassist |= CSUM_TSO;
        if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
        if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
                ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

        /* Set up the device filtering */
        bzero(&filter, sizeof(filter));
        filter.enable_ethtype = TRUE;
        filter.enable_macvlan = TRUE;
#ifdef IXL_FDIR
        filter.enable_fdir = TRUE;
#endif
        if (i40e_set_filter_control(hw, &filter))
                device_printf(dev, "set_filter_control() failed\n");

        /* Set up RSS */
        ixl_config_rss(vsi);

        /* Setup the VSI */
        ixl_setup_vsi(vsi);

        /*
        ** Prepare the rings, hmc contexts, etc...
        */
        if (ixl_initialize_vsi(vsi)) {
                device_printf(dev, "initialize vsi failed!!\n");
                return;
        }

        /* Add protocol filters to list */
        ixl_init_filters(vsi);

        /* Setup vlan's if needed */
        ixl_setup_vlan_filters(vsi);

        /* Start the local timer */
        callout_reset(&pf->timer, hz, ixl_local_timer, pf);

        /* Set up MSI/X routing and the ITR settings */
        if (ixl_enable_msix) {
                ixl_configure_msix(pf);
                ixl_configure_itr(pf);
        } else
                ixl_configure_legacy(pf);

        ixl_enable_rings(vsi);

        i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

        /* Set MTU in hardware */
        int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
            TRUE, 0, NULL);
        if (aq_error)
                device_printf(vsi->dev,
                    "aq_set_mac_config in init error, code %d\n",
                    aq_error);

        /* And now turn on interrupts */
        ixl_enable_intr(vsi);

        /* Now inform the stack we're ready */
        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        return;
}

static void
ixl_init(void *arg)
{
        struct ixl_pf *pf = arg;

        IXL_PF_LOCK(pf);
        ixl_init_locked(pf);
        IXL_PF_UNLOCK(pf);
        return;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/
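/*
 * Deferred queue servicing, run from the per-queue taskqueue: receive
 * on the RX ring, drain completed TX descriptors, restart any queued
 * transmit work, and reschedule itself while more RX work remains.
 */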
static void
ixl_handle_que(void *context, int pending)
{
        struct ixl_queue *que = context;
        struct ixl_vsi *vsi = que->vsi;
        struct i40e_hw  *hw = vsi->hw;
        struct tx_ring  *txr = &que->txr;
        struct ifnet    *ifp = vsi->ifp;
        bool            more;

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                more = ixl_rxeof(que, IXL_RX_LIMIT);
                IXL_TX_LOCK(txr);
                ixl_txeof(que);
                if (!drbr_empty(ifp, txr->br))
                        ixl_mq_start_locked(ifp, txr);
                IXL_TX_UNLOCK(txr);
                if (more) {
                        taskqueue_enqueue(que->tq, &que->task);
                        return;
                }
        }

        /* Re-enable this queue's interrupt */
        ixl_enable_queue(hw, que->me);
        return;
}


/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/
void
ixl_intr(void *arg)
{
        struct ixl_pf           *pf = arg;
        struct i40e_hw          *hw = &pf->hw;
        struct ixl_vsi          *vsi = &pf->vsi;
        struct ixl_queue        *que = vsi->queues;
        struct ifnet            *ifp = vsi->ifp;
        struct tx_ring          *txr = &que->txr;
        u32                     reg, icr0, mask;
        bool                    more_tx, more_rx;

        ++que->irqs;

        /* Protect against spurious interrupts */
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                return;

        icr0 = rd32(hw, I40E_PFINT_ICR0);

        reg = rd32(hw, I40E_PFINT_DYN_CTL0);
        reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
        wr32(hw, I40E_PFINT_DYN_CTL0, reg);

        mask = rd32(hw, I40E_PFINT_ICR0_ENA);

        if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
                taskqueue_enqueue(pf->tq, &pf->adminq);
                return;
        }

        more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

        IXL_TX_LOCK(txr);
        more_tx = ixl_txeof(que);
        if (!drbr_empty(vsi->ifp, txr->br))
                more_tx = 1;
        IXL_TX_UNLOCK(txr);

        /* re-enable other interrupt causes */
        wr32(hw, I40E_PFINT_ICR0_ENA, mask);

        /* And now the queues */
        reg = rd32(hw, I40E_QINT_RQCTL(0));
        reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
        wr32(hw, I40E_QINT_RQCTL(0), reg);

        reg = rd32(hw, I40E_QINT_TQCTL(0));
        reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
        reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
        wr32(hw, I40E_QINT_TQCTL(0), reg);

        ixl_enable_legacy(hw);

        return;
}


/*********************************************************************
 *
 *  MSIX VSI Interrupt Service routine
 *
 **********************************************************************/
void
ixl_msix_que(void *arg)
{
        struct ixl_queue        *que = arg;
        struct ixl_vsi  *vsi = que->vsi;
        struct i40e_hw  *hw = vsi->hw;
        struct tx_ring  *txr = &que->txr;
        bool            more_tx, more_rx;

        /* Protect against spurious interrupts */
        if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
                return;

        ++que->irqs;

        more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

        IXL_TX_LOCK(txr);
        more_tx = ixl_txeof(que);
        /*
        ** Make certain that if the stack
        ** has anything queued the task gets
        ** scheduled to handle it.
        */
        if (!drbr_empty(vsi->ifp, txr->br))
                more_tx = 1;
        IXL_TX_UNLOCK(txr);

        ixl_set_queue_rx_itr(que);
        ixl_set_queue_tx_itr(que);

        if (more_tx || more_rx)
                taskqueue_enqueue(que->tq, &que->task);
        else
                ixl_enable_queue(hw, que->me);

        return;
}


/*********************************************************************
 *
 *  MSIX Admin Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixl_msix_adminq(void *arg)
{
        struct ixl_pf   *pf = arg;
        struct i40e_hw  *hw = &pf->hw;
        u32             reg, mask;

        ++pf->admin_irq;

        reg = rd32(hw, I40E_PFINT_ICR0);
        mask = rd32(hw, I40E_PFINT_ICR0_ENA);

        /* Check on the cause */
        if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
                mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

        if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
                ixl_handle_mdd_event(pf);
                mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
        }

        if (reg & I40E_PFINT_ICR0_VFLR_MASK)
                mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;

        reg = rd32(hw, I40E_PFINT_DYN_CTL0);
        reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
        wr32(hw, I40E_PFINT_DYN_CTL0, reg);

        taskqueue_enqueue(pf->tq, &pf->adminq);
        return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
        struct ixl_vsi  *vsi = ifp->if_softc;
        struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
        struct i40e_hw  *hw = &pf->hw;

        INIT_DEBUGOUT("ixl_media_status: begin");
        IXL_PF_LOCK(pf);

        ixl_update_link_status(pf);

        ifmr->ifm_status = IFM_AVALID;
        ifmr->ifm_active = IFM_ETHER;

        if (!vsi->link_up) {
                IXL_PF_UNLOCK(pf);
                return;
        }

        ifmr->ifm_status |= IFM_ACTIVE;
        /* Hardware is always full-duplex */
        ifmr->ifm_active |= IFM_FDX;

        switch (hw->phy.link_info.phy_type) {
                /* 100 M */
                case I40E_PHY_TYPE_100BASE_TX:
                        ifmr->ifm_active |= IFM_100_TX;
                        break;
                /* 1 G */
                case I40E_PHY_TYPE_1000BASE_T:
                        ifmr->ifm_active |= IFM_1000_T;
                        break;
                case I40E_PHY_TYPE_1000BASE_SX:
                        ifmr->ifm_active |= IFM_1000_SX;
                        break;
                case I40E_PHY_TYPE_1000BASE_LX:
                        ifmr->ifm_active |= IFM_1000_LX;
                        break;
                /* 10 G */
                case I40E_PHY_TYPE_10GBASE_CR1_CU:
                case I40E_PHY_TYPE_10GBASE_SFPP_CU:
                        ifmr->ifm_active |= IFM_10G_TWINAX;
                        break;
                case I40E_PHY_TYPE_10GBASE_SR:
                        ifmr->ifm_active |= IFM_10G_SR;
                        break;
                case I40E_PHY_TYPE_10GBASE_LR:
                        ifmr->ifm_active |= IFM_10G_LR;
                        break;
                case I40E_PHY_TYPE_10GBASE_T:
                        ifmr->ifm_active |= IFM_10G_T;
                        break;
                /* 40 G */
                case I40E_PHY_TYPE_40GBASE_CR4:
                case I40E_PHY_TYPE_40GBASE_CR4_CU:
                        ifmr->ifm_active |= IFM_40G_CR4;
                        break;
                case I40E_PHY_TYPE_40GBASE_SR4:
                        ifmr->ifm_active |= IFM_40G_SR4;
                        break;
                case I40E_PHY_TYPE_40GBASE_LR4:
                        ifmr->ifm_active |= IFM_40G_LR4;
                        break;
                default:
                        ifmr->ifm_active |= IFM_UNKNOWN;
                        break;
        }
        /* Report flow control status as well */
        if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
                ifmr->ifm_active |= IFM_ETH_TXPAUSE;
        if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
                ifmr->ifm_active |= IFM_ETH_RXPAUSE;

        IXL_PF_UNLOCK(pf);

        return;
}
1418
1419 /*********************************************************************
1420  *
1421  *  Media Ioctl callback
1422  *
1423  *  This routine is called when the user changes speed/duplex using
1424  *  the media/mediaopt options with ifconfig.
1425  *
1426  **********************************************************************/
1427 static int
1428 ixl_media_change(struct ifnet * ifp)
1429 {
1430         struct ixl_vsi *vsi = ifp->if_softc;
1431         struct ifmedia *ifm = &vsi->media;
1432
1433         INIT_DEBUGOUT("ixl_media_change: begin");
1434
1435         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1436                 return (EINVAL);
1437
1438         if_printf(ifp, "Media change is currently not supported.\n");
1439
1440         return (ENODEV);
1441 }
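
/*
 * Illustrative sketch (example only, not part of the driver): the
 * ifm_active word composed in ixl_media_status() above packs type,
 * subtype and option bits that a consumer can unpack with the
 * standard <net/if_media.h> macros:
 */
#if 0	/* example only */
static void
example_decode_media(int ifm_active)
{
	if (IFM_TYPE(ifm_active) != IFM_ETHER)
		return;
	switch (IFM_SUBTYPE(ifm_active)) {
	case IFM_40G_CR4:
		printf("40G copper (CR4)\n");
		break;
	case IFM_10G_SR:
		printf("10G short-reach fiber\n");
		break;
	default:
		printf("other media subtype\n");
	}
	if (ifm_active & IFM_ETH_TXPAUSE)
		printf("TX pause\n");
	if (ifm_active & IFM_ETH_RXPAUSE)
		printf("RX pause\n");
}
#endif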
1442
1443
1444 #ifdef IXL_FDIR
1445 /*
1446 ** ATR: Application Targeted Receive - creates a filter
1447 **      based on TX flow info that will keep the receive
1448 **      portion of the flow on the same queue. As implemented,
1449 **      this is only available for TCP connections.
1450 */
1451 void
1452 ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
1453 {
1454         struct ixl_vsi                  *vsi = que->vsi;
1455         struct tx_ring                  *txr = &que->txr;
1456         struct i40e_filter_program_desc *FDIR;
1457         u32                             ptype, dtype;
1458         int                             idx;
1459
1460         /* check if ATR is enabled and sample rate */
1461         if ((!ixl_enable_fdir) || (!txr->atr_rate))
1462                 return;
1463         /*
1464         ** We sample all TCP SYN/FIN packets,
1465         ** or at the selected sample rate 
1466         */
1467         txr->atr_count++;
1468         if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
1469             (txr->atr_count < txr->atr_rate))
1470                 return;
1471         txr->atr_count = 0;
1472
1473         /* Get a descriptor to use */
1474         idx = txr->next_avail;
1475         FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
1476         if (++idx == que->num_desc)
1477                 idx = 0;
1478         txr->avail--;
1479         txr->next_avail = idx;
1480
1481         ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1482             I40E_TXD_FLTR_QW0_QINDEX_MASK;
1483
1484         ptype |= (etype == ETHERTYPE_IP) ?
1485             (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1486             I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1487             (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1488             I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1489
1490         ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1491
1492         dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
1493
1494         /*
1495         ** We use the TCP TH_FIN as a trigger to remove
1496         ** the filter; otherwise it's an update.
1497         */
1498         dtype |= (th->th_flags & TH_FIN) ?
1499             (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1500             I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1501             (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1502             I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1503
1504         dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1505             I40E_TXD_FLTR_QW1_DEST_SHIFT;
1506
1507         dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1508             I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1509
1510         FDIR->qindex_flex_ptype_vsi = htole32(ptype);
1511         FDIR->dtype_cmd_cntindex = htole32(dtype);
1512         return;
1513 }
1514 #endif
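
/*
 * Illustrative sketch (example only): ixl_atr() above builds the two
 * descriptor words with the usual shift-and-mask idiom, placing each
 * field as (value << FIELD_SHIFT) & FIELD_MASK.  A hypothetical
 * stand-alone encoder for one field:
 */
#if 0	/* example only */
static inline u32
example_encode_field(u32 word, u32 value, u32 shift, u32 mask)
{
	return (word | ((value << shift) & mask));
}
/*
 * e.g. placing queue index 3 into QW0:
 * qw0 = example_encode_field(0, 3, I40E_TXD_FLTR_QW0_QINDEX_SHIFT,
 *     I40E_TXD_FLTR_QW0_QINDEX_MASK);
 */
#endif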
1515
1516
1517 static void
1518 ixl_set_promisc(struct ixl_vsi *vsi)
1519 {
1520         struct ifnet    *ifp = vsi->ifp;
1521         struct i40e_hw  *hw = vsi->hw;
1522         int             err, mcnt = 0;
1523         bool            uni = FALSE, multi = FALSE;
1524
1525         if (ifp->if_flags & IFF_ALLMULTI)
1526                 multi = TRUE;
1527         else { /* Need to count the multicast addresses */
1528                 struct  ifmultiaddr *ifma;
1529                 if_maddr_rlock(ifp);
1530                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1531                         if (ifma->ifma_addr->sa_family != AF_LINK)
1532                                 continue;
1533                         if (mcnt == MAX_MULTICAST_ADDR)
1534                                 break;
1535                         mcnt++;
1536                 }
1537                 if_maddr_runlock(ifp);
1538         }
1539
1540         if (mcnt >= MAX_MULTICAST_ADDR)
1541                 multi = TRUE;
1542         if (ifp->if_flags & IFF_PROMISC)
1543                 uni = TRUE;
1544
1545         err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1546             vsi->seid, uni, NULL);
1547         err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1548             vsi->seid, multi, NULL);
1549         return;
1550 }
1551
1552 /*********************************************************************
1553  *      Filter Routines
1554  *
1555  *      Routines for multicast and vlan filter management.
1556  *
1557  *********************************************************************/
1558 static void
1559 ixl_add_multi(struct ixl_vsi *vsi)
1560 {
1561         struct  ifmultiaddr     *ifma;
1562         struct ifnet            *ifp = vsi->ifp;
1563         struct i40e_hw          *hw = vsi->hw;
1564         int                     mcnt = 0, flags;
1565
1566         IOCTL_DEBUGOUT("ixl_add_multi: begin");
1567
1568         if_maddr_rlock(ifp);
1569         /*
1570         ** First just get a count, to decide if we
1571         ** should simply use multicast promiscuous.
1572         */
1573         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1574                 if (ifma->ifma_addr->sa_family != AF_LINK)
1575                         continue;
1576                 mcnt++;
1577         }
1578         if_maddr_runlock(ifp);
1579
1580         if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1581                 /* delete existing MC filters */
1582                 ixl_del_hw_filters(vsi, mcnt);
1583                 i40e_aq_set_vsi_multicast_promiscuous(hw,
1584                     vsi->seid, TRUE, NULL);
1585                 return;
1586         }
1587
1588         mcnt = 0;
1589         if_maddr_rlock(ifp);
1590         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1591                 if (ifma->ifma_addr->sa_family != AF_LINK)
1592                         continue;
1593                 ixl_add_mc_filter(vsi,
1594                     (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1595                 mcnt++;
1596         }
1597         if_maddr_runlock(ifp);
1598         if (mcnt > 0) {
1599                 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1600                 ixl_add_hw_filters(vsi, flags, mcnt);
1601         }
1602
1603         IOCTL_DEBUGOUT("ixl_add_multi: end");
1604         return;
1605 }
1606
1607 static void
1608 ixl_del_multi(struct ixl_vsi *vsi)
1609 {
1610         struct ifnet            *ifp = vsi->ifp;
1611         struct ifmultiaddr      *ifma;
1612         struct ixl_mac_filter   *f;
1613         int                     mcnt = 0;
1614         bool                    match = FALSE;
1615
1616         IOCTL_DEBUGOUT("ixl_del_multi: begin");
1617
1618         /* Search for removed multicast addresses */
1619         if_maddr_rlock(ifp);
1620         SLIST_FOREACH(f, &vsi->ftl, next) {
1621                 if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1622                         match = FALSE;
1623                         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1624                                 if (ifma->ifma_addr->sa_family != AF_LINK)
1625                                         continue;
1626                                 u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1627                                 if (cmp_etheraddr(f->macaddr, mc_addr)) {
1628                                         match = TRUE;
1629                                         break;
1630                                 }
1631                         }
1632                         if (match == FALSE) {
1633                                 f->flags |= IXL_FILTER_DEL;
1634                                 mcnt++;
1635                         }
1636                 }
1637         }
1638         if_maddr_runlock(ifp);
1639
1640         if (mcnt > 0)
1641                 ixl_del_hw_filters(vsi, mcnt);
1642 }
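
/*
 * Note: cmp_etheraddr() used above is defined elsewhere in the driver;
 * a minimal equivalent (sketch, example only) just compares the six
 * octets of the two MAC addresses:
 */
#if 0	/* example only */
static bool
example_cmp_etheraddr(const u8 *ea1, const u8 *ea2)
{
	return (bcmp(ea1, ea2, ETHER_ADDR_LEN) == 0);
}
#endif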
1643
1644
1645 /*********************************************************************
1646  *  Timer routine
1647  *
1648  *  This routine checks for link status, updates statistics,
1649  *  and runs the watchdog check.
1650  *
1651  **********************************************************************/
1652
1653 static void
1654 ixl_local_timer(void *arg)
1655 {
1656         struct ixl_pf           *pf = arg;
1657         struct i40e_hw          *hw = &pf->hw;
1658         struct ixl_vsi          *vsi = &pf->vsi;
1659         struct ixl_queue        *que = vsi->queues;
1660         device_t                dev = pf->dev;
1661         int                     hung = 0;
1662         u32                     mask;
1663
1664         mtx_assert(&pf->pf_mtx, MA_OWNED);
1665
1666         /* Fire off the adminq task */
1667         taskqueue_enqueue(pf->tq, &pf->adminq);
1668
1669         /* Update stats */
1670         ixl_update_stats_counters(pf);
1671
1672         /*
1673         ** Check status of the queues
1674         */
1675         mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1676                 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
1677  
1678         for (int i = 0; i < vsi->num_queues; i++,que++) {
1679                 /* Any queues with outstanding work get a sw irq */
1680                 if (que->busy)
1681                         wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1682                 /*
1683                 ** Each time txeof runs while there are still
1684                 ** uncleaned descriptors it increments busy; once busy
1685                 ** reaches IXL_MAX_TX_BUSY we declare the queue hung.
1686                 */
1687                 if (que->busy == IXL_QUEUE_HUNG) {
1688                         ++hung;
1689                         /* Mark the queue as inactive */
1690                         vsi->active_queues &= ~((u64)1 << que->me);
1691                         continue;
1692                 } else {
1693                         /* Check if we've come back from hung */
1694                         if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1695                                 vsi->active_queues |= ((u64)1 << que->me);
1696                 }
1697                 if (que->busy >= IXL_MAX_TX_BUSY) {
1698                         device_printf(dev, "Warning: queue %d "
1699                             "appears to be hung!\n", i);
1700                         que->busy = IXL_QUEUE_HUNG;
1701                         ++hung;
1702                 }
1703         }
1704         /* Only reinit if all queues show hung */
1705         if (hung == vsi->num_queues)
1706                 goto hung;
1707
1708         callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1709         return;
1710
1711 hung:
1712         device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1713         ixl_init_locked(pf);
1714 }
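
/*
 * Worked example of the bookkeeping above: with vsi->num_queues == 4
 * and all queues healthy, vsi->active_queues == 0xF.  If queue 2
 * reaches IXL_QUEUE_HUNG its bit is cleared (0xB) until txeof makes
 * progress again; only when every queue is hung (hung == num_queues)
 * does the timer fall through to the reinit path.
 */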
1715
1716 /*
1717 ** Note: this routine updates the OS on the link state;
1718 **      the real check of the hardware only happens with
1719 **      a link interrupt.
1720 */
1721 static void
1722 ixl_update_link_status(struct ixl_pf *pf)
1723 {
1724         struct ixl_vsi          *vsi = &pf->vsi;
1725         struct i40e_hw          *hw = &pf->hw;
1726         struct ifnet            *ifp = vsi->ifp;
1727         device_t                dev = pf->dev;
1728         enum i40e_fc_mode       fc;
1729
1730
1731         if (vsi->link_up) {
1732                 if (vsi->link_active == FALSE) {
1733                         i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
1734                         if (bootverbose) {
1735                                 fc = hw->fc.current_mode;
1736                                 device_printf(dev,"Link is up %d Gbps %s,"
1737                                     " Flow Control: %s\n",
1738                                     ((vsi->link_speed == I40E_LINK_SPEED_40GB)? 40:10),
1739                                     "Full Duplex", ixl_fc_string[fc]);
1740                         }
1741                         vsi->link_active = TRUE;
1742                         if_link_state_change(ifp, LINK_STATE_UP);
1743                 }
1744         } else { /* Link down */
1745                 if (vsi->link_active == TRUE) {
1746                         if (bootverbose)
1747                                 device_printf(dev,"Link is Down\n");
1748                         if_link_state_change(ifp, LINK_STATE_DOWN);
1749                         vsi->link_active = FALSE;
1750                 }
1751         }
1752
1753         return;
1754 }
1755
1756 /*********************************************************************
1757  *
1758  *  This routine stops all traffic on the adapter by disabling
1759  *  interrupts and the TX/RX rings, and stops the local timer.
1760  *
1761  **********************************************************************/
1762
1763 static void
1764 ixl_stop(struct ixl_pf *pf)
1765 {
1766         struct ixl_vsi  *vsi = &pf->vsi;
1767         struct ifnet    *ifp = vsi->ifp;
1768
1769         mtx_assert(&pf->pf_mtx, MA_OWNED);
1770
1771         INIT_DEBUGOUT("ixl_stop: begin\n");
1772         ixl_disable_intr(vsi);
1773         ixl_disable_rings(vsi);
1774
1775         /* Tell the stack that the interface is no longer active */
1776         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1777
1778         /* Stop the local timer */
1779         callout_stop(&pf->timer);
1780
1781         return;
1782 }
1783
1784
1785 /*********************************************************************
1786  *
1787  *  Setup Legacy/MSI Interrupt resources and handler for the VSI
1788  *
1789  **********************************************************************/
1790 static int
1791 ixl_assign_vsi_legacy(struct ixl_pf *pf)
1792 {
1793         device_t        dev = pf->dev;
1794         struct          ixl_vsi *vsi = &pf->vsi;
1795         struct          ixl_queue *que = vsi->queues;
1796         int             error, rid = 0;
1797
1798         if (pf->msix == 1)
1799                 rid = 1;
1800         pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1801             &rid, RF_SHAREABLE | RF_ACTIVE);
1802         if (pf->res == NULL) {
1803                 device_printf(dev,"Unable to allocate"
1804                     " bus resource: vsi legacy/msi interrupt\n");
1805                 return (ENXIO);
1806         }
1807
1808         /* Set the handler function */
1809         error = bus_setup_intr(dev, pf->res,
1810             INTR_TYPE_NET | INTR_MPSAFE, NULL,
1811             ixl_intr, pf, &pf->tag);
1812         if (error) {
1813                 pf->res = NULL;
1814                 device_printf(dev, "Failed to register legacy/msi handler");
1815                 return (error);
1816         }
1817         bus_describe_intr(dev, pf->res, pf->tag, "irq0");
1818         TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1819         TASK_INIT(&que->task, 0, ixl_handle_que, que);
1820         que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1821             taskqueue_thread_enqueue, &que->tq);
1822         taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1823             device_get_nameunit(dev));
1824         TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1825         pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1826             taskqueue_thread_enqueue, &pf->tq);
1827         taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1828             device_get_nameunit(dev));
1829
1830         return (0);
1831 }
1832
1833
1834 /*********************************************************************
1835  *
1836  *  Setup MSIX Interrupt resources and handlers for the VSI
1837  *
1838  **********************************************************************/
1839 static int
1840 ixl_assign_vsi_msix(struct ixl_pf *pf)
1841 {
1842         device_t        dev = pf->dev;
1843         struct          ixl_vsi *vsi = &pf->vsi;
1844         struct          ixl_queue *que = vsi->queues;
1845         struct          tx_ring  *txr;
1846         int             error, rid, vector = 0;
1847
1848         /* Admin Queue is vector 0 */
1849         rid = vector + 1;
1850         pf->res = bus_alloc_resource_any(dev,
1851             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1852         if (!pf->res) {
1853                 device_printf(dev,"Unable to allocate"
1854             " bus resource: Adminq interrupt [%d]\n", rid);
1855                 return (ENXIO);
1856         }
1857         /* Set the adminq vector and handler */
1858         error = bus_setup_intr(dev, pf->res,
1859             INTR_TYPE_NET | INTR_MPSAFE, NULL,
1860             ixl_msix_adminq, pf, &pf->tag);
1861         if (error) {
1862                 pf->res = NULL;
1863                 device_printf(dev, "Failed to register Admin que handler");
1864                 return (error);
1865         }
1866         bus_describe_intr(dev, pf->res, pf->tag, "aq");
1867         pf->admvec = vector;
1868         /* Tasklet for Admin Queue */
1869         TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1870         pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1871             taskqueue_thread_enqueue, &pf->tq);
1872         taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1873             device_get_nameunit(pf->dev));
1874         ++vector;
1875
1876         /* Now set up the stations */
1877         for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1878                 rid = vector + 1;
1879                 txr = &que->txr;
1880                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1881                     RF_SHAREABLE | RF_ACTIVE);
1882                 if (que->res == NULL) {
1883                         device_printf(dev,"Unable to allocate"
1884                             " bus resource: que interrupt [%d]\n", vector);
1885                         return (ENXIO);
1886                 }
1887                 /* Set the handler function */
1888                 error = bus_setup_intr(dev, que->res,
1889                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
1890                     ixl_msix_que, que, &que->tag);
1891                 if (error) {
1892                         que->res = NULL;
1893                         device_printf(dev, "Failed to register que handler");
1894                         return (error);
1895                 }
1896                 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
1897                 /* Bind the vector to a CPU */
1898                 bus_bind_intr(dev, que->res, i);
1899                 que->msix = vector;
1900                 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1901                 TASK_INIT(&que->task, 0, ixl_handle_que, que);
1902                 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1903                     taskqueue_thread_enqueue, &que->tq);
1904                 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1905                     device_get_nameunit(pf->dev));
1906         }
1907
1908         return (0);
1909 }
1910
1911
1912 /*
1913  * Allocate MSI/X vectors
1914  */
1915 static int
1916 ixl_init_msix(struct ixl_pf *pf)
1917 {
1918         device_t dev = pf->dev;
1919         int rid, want, vectors, queues, available;
1920
1921         /* Override by tunable */
1922         if (ixl_enable_msix == 0)
1923                 goto msi;
1924
1925         /*
1926         ** When used in a virtualized environment
1927         ** the PCI BUSMASTER capability may not be set,
1928         ** so explicitly set it here and rewrite
1929         ** the ENABLE bit in the MSIX control register
1930         ** at this point to allow the host to
1931         ** successfully initialize us.
1932         */
1933         {
1934                 u16 pci_cmd_word;
1935                 int msix_ctrl;
1936                 pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1937                 pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
1938                 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1939                 pci_find_cap(dev, PCIY_MSIX, &rid);
1940                 rid += PCIR_MSIX_CTRL;
1941                 msix_ctrl = pci_read_config(dev, rid, 2);
1942                 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1943                 pci_write_config(dev, rid, msix_ctrl, 2);
1944         }
1945
1946         /* First try MSI/X */
1947         rid = PCIR_BAR(IXL_BAR);
1948         pf->msix_mem = bus_alloc_resource_any(dev,
1949             SYS_RES_MEMORY, &rid, RF_ACTIVE);
1950         if (!pf->msix_mem) {
1951                 /* May not be enabled */
1952                 device_printf(pf->dev,
1953                     "Unable to map MSIX table \n");
1954                 goto msi;
1955         }
1956
1957         available = pci_msix_count(dev); 
1958         if (available == 0) { /* system has msix disabled */
1959                 bus_release_resource(dev, SYS_RES_MEMORY,
1960                     rid, pf->msix_mem);
1961                 pf->msix_mem = NULL;
1962                 goto msi;
1963         }
1964
1965         /* Figure out a reasonable auto config value */
1966         queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
1967
1968         /* Override with tunable value if sane */
1969         if ((ixl_max_queues != 0) && (ixl_max_queues <= queues)) 
1970                 queues = ixl_max_queues;
1971
1972         /*
1973         ** Want one vector (RX/TX pair) per queue
1974         ** plus an additional for the admin queue.
1975         */
1976         want = queues + 1;
1977         if (want <= available)  /* Have enough */
1978                 vectors = want;
1979         else {
1980                 device_printf(pf->dev,
1981                     "MSIX Configuration Problem, "
1982                     "%d vectors available but %d wanted!\n",
1983                     available, want);
1984                 return (0); /* Will go to Legacy setup */
1985         }
1986
1987         if (pci_alloc_msix(dev, &vectors) == 0) {
1988                 device_printf(pf->dev,
1989                     "Using MSIX interrupts with %d vectors\n", vectors);
1990                 pf->msix = vectors;
1991                 pf->vsi.num_queues = queues;
1992                 return (vectors);
1993         }
1994 msi:
1995         vectors = pci_msi_count(dev);
1996         pf->vsi.num_queues = 1;
1997         pf->msix = 1;
1998         ixl_max_queues = 1;
1999         ixl_enable_msix = 0;
2000         if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2001                 device_printf(pf->dev,"Using an MSI interrupt\n");
2002         else {
2003                 pf->msix = 0;
2004                 device_printf(pf->dev,"Using a Legacy interrupt\n");
2005         }
2006         return (vectors);
2007 }
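
/*
 * Worked example of the vector budgeting above: on an 8-CPU system
 * with 16 MSIX vectors available, queues = min(8, 16 - 1) = 8 and
 * want = 8 + 1 = 9 (one RX/TX pair per queue plus one for the admin
 * queue).  If want exceeded available, the routine returns 0 and the
 * caller falls back to the legacy/MSI setup path.
 */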
2008
2009
2010 /*
2011  * Plumb MSI/X vectors
2012  */
2013 static void
2014 ixl_configure_msix(struct ixl_pf *pf)
2015 {
2016         struct i40e_hw  *hw = &pf->hw;
2017         struct ixl_vsi *vsi = &pf->vsi;
2018         u32             reg;
2019         u16             vector = 1;
2020
2021         /* First set up the adminq - vector 0 */
2022         wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2023         rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2024
2025         reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2026             I40E_PFINT_ICR0_ENA_GRST_MASK |
2027             I40E_PFINT_ICR0_HMC_ERR_MASK |
2028             I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2029             I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2030             I40E_PFINT_ICR0_ENA_VFLR_MASK |
2031             I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2032         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2033
2034         wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2035         wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2036
2037         wr32(hw, I40E_PFINT_DYN_CTL0,
2038             I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2039             I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2040
2041         wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2042
2043         /* Next configure the queues */
2044         for (int i = 0; i < vsi->num_queues; i++, vector++) {
2045                 wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2046                 wr32(hw, I40E_PFINT_LNKLSTN(i), i);
2047
2048                 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2049                 (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2050                 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2051                 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2052                 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2053                 wr32(hw, I40E_QINT_RQCTL(i), reg);
2054
2055                 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2056                 (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2057                 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2058                 ((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2059                 (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2060                 if (i == (vsi->num_queues - 1))
2061                         reg |= (IXL_QUEUE_EOL
2062                             << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2063                 wr32(hw, I40E_QINT_TQCTL(i), reg);
2064         }
2065 }
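
/*
 * Worked example of the cause chain programmed above for queue pair i:
 * LNKLSTN(i) starts the list at RX queue i, whose RQCTL next-queue
 * field points at TX queue i, whose TQCTL next-queue field points at
 * RX queue i+1; the final TX queue instead terminates the list with
 * IXL_QUEUE_EOL.  Both causes of pair i are steered to MSIX vector
 * i + 1, vector 0 being reserved for the admin queue.
 */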
2066
2067 /*
2068  * Configure for MSI single vector operation 
2069  */
2070 static void
2071 ixl_configure_legacy(struct ixl_pf *pf)
2072 {
2073         struct i40e_hw  *hw = &pf->hw;
2074         u32             reg;
2075
2076
2077         wr32(hw, I40E_PFINT_ITR0(0), 0);
2078         wr32(hw, I40E_PFINT_ITR0(1), 0);
2079
2080
2081         /* Setup "other" causes */
2082         reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2083             | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2084             | I40E_PFINT_ICR0_ENA_GRST_MASK
2085             | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2086             | I40E_PFINT_ICR0_ENA_GPIO_MASK
2087             | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2088             | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2089             | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2090             | I40E_PFINT_ICR0_ENA_VFLR_MASK
2091             | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2092             ;
2093         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2094
2095         /* SW_ITR_IDX = 0, but don't change INTENA */
2096         wr32(hw, I40E_PFINT_DYN_CTL0,
2097             I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2098             I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2099         /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2100         wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2101
2102         /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2103         wr32(hw, I40E_PFINT_LNKLST0, 0);
2104
2105         /* Associate the queue pair to the vector and enable the q int */
2106         reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2107             | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2108             | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2109         wr32(hw, I40E_QINT_RQCTL(0), reg);
2110
2111         reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2112             | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2113             | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2114         wr32(hw, I40E_QINT_TQCTL(0), reg);
2115
2116         /* Next enable the queue pair */
2117         reg = rd32(hw, I40E_QTX_ENA(0));
2118         reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2119         wr32(hw, I40E_QTX_ENA(0), reg);
2120
2121         reg = rd32(hw, I40E_QRX_ENA(0));
2122         reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2123         wr32(hw, I40E_QRX_ENA(0), reg);
2124 }
2125
2126
2127 /*
2128  * Set the Initial ITR state
2129  */
2130 static void
2131 ixl_configure_itr(struct ixl_pf *pf)
2132 {
2133         struct i40e_hw          *hw = &pf->hw;
2134         struct ixl_vsi          *vsi = &pf->vsi;
2135         struct ixl_queue        *que = vsi->queues;
2136
2137         vsi->rx_itr_setting = ixl_rx_itr;
2138         if (ixl_dynamic_rx_itr)
2139                 vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2140         vsi->tx_itr_setting = ixl_tx_itr;
2141         if (ixl_dynamic_tx_itr)
2142                 vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2143         
2144         for (int i = 0; i < vsi->num_queues; i++, que++) {
2145                 struct tx_ring  *txr = &que->txr;
2146                 struct rx_ring  *rxr = &que->rxr;
2147
2148                 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2149                     vsi->rx_itr_setting);
2150                 rxr->itr = vsi->rx_itr_setting;
2151                 rxr->latency = IXL_AVE_LATENCY;
2152                 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2153                     vsi->tx_itr_setting);
2154                 txr->itr = vsi->tx_itr_setting;
2155                 txr->latency = IXL_AVE_LATENCY;
2156         }
2157 }
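
/*
 * Note on the ITR values programmed above (a sketch of the arithmetic,
 * assuming the hardware's 2-usec ITR granularity): the register value
 * is an interval expressed in 2-usec units, so the interrupt rate is
 * roughly 1 / (ITR * 2us).  For example, IXL_ITR_8K == 0x3E == 62
 * gives 124us between interrupts (~8000/sec), and IXL_ITR_20K ==
 * 0x19 == 25 gives 50us (~20000/sec).
 */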
2158
2159
2160 static int
2161 ixl_allocate_pci_resources(struct ixl_pf *pf)
2162 {
2163         int             rid;
2164         device_t        dev = pf->dev;
2165
2166         rid = PCIR_BAR(0);
2167         pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2168             &rid, RF_ACTIVE);
2169
2170         if (!(pf->pci_mem)) {
2171                 device_printf(dev,"Unable to allocate bus resource: memory\n");
2172                 return (ENXIO);
2173         }
2174
2175         pf->osdep.mem_bus_space_tag =
2176                 rman_get_bustag(pf->pci_mem);
2177         pf->osdep.mem_bus_space_handle =
2178                 rman_get_bushandle(pf->pci_mem);
2179         pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2180         pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2181
2182         pf->hw.back = &pf->osdep;
2183
2184         /*
2185         ** Now set up MSI or MSI/X, which should
2186         ** return the number of supported
2187         ** vectors (will be 1 for MSI).
2188         */
2189         pf->msix = ixl_init_msix(pf);
2190         return (0);
2191 }
2192
2193 static void
2194 ixl_free_pci_resources(struct ixl_pf * pf)
2195 {
2196         struct ixl_vsi          *vsi = &pf->vsi;
2197         struct ixl_queue        *que = vsi->queues;
2198         device_t                dev = pf->dev;
2199         int                     rid, memrid;
2200
2201         memrid = PCIR_BAR(IXL_BAR);
2202
2203         /* We may get here before stations are set up */
2204         if ((!ixl_enable_msix) || (que == NULL))
2205                 goto early;
2206
2207         /*
2208         **  Release all msix VSI resources:
2209         */
2210         for (int i = 0; i < vsi->num_queues; i++, que++) {
2211                 rid = que->msix + 1;
2212                 if (que->tag != NULL) {
2213                         bus_teardown_intr(dev, que->res, que->tag);
2214                         que->tag = NULL;
2215                 }
2216                 if (que->res != NULL)
2217                         bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2218         }
2219
2220 early:
2221         /* Clean the AdminQ interrupt last */
2222         if (pf->admvec) /* we are doing MSIX */
2223                 rid = pf->admvec + 1;
2224         else
2225                 (pf->msix != 0) ? (rid = 1):(rid = 0);
2226
2227         if (pf->tag != NULL) {
2228                 bus_teardown_intr(dev, pf->res, pf->tag);
2229                 pf->tag = NULL;
2230         }
2231         if (pf->res != NULL)
2232                 bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2233
2234         if (pf->msix)
2235                 pci_release_msi(dev);
2236
2237         if (pf->msix_mem != NULL)
2238                 bus_release_resource(dev, SYS_RES_MEMORY,
2239                     memrid, pf->msix_mem);
2240
2241         if (pf->pci_mem != NULL)
2242                 bus_release_resource(dev, SYS_RES_MEMORY,
2243                     PCIR_BAR(0), pf->pci_mem);
2244
2245         return;
2246 }
2247
2248
2249 /*********************************************************************
2250  *
2251  *  Setup networking device structure and register an interface.
2252  *
2253  **********************************************************************/
2254 static int
2255 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2256 {
2257         struct ifnet            *ifp;
2258         struct i40e_hw          *hw = vsi->hw;
2259         struct ixl_queue        *que = vsi->queues;
2260         struct i40e_aq_get_phy_abilities_resp abilities_resp;
2261         enum i40e_status_code aq_error = 0;
2262
2263         INIT_DEBUGOUT("ixl_setup_interface: begin");
2264
2265         ifp = vsi->ifp = if_alloc(IFT_ETHER);
2266         if (ifp == NULL) {
2267                 device_printf(dev, "can not allocate ifnet structure\n");
2268                 return (-1);
2269         }
2270         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2271         ifp->if_mtu = ETHERMTU;
2272         ifp->if_baudrate = 4000000000;  // ??
2273         ifp->if_init = ixl_init;
2274         ifp->if_softc = vsi;
2275         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2276         ifp->if_ioctl = ixl_ioctl;
2277
2278         ifp->if_transmit = ixl_mq_start;
2279
2280         ifp->if_qflush = ixl_qflush;
2281
2282         ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2283
2284         ether_ifattach(ifp, hw->mac.addr);
2285
2286         vsi->max_frame_size =
2287             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2288             + ETHER_VLAN_ENCAP_LEN;
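        /*
         * e.g. with the default 1500-byte MTU: 1500 + 14 (Ethernet
         * header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes.
         */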
2289
2290         /*
2291          * Tell the upper layer(s) we support long frames.
2292          */
2293         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2294
2295         ifp->if_capabilities |= IFCAP_HWCSUM;
2296         ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2297         ifp->if_capabilities |= IFCAP_TSO;
2298         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2299         ifp->if_capabilities |= IFCAP_LRO;
2300
2301         /* VLAN capabilities */
2302         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2303                              |  IFCAP_VLAN_HWTSO
2304                              |  IFCAP_VLAN_MTU
2305                              |  IFCAP_VLAN_HWCSUM;
2306         ifp->if_capenable = ifp->if_capabilities;
2307
2308         /*
2309         ** Don't turn this on by default, if vlans are
2310         ** created on another pseudo device (eg. lagg)
2311         ** then vlan events are not passed through, breaking
2312         ** operation, but with HW FILTER off it works. If
2313         ** using vlans directly on the ixl driver you can
2314         ** enable this and get full hardware tag filtering.
2315         */
2316         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2317
2318         /*
2319          * Specify the media types supported by this adapter and register
2320          * callbacks to update media and link information
2321          */
2322         ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2323                      ixl_media_status);
2324
2325         aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
2326         if (aq_error) {
2327                 printf("Error getting supported media types, AQ error %d\n", aq_error);
2328                 return (EPERM);
2329         }
2330
2331         /* Add supported media types */
2332         if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2333                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2334
2335         if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2336                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2337
2338         if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2339             abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2340                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2341         if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2342                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2343         if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2344                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2345         if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2346                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2347                 
2348         if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2349             abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4))
2350                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2351         if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2352                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2353         if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2354                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2355
2356         /* Use autoselect media by default */
2357         ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2358         ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2359
2360         return (0);
2361 }
2362
2363 static bool
2364 ixl_config_link(struct i40e_hw *hw)
2365 {
2366         bool check;
2367
2368         i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
2369         check = i40e_get_link_status(hw);
2370 #ifdef IXL_DEBUG
2371         printf("Link is %s\n", check ? "up":"down");
2372 #endif
2373         return (check);
2374 }
2375
2376 /*********************************************************************
2377  *
2378  *  Initialize this VSI 
2379  *
2380  **********************************************************************/
2381 static int
2382 ixl_setup_vsi(struct ixl_vsi *vsi)
2383 {
2384         struct i40e_hw  *hw = vsi->hw;
2385         device_t        dev = vsi->dev;
2386         struct i40e_aqc_get_switch_config_resp *sw_config;
2387         struct i40e_vsi_context ctxt;
2388         u8      aq_buf[I40E_AQ_LARGE_BUF];
2389         int     ret = I40E_SUCCESS;
2390         u16     next = 0;
2391
2392         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2393         ret = i40e_aq_get_switch_config(hw, sw_config,
2394             sizeof(aq_buf), &next, NULL);
2395         if (ret) {
2396                 device_printf(dev,"aq_get_switch_config failed!!\n");
2397                 return (ret);
2398         }
2399 #ifdef IXL_DEBUG
2400         printf("Switch config: header reported: %d in structure, %d total\n",
2401             sw_config->header.num_reported, sw_config->header.num_total);
2402         printf("type=%d seid=%d uplink=%d downlink=%d\n",
2403             sw_config->element[0].element_type,
2404             sw_config->element[0].seid,
2405             sw_config->element[0].uplink_seid,
2406             sw_config->element[0].downlink_seid);
2407 #endif
2408         /* Save off this important value */
2409         vsi->seid = sw_config->element[0].seid;
2410
2411         memset(&ctxt, 0, sizeof(ctxt));
2412         ctxt.seid = vsi->seid;
2413         ctxt.pf_num = hw->pf_id;
2414         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2415         if (ret) {
2416                 device_printf(dev,"get vsi params failed %x!!\n", ret);
2417                 return (ret);
2418         }
2419 #ifdef IXL_DEBUG
2420         printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2421             "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2422             "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2423             ctxt.uplink_seid, ctxt.vsi_number,
2424             ctxt.vsis_allocated, ctxt.vsis_unallocated,
2425             ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2426             ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2427 #endif
2428         /*
2429         ** Set the queue and traffic class bits
2430         **  - when multiple traffic classes are supported
2431         **    this will need to be more robust.
2432         */
2433         ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2434         ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2435         ctxt.info.queue_mapping[0] = 0; 
2436         ctxt.info.tc_mapping[0] = 0x0800; 
2437
2438         /* Set VLAN receive stripping mode */
2439         ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2440         ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2441         if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2442             ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2443         else
2444             ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2445
2446         /* Keep copy of VSI info in VSI for statistic counters */
2447         memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2448
2449         /* Reset VSI statistics */
2450         ixl_vsi_reset_stats(vsi);
2451         vsi->hw_filters_add = 0;
2452         vsi->hw_filters_del = 0;
2453
2454         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2455         if (ret)
2456                 device_printf(dev,"update vsi params failed %x!!\n",
2457                    hw->aq.asq_last_status);
2458         return (ret);
2459 }
2460
2461
2462 /*********************************************************************
2463  *
2464  *  Initialize the VSI:  this handles contexts, which means things
2465  *                       like the number of descriptors, buffer size,
2466  *                       plus we init the rings through this function.
2467  *
2468  **********************************************************************/
2469 static int
2470 ixl_initialize_vsi(struct ixl_vsi *vsi)
2471 {
2472         struct ixl_queue        *que = vsi->queues;
2473         device_t                dev = vsi->dev;
2474         struct i40e_hw          *hw = vsi->hw;
2475         int                     err = 0;
2476
2477
2478         for (int i = 0; i < vsi->num_queues; i++, que++) {
2479                 struct tx_ring          *txr = &que->txr;
2480                 struct rx_ring          *rxr = &que->rxr;
2481                 struct i40e_hmc_obj_txq tctx;
2482                 struct i40e_hmc_obj_rxq rctx;
2483                 u32                     txctl;
2484                 u16                     size;
2485
2486
2487                 /* Setup the HMC TX Context  */
2488                 size = que->num_desc * sizeof(struct i40e_tx_desc);
2489                 memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2490                 tctx.new_context = 1;
2491                 tctx.base = (txr->dma.pa/128);
2492                 tctx.qlen = que->num_desc;
2493                 tctx.fc_ena = 0;
2494                 tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2495                 /* Enable HEAD writeback */
2496                 tctx.head_wb_ena = 1;
2497                 tctx.head_wb_addr = txr->dma.pa +
2498                     (que->num_desc * sizeof(struct i40e_tx_desc));
2499                 tctx.rdylist_act = 0;
2500                 err = i40e_clear_lan_tx_queue_context(hw, i);
2501                 if (err) {
2502                         device_printf(dev, "Unable to clear TX context\n");
2503                         break;
2504                 }
2505                 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2506                 if (err) {
2507                         device_printf(dev, "Unable to set TX context\n");
2508                         break;
2509                 }
2510                 /* Associate the ring with this PF */
2511                 txctl = I40E_QTX_CTL_PF_QUEUE;
2512                 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2513                     I40E_QTX_CTL_PF_INDX_MASK);
2514                 wr32(hw, I40E_QTX_CTL(i), txctl);
2515                 ixl_flush(hw);
2516
2517                 /* Do ring (re)init */
2518                 ixl_init_tx_ring(que);
2519
2520                 /* Next setup the HMC RX Context  */
2521                 if (vsi->max_frame_size <= 2048)
2522                         rxr->mbuf_sz = MCLBYTES;
2523                 else
2524                         rxr->mbuf_sz = MJUMPAGESIZE;
2525
2526                 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2527
2528                 /* Set up an RX context for the HMC */
2529                 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2530                 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2531                 /* ignore header split for now */
2532                 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2533                 rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2534                     vsi->max_frame_size : max_rxmax;
2535                 rctx.dtype = 0;
2536                 rctx.dsize = 1; /* do 32byte descriptors */
2537                 rctx.hsplit_0 = 0;  /* no HDR split initially */
2538                 rctx.base = (rxr->dma.pa/128);
2539                 rctx.qlen = que->num_desc;
2540                 rctx.tphrdesc_ena = 1;
2541                 rctx.tphwdesc_ena = 1;
2542                 rctx.tphdata_ena = 0;
2543                 rctx.tphhead_ena = 0;
2544                 rctx.lrxqthresh = 2;
2545                 rctx.crcstrip = 1;
2546                 rctx.l2tsel = 1;
2547                 rctx.showiv = 1;
2548                 rctx.fc_ena = 0;
2549                 rctx.prefena = 1;
2550
2551                 err = i40e_clear_lan_rx_queue_context(hw, i);
2552                 if (err) {
2553                         device_printf(dev,
2554                             "Unable to clear RX context %d\n", i);
2555                         break;
2556                 }
2557                 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2558                 if (err) {
2559                         device_printf(dev, "Unable to set RX context %d\n", i);
2560                         break;
2561                 }
2562                 err = ixl_init_rx_ring(que);
2563                 if (err) {
2564                         device_printf(dev, "Fail in init_rx_ring %d\n", i);
2565                         break;
2566                 }
2567                 wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2568                 wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2569         }
2570         return (err);
2571 }
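
/*
 * Sketch (example only) of how the head writeback configured above is
 * consumed: tctx.head_wb_addr points just past the descriptor ring,
 * so the TX cleanup path can read the hardware's ring head from host
 * memory instead of issuing a register read.  Conceptually:
 */
#if 0	/* example only */
	volatile u32 *head_wb = (volatile u32 *)((char *)txr->base +
	    (que->num_desc * sizeof(struct i40e_tx_desc)));
	u32 hw_head = *head_wb;	/* descriptors before hw_head are done */
#endif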
2572
2573
2574 /*********************************************************************
2575  *
2576  *  Free all VSI structs.
2577  *
2578  **********************************************************************/
2579 void
2580 ixl_free_vsi(struct ixl_vsi *vsi)
2581 {
2582         struct ixl_pf           *pf = (struct ixl_pf *)vsi->back;
2583         struct ixl_queue        *que = vsi->queues;
2584         struct ixl_mac_filter *f;
2585
2586         /* Free station queues */
2587         for (int i = 0; i < vsi->num_queues; i++, que++) {
2588                 struct tx_ring *txr = &que->txr;
2589                 struct rx_ring *rxr = &que->rxr;
2590         
2591                 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2592                         continue;
2593                 IXL_TX_LOCK(txr);
2594                 ixl_free_que_tx(que);
2595                 if (txr->base)
2596                         i40e_free_dma_mem(&pf->hw, &txr->dma);
2597                 IXL_TX_UNLOCK(txr);
2598                 IXL_TX_LOCK_DESTROY(txr);
2599
2600                 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2601                         continue;
2602                 IXL_RX_LOCK(rxr);
2603                 ixl_free_que_rx(que);
2604                 if (rxr->base)
2605                         i40e_free_dma_mem(&pf->hw, &rxr->dma);
2606                 IXL_RX_UNLOCK(rxr);
2607                 IXL_RX_LOCK_DESTROY(rxr);
2608                 
2609         }
2610         free(vsi->queues, M_DEVBUF);
2611
2612         /* Free VSI filter list */
2613         while (!SLIST_EMPTY(&vsi->ftl)) {
2614                 f = SLIST_FIRST(&vsi->ftl);
2615                 SLIST_REMOVE_HEAD(&vsi->ftl, next);
2616                 free(f, M_DEVBUF);
2617         }
2618 }
2619
2620
2621 /*********************************************************************
2622  *
2623  *  Allocate memory for the VSI (virtual station interface) and its
2624  *  associated queues, rings and the descriptors associated with each,
2625  *  called only once at attach.
2626  *
2627  **********************************************************************/
2628 static int
2629 ixl_setup_stations(struct ixl_pf *pf)
2630 {
2631         device_t                dev = pf->dev;
2632         struct ixl_vsi          *vsi;
2633         struct ixl_queue        *que;
2634         struct tx_ring          *txr;
2635         struct rx_ring          *rxr;
2636         int                     rsize, tsize;
2637         int                     error = I40E_SUCCESS;
2638
2639         vsi = &pf->vsi;
2640         vsi->back = (void *)pf;
2641         vsi->hw = &pf->hw;
2642         vsi->id = 0;
2643         vsi->num_vlans = 0;
2644
2645         /* Get memory for the station queues */
2646         if (!(vsi->queues =
2647             (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2648             vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2649                 device_printf(dev, "Unable to allocate queue memory\n");
2650                 error = ENOMEM;
2651                 goto early;
2652         }
2653
2654         for (int i = 0; i < vsi->num_queues; i++) {
2655                 que = &vsi->queues[i];
2656                 que->num_desc = ixl_ringsz;
2657                 que->me = i;
2658                 que->vsi = vsi;
2659                 /* mark the queue as active */
2660                 vsi->active_queues |= (u64)1 << que->me;
2661                 txr = &que->txr;
2662                 txr->que = que;
2663                 txr->tail = I40E_QTX_TAIL(que->me);
2664
2665                 /* Initialize the TX lock */
2666                 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2667                     device_get_nameunit(dev), que->me);
2668                 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2669                 /* Create the TX descriptor ring */
2670                 tsize = roundup2((que->num_desc *
2671                     sizeof(struct i40e_tx_desc)) +
2672                     sizeof(u32), DBA_ALIGN);
2673                 if (i40e_allocate_dma_mem(&pf->hw,
2674                     &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2675                         device_printf(dev,
2676                             "Unable to allocate TX Descriptor memory\n");
2677                         error = ENOMEM;
2678                         goto fail;
2679                 }
2680                 txr->base = (struct i40e_tx_desc *)txr->dma.va;
2681                 bzero((void *)txr->base, tsize);
2682                 /* Now allocate transmit soft structs for the ring */
2683                 if (ixl_allocate_tx_data(que)) {
2684                         device_printf(dev,
2685                             "Critical Failure setting up TX structures\n");
2686                         error = ENOMEM;
2687                         goto fail;
2688                 }
2689                 /* Allocate a buf ring */
2690                 txr->br = buf_ring_alloc(4096, M_DEVBUF,
2691                     M_WAITOK, &txr->mtx);
2692                 if (txr->br == NULL) {
2693                         device_printf(dev,
2694                             "Critical Failure setting up TX buf ring\n");
2695                         error = ENOMEM;
2696                         goto fail;
2697                 }
2698
2699                 /*
2700                  * Next the RX queues...
2701                  */ 
2702                 rsize = roundup2(que->num_desc *
2703                     sizeof(union i40e_rx_desc), DBA_ALIGN);
2704                 rxr = &que->rxr;
2705                 rxr->que = que;
2706                 rxr->tail = I40E_QRX_TAIL(que->me);
2707
2708                 /* Initialize the RX side lock */
2709                 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2710                     device_get_nameunit(dev), que->me);
2711                 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2712
2713                 if (i40e_allocate_dma_mem(&pf->hw,
2714                     &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2715                         device_printf(dev,
2716                             "Unable to allocate RX Descriptor memory\n");
2717                         error = ENOMEM;
2718                         goto fail;
2719                 }
2720                 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2721                 bzero((void *)rxr->base, rsize);
2722
2723                 /* Allocate receive soft structs for the ring */
2724                 if (ixl_allocate_rx_data(que)) {
2725                         device_printf(dev,
2726                             "Critical Failure setting up receive structs\n");
2727                         error = ENOMEM;
2728                         goto fail;
2729                 }
2730         }
2731
2732         return (0);
2733
2734 fail:
2735         for (int i = 0; i < vsi->num_queues; i++) {
2736                 que = &vsi->queues[i];
2737                 rxr = &que->rxr;
2738                 txr = &que->txr;
2739                 if (rxr->base)
2740                         i40e_free_dma_mem(&pf->hw, &rxr->dma);
2741                 if (txr->base)
2742                         i40e_free_dma_mem(&pf->hw, &txr->dma);
2743         }
2744
2745 early:
2746         return (error);
2747 }
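
/*
 * Worked example of the ring sizing above (assuming the default ring
 * size of 1024 descriptors): a TX descriptor is 16 bytes, so
 * tsize = roundup2(1024 * 16 + sizeof(u32), DBA_ALIGN), the extra u32
 * being the head writeback word; the RX ring uses the 32-byte
 * descriptor format (rctx.dsize = 1 above), so
 * rsize = roundup2(1024 * 32, DBA_ALIGN).
 */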
2748
2749 /*
2750 ** Provide an update to the queue RX
2751 ** interrupt moderation value.
2752 */
2753 static void
2754 ixl_set_queue_rx_itr(struct ixl_queue *que)
2755 {
2756         struct ixl_vsi  *vsi = que->vsi;
2757         struct i40e_hw  *hw = vsi->hw;
2758         struct rx_ring  *rxr = &que->rxr;
2759         u16             rx_itr;
2760         u16             rx_latency = 0;
2761         int             rx_bytes;
2762
2763
2764         /* Idle, do nothing */
2765         if (rxr->bytes == 0)
2766                 return;
2767
2768         if (ixl_dynamic_rx_itr) {
2769                 rx_bytes = rxr->bytes/rxr->itr;
2770                 rx_itr = rxr->itr;
2771
2772                 /* Adjust latency range */
2773                 switch (rxr->latency) {
2774                 case IXL_LOW_LATENCY:
2775                         if (rx_bytes > 10) {
2776                                 rx_latency = IXL_AVE_LATENCY;
2777                                 rx_itr = IXL_ITR_20K;
2778                         }
2779                         break;
2780                 case IXL_AVE_LATENCY:
2781                         if (rx_bytes > 20) {
2782                                 rx_latency = IXL_BULK_LATENCY;
2783                                 rx_itr = IXL_ITR_8K;
2784                         } else if (rx_bytes <= 10) {
2785                                 rx_latency = IXL_LOW_LATENCY;
2786                                 rx_itr = IXL_ITR_100K;
2787                         }
2788                         break;
2789                 case IXL_BULK_LATENCY:
2790                         if (rx_bytes <= 20) {
2791                                 rx_latency = IXL_AVE_LATENCY;
2792                                 rx_itr = IXL_ITR_20K;
2793                         }
2794                         break;
2795                 }
2796
2797                 rxr->latency = rx_latency;
2798
2799                 if (rx_itr != rxr->itr) {
2800                         /* do an exponential smoothing */
2801                         rx_itr = (10 * rx_itr * rxr->itr) /
2802                             ((9 * rx_itr) + rxr->itr);
2803                         rxr->itr = rx_itr & IXL_MAX_ITR;
2804                         wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2805                             que->me), rxr->itr);
2806                 }
2807         } else { /* We may have toggled to non-dynamic */
2808                 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2809                         vsi->rx_itr_setting = ixl_rx_itr;
2810                 /* Update the hardware if needed */
2811                 if (rxr->itr != vsi->rx_itr_setting) {
2812                         rxr->itr = vsi->rx_itr_setting;
2813                         wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2814                             que->me), rxr->itr);
2815                 }
2816         }
2817         rxr->bytes = 0;
2818         rxr->packets = 0;
2819         return;
2820 }
2821
2822
2823 /*
2824 ** Provide an update to the queue TX
2825 ** interrupt moderation value.
2826 */
2827 static void
2828 ixl_set_queue_tx_itr(struct ixl_queue *que)
2829 {
2830         struct ixl_vsi  *vsi = que->vsi;
2831         struct i40e_hw  *hw = vsi->hw;
2832         struct tx_ring  *txr = &que->txr;
2833         u16             tx_itr;
2834         u16             tx_latency = 0;
2835         int             tx_bytes;
2836
2837
2838         /* Idle, do nothing */
2839         if (txr->bytes == 0)
2840                 return;
2841
2842         if (ixl_dynamic_tx_itr) {
2843                 tx_bytes = txr->bytes/txr->itr;
2844                 tx_itr = txr->itr;
2845
2846                 switch (txr->latency) {
2847                 case IXL_LOW_LATENCY:
2848                         if (tx_bytes > 10) {
2849                                 tx_latency = IXL_AVE_LATENCY;
2850                                 tx_itr = IXL_ITR_20K;
2851                         }
2852                         break;
2853                 case IXL_AVE_LATENCY:
2854                         if (tx_bytes > 20) {
2855                                 tx_latency = IXL_BULK_LATENCY;
2856                                 tx_itr = IXL_ITR_8K;
2857                         } else if (tx_bytes <= 10) {
2858                                 tx_latency = IXL_LOW_LATENCY;
2859                                 tx_itr = IXL_ITR_100K;
2860                         }
2861                         break;
2862                 case IXL_BULK_LATENCY:
2863                         if (tx_bytes <= 20) {
2864                                 tx_latency = IXL_AVE_LATENCY;
2865                                 tx_itr = IXL_ITR_20K;
2866                         }
2867                         break;
2868                 }
2869
2870                 txr->latency = tx_latency;
2871
2872                 if (tx_itr != txr->itr) {
2873                         /* do an exponential smoothing */
2874                         tx_itr = (10 * tx_itr * txr->itr) /
2875                             ((9 * tx_itr) + txr->itr);
2876                         txr->itr = tx_itr & IXL_MAX_ITR;
2877                         wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2878                             que->me), txr->itr);
2879                 }
2880
2881         } else { /* We may have toggled to non-dynamic */
2882                 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2883                         vsi->tx_itr_setting = ixl_tx_itr;
2884                 /* Update the hardware if needed */
2885                 if (txr->itr != vsi->tx_itr_setting) {
2886                         txr->itr = vsi->tx_itr_setting;
2887                         wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2888                             que->me), txr->itr);
2889                 }
2890         }
2891         txr->bytes = 0;
2892         txr->packets = 0;
2893         return;
2894 }
2895
2896
2897 static void
2898 ixl_add_hw_stats(struct ixl_pf *pf)
2899 {
2900         device_t dev = pf->dev;
2901         struct ixl_vsi *vsi = &pf->vsi;
2902         struct ixl_queue *queues = vsi->queues;
2903         struct i40e_eth_stats *vsi_stats = &vsi->eth_stats;
2904         struct i40e_hw_port_stats *pf_stats = &pf->stats;
2905
2906         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2907         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2908         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2909
2910         struct sysctl_oid *vsi_node, *queue_node;
2911         struct sysctl_oid_list *vsi_list, *queue_list;
2912
2913         struct tx_ring *txr;
2914         struct rx_ring *rxr;
2915
2916         /* Driver statistics */
2917         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2918                         CTLFLAG_RD, &pf->watchdog_events,
2919                         "Watchdog timeouts");
2920         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2921                         CTLFLAG_RD, &pf->admin_irq,
2922                         "Admin Queue IRQ Handled");
2923
2924         /* VSI statistics */
2925 #define QUEUE_NAME_LEN 32
2926         char queue_namebuf[QUEUE_NAME_LEN];
2927         
2928         // ERJ: Only one vsi now, re-do when >1 VSI enabled
2929         // snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", vsi->info.stat_counter_idx);
2930         vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
2931                                    CTLFLAG_RD, NULL, "VSI-specific stats");
2932         vsi_list = SYSCTL_CHILDREN(vsi_node);
2933
2934         ixl_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats);
2935
2936         /* Queue statistics */
2937         for (int q = 0; q < vsi->num_queues; q++) {
2938                 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2939                 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
2940                                              CTLFLAG_RD, NULL, "Queue #");
2941                 queue_list = SYSCTL_CHILDREN(queue_node);
2942
2943                 txr = &(queues[q].txr);
2944                 rxr = &(queues[q].rxr);
2945
2946                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2947                                 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2948                                 "m_defrag() failed");
2949                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
2950                                 CTLFLAG_RD, &(queues[q].dropped_pkts),
2951                                 "Driver dropped packets");
2952                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2953                                 CTLFLAG_RD, &(queues[q].irqs),
2954                                 "irqs on this queue");
2955                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2956                                 CTLFLAG_RD, &(queues[q].tso),
2957                                 "TSO");
2958                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
2959                                 CTLFLAG_RD, &(queues[q].tx_dma_setup),
2960                                 "Driver tx dma failure in xmit");
2961                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2962                                 CTLFLAG_RD, &(txr->no_desc),
2963                                 "Queue No Descriptor Available");
2964                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2965                                 CTLFLAG_RD, &(txr->total_packets),
2966                                 "Queue Packets Transmitted");
2967                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2968                                 CTLFLAG_RD, &(txr->tx_bytes),
2969                                 "Queue Bytes Transmitted");
2970                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2971                                 CTLFLAG_RD, &(rxr->rx_packets),
2972                                 "Queue Packets Received");
2973                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2974                                 CTLFLAG_RD, &(rxr->rx_bytes),
2975                                 "Queue Bytes Received");
2976         }
2977
2978         /* MAC stats */
2979         ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
2980 }
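
/*
** Illustrative usage (an assumption about the resulting OID layout,
** not stated in this file): the nodes registered above hang off the
** device's sysctl tree, so with driver "ixl" unit 0 the counters
** should be readable from userland as, e.g.:
**
**    # sysctl dev.ixl.0.watchdog_events
**    # sysctl dev.ixl.0.vsi.que0.tx_packets
**    # sysctl dev.ixl.0.mac.crc_errors
**
** where "dev.ixl.0" comes from the device_t handed to
** device_get_sysctl_ctx()/device_get_sysctl_tree().
*/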
2981
2982 static void
2983 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
2984         struct sysctl_oid_list *child,
2985         struct i40e_eth_stats *eth_stats)
2986 {
2987         struct ixl_sysctl_info ctls[] =
2988         {
2989                 {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2990                 {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
2991                         "Unicast Packets Received"},
2992                 {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
2993                         "Multicast Packets Received"},
2994                 {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
2995                         "Broadcast Packets Received"},
2996                 {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
2997                 {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2998                 {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2999                 {&eth_stats->tx_multicast, "mcast_pkts_txd",
3000                         "Multicast Packets Transmitted"},
3001                 {&eth_stats->tx_broadcast, "bcast_pkts_txd",
3002                         "Broadcast Packets Transmitted"},
3003                 {&eth_stats->tx_discards, "tx_discards", "Discarded TX packets"},
3004                 // end
3005                 {0,0,0}
3006         };
3007
3008         struct ixl_sysctl_info *entry = ctls;
3009         while (entry->stat != 0)
3010         {
3011                 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3012                                 CTLFLAG_RD, entry->stat,
3013                                 entry->description);
3014                 entry++;
3015         }
3016 }
3017
3018 static void
3019 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3020         struct sysctl_oid_list *child,
3021         struct i40e_hw_port_stats *stats)
3022 {
3023         struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3024                                     CTLFLAG_RD, NULL, "Mac Statistics");
3025         struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3026
3027         struct i40e_eth_stats *eth_stats = &stats->eth;
3028         ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3029
3030         struct ixl_sysctl_info ctls[] = 
3031         {
3032                 {&stats->crc_errors, "crc_errors", "CRC Errors"},
3033                 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3034                 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3035                 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3036                 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3037                 /* Packet Reception Stats */
3038                 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3039                 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3040                 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3041                 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3042                 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3043                 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3044                 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3045                 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3046                 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3047                 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3048                 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3049                 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3050                 /* Packet Transmission Stats */
3051                 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3052                 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3053                 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3054                 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3055                 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3056                 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3057                 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3058                 /* Flow control */
3059                 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3060                 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3061                 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3062                 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3063                 /* End */
3064                 {0,0,0}
3065         };
3066
3067         struct ixl_sysctl_info *entry = ctls;
3068         while (entry->stat != 0)
3069         {
3070                 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3071                                 CTLFLAG_RD, entry->stat,
3072                                 entry->description);
3073                 entry++;
3074         }
3075 }
3076
3077 /*
3078 ** ixl_config_rss - setup RSS 
3079 **  - note this is done for the single vsi
3080 */
3081 static void ixl_config_rss(struct ixl_vsi *vsi)
3082 {
3083         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
3084         struct i40e_hw  *hw = vsi->hw;
3085         u32             lut = 0;
3086         u64             set_hena, hena;
3087         int             i, j;
3088
3089         static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
3090             0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
3091             0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
3092             0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
3093
3094         /* Fill out hash function seed */
3095         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
3096                 wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
3097
3098         /* Enable PCTYPES for RSS: */
3099         set_hena =
3100                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3101                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3102                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3103                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3104                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3105                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3106                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3107                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3108                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3109                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3110                 ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3111
3112         hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3113             ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3114         hena |= set_hena;
3115         wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3116         wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3117
3118         /* Populate the LUT with the max no. of queues in a round-robin fashion */
3119         for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3120                 if (j == vsi->num_queues)
3121                         j = 0;
3122                 /* lut = 4-byte sliding window of 4 lut entries */
3123                 lut = (lut << 8) | (j &
3124                     ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3125                 /* On i = 3, we have 4 entries in lut; write to the register */
3126                 if ((i & 3) == 3)
3127                         wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3128         }
3129         ixl_flush(hw);
3130 }
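
/*
** Worked example (editorial, derived from the loop above): with
** vsi->num_queues = 4 and an entry width of at least 2 bits, the
** queue indices pack oldest-first into the most significant byte:
**
**    i=0: lut = 0x00000000  (j = 0)
**    i=1: lut = 0x00000001  (j = 1)
**    i=2: lut = 0x00000102  (j = 2)
**    i=3: lut = 0x00010203  -> wr32(hw, I40E_PFQF_HLUT(0), lut)
**
** and every subsequent 4th iteration flushes the next four LUT
** entries; with exactly 4 queues the window realigns, so each HLUT
** register receives the same 0x00010203 pattern.
*/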
3131
3132
3133 /*
3134 ** This routine is run via a vlan config EVENT;
3135 ** it enables us to use the HW Filter table since
3136 ** we can get the vlan id. This just creates the
3137 ** entry in the soft version of the VFTA; init will
3138 ** repopulate the real table.
3139 */
3140 static void
3141 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3142 {
3143         struct ixl_vsi  *vsi = ifp->if_softc;
3144         struct i40e_hw  *hw = vsi->hw;
3145         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
3146
3147         if (ifp->if_softc !=  arg)   /* Not our event */
3148                 return;
3149
3150         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3151                 return;
3152
3153         IXL_PF_LOCK(pf);
3154         ++vsi->num_vlans;
3155         ixl_add_filter(vsi, hw->mac.addr, vtag);
3156         IXL_PF_UNLOCK(pf);
3157 }
3158
3159 /*
3160 ** This routine is run via a vlan
3161 ** unconfig EVENT; it removes our entry
3162 ** from the soft vfta.
3163 */
3164 static void
3165 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3166 {
3167         struct ixl_vsi  *vsi = ifp->if_softc;
3168         struct i40e_hw  *hw = vsi->hw;
3169         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
3170
3171         if (ifp->if_softc !=  arg)
3172                 return;
3173
3174         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3175                 return;
3176
3177         IXL_PF_LOCK(pf);
3178         --vsi->num_vlans;
3179         ixl_del_filter(vsi, hw->mac.addr, vtag);
3180         IXL_PF_UNLOCK(pf);
3181 }
3182
3183 /*
3184 ** This routine updates vlan filters; called by init,
3185 ** it scans the filter table and then updates the hw
3186 ** after a soft reset.
3187 */
3188 static void
3189 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3190 {
3191         struct ixl_mac_filter   *f;
3192         int                     cnt = 0, flags;
3193
3194         if (vsi->num_vlans == 0)
3195                 return;
3196         /*
3197         ** Scan the filter list for vlan entries,
3198         ** mark them for addition and then call
3199         ** for the AQ update.
3200         */
3201         SLIST_FOREACH(f, &vsi->ftl, next) {
3202                 if (f->flags & IXL_FILTER_VLAN) {
3203                         f->flags |=
3204                             (IXL_FILTER_ADD |
3205                             IXL_FILTER_USED);
3206                         cnt++;
3207                 }
3208         }
3209         if (cnt == 0) {
3210                 printf("setup vlan: no filters found!\n");
3211                 return;
3212         }
3213         flags = IXL_FILTER_VLAN;
3214         flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3215         ixl_add_hw_filters(vsi, flags, cnt);
3216         return;
3217 }
3218
3219 /*
3220 ** Initialize filter list and add filters that the hardware
3221 ** needs to know about.
3222 */
3223 static void
3224 ixl_init_filters(struct ixl_vsi *vsi)
3225 {
3226         /* Add broadcast address */
3227         u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
3228         ixl_add_filter(vsi, bc, IXL_VLAN_ANY);
3229 }
3230
3231 /*
3232 ** This routine adds multicast filters
3233 */
3234 static void
3235 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3236 {
3237         struct ixl_mac_filter *f;
3238
3239         /* Does one already exist? */
3240         f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3241         if (f != NULL)
3242                 return;
3243
3244         f = ixl_get_filter(vsi);
3245         if (f == NULL) {
3246                 printf("WARNING: no filter available!!\n");
3247                 return;
3248         }
3249         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3250         f->vlan = IXL_VLAN_ANY;
3251         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3252             | IXL_FILTER_MC);
3253
3254         return;
3255 }
3256
3257 /*
3258 ** This routine adds macvlan filters
3259 */
3260 static void
3261 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3262 {
3263         struct ixl_mac_filter   *f, *tmp;
3264         device_t                dev = vsi->dev;
3265
3266         DEBUGOUT("ixl_add_filter: begin");
3267
3268         /* Does one already exist? */
3269         f = ixl_find_filter(vsi, macaddr, vlan);
3270         if (f != NULL)
3271                 return;
3272         /*
3273         ** Is this the first vlan being registered? If so, we
3274         ** need to remove the ANY filter that indicates we are
3275         ** not in a vlan, and replace it with a 0 filter.
3276         */
3277         if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3278                 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3279                 if (tmp != NULL) {
3280                         ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3281                         ixl_add_filter(vsi, macaddr, 0);
3282                 }
3283         }
3284
3285         f = ixl_get_filter(vsi);
3286         if (f == NULL) {
3287                 device_printf(dev, "WARNING: no filter available!!\n");
3288                 return;
3289         }
3290         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3291         f->vlan = vlan;
3292         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3293         if (f->vlan != IXL_VLAN_ANY)
3294                 f->flags |= IXL_FILTER_VLAN;
3295
3296         ixl_add_hw_filters(vsi, f->flags, 1);
3297         return;
3298 }
3299
3300 static void
3301 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3302 {
3303         struct ixl_mac_filter *f;
3304
3305         f = ixl_find_filter(vsi, macaddr, vlan);
3306         if (f == NULL)
3307                 return;
3308
3309         f->flags |= IXL_FILTER_DEL;
3310         ixl_del_hw_filters(vsi, 1);
3311
3312         /* Check if this is the last vlan removal */
3313         if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3314                 /* Switch back to a non-vlan filter */
3315                 ixl_del_filter(vsi, macaddr, 0);
3316                 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3317         }
3318         return;
3319 }
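
/*
** Filter lifecycle sketch (editorial summary of the two routines
** above): init installs <mac, IXL_VLAN_ANY>.  When the first vlan
** is registered, ixl_add_filter() swaps the ANY entry for a vlan-0
** entry and then adds the new <mac, vtag> entry; when the last
** vlan is unregistered, ixl_del_filter() swaps the vlan-0 entry
** back to <mac, IXL_VLAN_ANY>.
*/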
3320
3321 /*
3322 ** Find the filter with both matching mac addr and vlan id
3323 */
3324 static struct ixl_mac_filter *
3325 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3326 {
3327         struct ixl_mac_filter   *f;
3328         bool                    match = FALSE;
3329
3330         SLIST_FOREACH(f, &vsi->ftl, next) {
3331                 if (!cmp_etheraddr(f->macaddr, macaddr))
3332                         continue;
3333                 if (f->vlan == vlan) {
3334                         match = TRUE;
3335                         break;
3336                 }
3337         }       
3338
3339         if (!match)
3340                 f = NULL;
3341         return (f);
3342 }
3343
3344 /*
3345 ** This routine takes additions to the vsi filter
3346 ** table and creates an Admin Queue call to create
3347 ** the filters in the hardware.
3348 */
3349 static void
3350 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3351 {
3352         struct i40e_aqc_add_macvlan_element_data *a, *b;
3353         struct ixl_mac_filter   *f;
3354         struct i40e_hw  *hw = vsi->hw;
3355         device_t        dev = vsi->dev;
3356         int             err, j = 0;
3357
3358         a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3359             M_DEVBUF, M_NOWAIT | M_ZERO);
3360         if (a == NULL) {
3361                 device_printf(dev, "add hw filter failed to get memory\n");
3362                 return;
3363         }
3364
3365         /*
3366         ** Scan the filter list, each time we find one
3367         ** we add it to the admin queue array and turn off
3368         ** the add bit.
3369         */
3370         SLIST_FOREACH(f, &vsi->ftl, next) {
3371                 if (f->flags == flags) {
3372                         b = &a[j]; // a pox on fvl long names :)
3373                         bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3374                         b->vlan_tag =
3375                             (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3376                         b->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3377                         f->flags &= ~IXL_FILTER_ADD;
3378                         j++;
3379                 }
3380                 if (j == cnt)
3381                         break;
3382         }
3383         if (j > 0) {
3384                 err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3385                 if (err) 
3386                         device_printf(dev, "aq_add_macvlan failure %d\n",
3387                             hw->aq.asq_last_status);
3388                 else
3389                         vsi->hw_filters_add += j;
3390         }
3391         free(a, M_DEVBUF);
3392         return;
3393 }
3394
3395 /*
3396 ** This routine takes removals in the vsi filter
3397 ** table and creates an Admin Queue call to delete
3398 ** the filters in the hardware.
3399 */
3400 static void
3401 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3402 {
3403         struct i40e_aqc_remove_macvlan_element_data *d, *e;
3404         struct i40e_hw          *hw = vsi->hw;
3405         device_t                dev = vsi->dev;
3406         struct ixl_mac_filter   *f, *f_temp;
3407         int                     err, j = 0;
3408
3409         DEBUGOUT("ixl_del_hw_filters: begin\n");
3410
3411         d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3412             M_DEVBUF, M_NOWAIT | M_ZERO);
3413         if (d == NULL) {
3414                 printf("del hw filter failed to get memory\n");
3415                 return;
3416         }
3417
3418         SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3419                 if (f->flags & IXL_FILTER_DEL) {
3420                         e = &d[j]; // a pox on fvl long names :)
3421                         bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3422                         e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3423                         e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3424                         /* delete entry from vsi list */
3425                         SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3426                         free(f, M_DEVBUF);
3427                         j++;
3428                 }
3429                 if (j == cnt)
3430                         break;
3431         }
3432         if (j > 0) {
3433                 err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3434                 /* NOTE: returns ENOENT every time but seems to work fine,
3435                    so we'll ignore that specific error. */
3436                 if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3437                         int sc = 0;
3438                         for (int i = 0; i < j; i++)
3439                                 sc += (!d[i].error_code);
3440                         vsi->hw_filters_del += sc;
3441                         device_printf(dev,
3442                             "Failed to remove %d/%d filters, aq error %d\n",
3443                             j - sc, j, hw->aq.asq_last_status);
3444                 } else
3445                         vsi->hw_filters_del += j;
3446         }
3447         free(d, M_DEVBUF);
3448
3449         DEBUGOUT("ixl_del_hw_filters: end\n");
3450         return;
3451 }
3452
3453
3454 static void
3455 ixl_enable_rings(struct ixl_vsi *vsi)
3456 {
3457         struct i40e_hw  *hw = vsi->hw;
3458         u32             reg;
3459
3460         for (int i = 0; i < vsi->num_queues; i++) {
3461                 i40e_pre_tx_queue_cfg(hw, i, TRUE);
3462
3463                 reg = rd32(hw, I40E_QTX_ENA(i));
3464                 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3465                     I40E_QTX_ENA_QENA_STAT_MASK;
3466                 wr32(hw, I40E_QTX_ENA(i), reg);
3467                 /* Verify the enable took */
3468                 for (int j = 0; j < 10; j++) {
3469                         reg = rd32(hw, I40E_QTX_ENA(i));
3470                         if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3471                                 break;
3472                         i40e_msec_delay(10);
3473                 }
3474                 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
3475                         printf("TX queue %d disabled!\n", i);
3476
3477                 reg = rd32(hw, I40E_QRX_ENA(i));
3478                 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3479                     I40E_QRX_ENA_QENA_STAT_MASK;
3480                 wr32(hw, I40E_QRX_ENA(i), reg);
3481                 /* Verify the enable took */
3482                 for (int j = 0; j < 10; j++) {
3483                         reg = rd32(hw, I40E_QRX_ENA(i));
3484                         if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3485                                 break;
3486                         i40e_msec_delay(10);
3487                 }
3488                 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3489                         printf("RX queue %d disabled!\n", i);
3490         }
3491 }
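
/*
** Editorial note: each verification loop above polls up to 10 times
** with a 10 ms delay, so a queue that never reports the requested
** state stalls this routine for roughly 100 ms before the warning
** printf fires.
*/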
3492
3493 static void
3494 ixl_disable_rings(struct ixl_vsi *vsi)
3495 {
3496         struct i40e_hw  *hw = vsi->hw;
3497         u32             reg;
3498
3499         for (int i = 0; i < vsi->num_queues; i++) {
3500                 i40e_pre_tx_queue_cfg(hw, i, FALSE);
3501                 i40e_usec_delay(500);
3502
3503                 reg = rd32(hw, I40E_QTX_ENA(i));
3504                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3505                 wr32(hw, I40E_QTX_ENA(i), reg);
3506                 /* Verify the disable took */
3507                 for (int j = 0; j < 10; j++) {
3508                         reg = rd32(hw, I40E_QTX_ENA(i));
3509                         if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3510                                 break;
3511                         i40e_msec_delay(10);
3512                 }
3513                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3514                         printf("TX queue %d still enabled!\n", i);
3515
3516                 reg = rd32(hw, I40E_QRX_ENA(i));
3517                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3518                 wr32(hw, I40E_QRX_ENA(i), reg);
3519                 /* Verify the disable took */
3520                 for (int j = 0; j < 10; j++) {
3521                         reg = rd32(hw, I40E_QRX_ENA(i));
3522                         if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3523                                 break;
3524                         i40e_msec_delay(10);
3525                 }
3526                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3527                         printf("RX queue %d still enabled!\n", i);
3528         }
3529 }
3530
3531 /**
3532  * ixl_handle_mdd_event
3533  *
3534  * Called from the interrupt handler to identify possibly malicious VFs
3535  * (it also detects events from the PF)
3536  **/
3537 static void ixl_handle_mdd_event(struct ixl_pf *pf)
3538 {
3539         struct i40e_hw *hw = &pf->hw;
3540         device_t dev = pf->dev;
3541         bool mdd_detected = false;
3542         bool pf_mdd_detected = false;
3543         u32 reg;
3544
3545         /* find what triggered the MDD event */
3546         reg = rd32(hw, I40E_GL_MDET_TX);
3547         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3548                 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3549                                 I40E_GL_MDET_TX_PF_NUM_SHIFT;
3550                 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3551                                 I40E_GL_MDET_TX_EVENT_SHIFT;
3552                 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3553                                 I40E_GL_MDET_TX_QUEUE_SHIFT;
3554                 device_printf(dev,
3555                          "Malicious Driver Detection event 0x%02x"
3556                          " on TX queue %d pf number 0x%02x\n",
3557                          event, queue, pf_num);
3558                 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3559                 mdd_detected = true;
3560         }
3561         reg = rd32(hw, I40E_GL_MDET_RX);
3562         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3563                 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3564                                 I40E_GL_MDET_RX_FUNCTION_SHIFT;
3565                 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3566                                 I40E_GL_MDET_RX_EVENT_SHIFT;
3567                 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3568                                 I40E_GL_MDET_RX_QUEUE_SHIFT;
3569                 device_printf(dev,
3570                          "Malicious Driver Detection event 0x%02x"
3571                          " on RX queue %d of function 0x%02x\n",
3572                          event, queue, func);
3573                 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3574                 mdd_detected = true;
3575         }
3576
3577         if (mdd_detected) {
3578                 reg = rd32(hw, I40E_PF_MDET_TX);
3579                 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3580                         wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3581                         device_printf(dev,
3582                                  "MDD TX event is for this function 0x%08x\n",
3583                                  reg);
3584                         pf_mdd_detected = true;
3585                 }
3586                 reg = rd32(hw, I40E_PF_MDET_RX);
3587                 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3588                         wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3589                         device_printf(dev,
3590                                  "MDD RX event is for this function 0x%08x\n",
3591                                  reg);
3592                         pf_mdd_detected = true;
3593                 }
3594         }
3595
3596         /* re-enable mdd interrupt cause */
3597         reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3598         reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3599         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3600         ixl_flush(hw);
3601 }
3602
3603 static void
3604 ixl_enable_intr(struct ixl_vsi *vsi)
3605 {
3606         struct i40e_hw          *hw = vsi->hw;
3607         struct ixl_queue        *que = vsi->queues;
3608
3609         if (ixl_enable_msix) {
3610                 ixl_enable_adminq(hw);
3611                 for (int i = 0; i < vsi->num_queues; i++, que++)
3612                         ixl_enable_queue(hw, que->me);
3613         } else
3614                 ixl_enable_legacy(hw);
3615 }
3616
3617 static void
3618 ixl_disable_intr(struct ixl_vsi *vsi)
3619 {
3620         struct i40e_hw          *hw = vsi->hw;
3621         struct ixl_queue        *que = vsi->queues;
3622
3623         if (ixl_enable_msix) {
3624                 ixl_disable_adminq(hw);
3625                 for (int i = 0; i < vsi->num_queues; i++, que++)
3626                         ixl_disable_queue(hw, que->me);
3627         } else
3628                 ixl_disable_legacy(hw);
3629 }
3630
3631 static void
3632 ixl_enable_adminq(struct i40e_hw *hw)
3633 {
3634         u32             reg;
3635
3636         reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3637             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3638             (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3639         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3640         ixl_flush(hw);
3641         return;
3642 }
3643
3644 static void
3645 ixl_disable_adminq(struct i40e_hw *hw)
3646 {
3647         u32             reg;
3648
3649         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3650         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3651
3652         return;
3653 }
3654
3655 static void
3656 ixl_enable_queue(struct i40e_hw *hw, int id)
3657 {
3658         u32             reg;
3659
3660         reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3661             I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3662             (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3663         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3664 }
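
/*
** Editorial note on the register write above (based on the i40e
** datasheet semantics of PFINT_DYN_CTLN): INTENA re-arms the
** vector, CLEARPBA clears the vector's pending bit so a stale
** event does not immediately refire, and IXL_ITR_NONE selects an
** ITR index that leaves the interval registers untouched.
*/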
3665
3666 static void
3667 ixl_disable_queue(struct i40e_hw *hw, int id)
3668 {
3669         u32             reg;
3670
3671         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3672         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3673
3674         return;
3675 }
3676
3677 static void
3678 ixl_enable_legacy(struct i40e_hw *hw)
3679 {
3680         u32             reg;
3681         reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3682             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3683             (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3684         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3685 }
3686
3687 static void
3688 ixl_disable_legacy(struct i40e_hw *hw)
3689 {
3690         u32             reg;
3691
3692         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3693         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3694
3695         return;
3696 }
3697
3698 static void
3699 ixl_update_stats_counters(struct ixl_pf *pf)
3700 {
3701         struct i40e_hw  *hw = &pf->hw;
3702         struct ixl_vsi *vsi = &pf->vsi;
3703         struct ifnet    *ifp = vsi->ifp;
3704
3705         struct i40e_hw_port_stats *nsd = &pf->stats;
3706         struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3707
3708         /* Update hw stats */
3709         ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3710                            pf->stat_offsets_loaded,
3711                            &osd->crc_errors, &nsd->crc_errors);
3712         ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3713                            pf->stat_offsets_loaded,
3714                            &osd->illegal_bytes, &nsd->illegal_bytes);
3715         ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3716                            I40E_GLPRT_GORCL(hw->port),
3717                            pf->stat_offsets_loaded,
3718                            &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3719         ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3720                            I40E_GLPRT_GOTCL(hw->port),
3721                            pf->stat_offsets_loaded,
3722                            &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3723         ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3724                            pf->stat_offsets_loaded,
3725                            &osd->eth.rx_discards,
3726                            &nsd->eth.rx_discards);
3727         ixl_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
3728                            pf->stat_offsets_loaded,
3729                            &osd->eth.tx_discards,
3730                            &nsd->eth.tx_discards);
3731         ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3732                            I40E_GLPRT_UPRCL(hw->port),
3733                            pf->stat_offsets_loaded,
3734                            &osd->eth.rx_unicast,
3735                            &nsd->eth.rx_unicast);
3736         ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3737                            I40E_GLPRT_UPTCL(hw->port),
3738                            pf->stat_offsets_loaded,
3739                            &osd->eth.tx_unicast,
3740                            &nsd->eth.tx_unicast);
3741         ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3742                            I40E_GLPRT_MPRCL(hw->port),
3743                            pf->stat_offsets_loaded,
3744                            &osd->eth.rx_multicast,
3745                            &nsd->eth.rx_multicast);
3746         ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3747                            I40E_GLPRT_MPTCL(hw->port),
3748                            pf->stat_offsets_loaded,
3749                            &osd->eth.tx_multicast,
3750                            &nsd->eth.tx_multicast);
3751         ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3752                            I40E_GLPRT_BPRCL(hw->port),
3753                            pf->stat_offsets_loaded,
3754                            &osd->eth.rx_broadcast,
3755                            &nsd->eth.rx_broadcast);
3756         ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3757                            I40E_GLPRT_BPTCL(hw->port),
3758                            pf->stat_offsets_loaded,
3759                            &osd->eth.tx_broadcast,
3760                            &nsd->eth.tx_broadcast);
3761
3762         ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3763                            pf->stat_offsets_loaded,
3764                            &osd->tx_dropped_link_down,
3765                            &nsd->tx_dropped_link_down);
3766         ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3767                            pf->stat_offsets_loaded,
3768                            &osd->mac_local_faults,
3769                            &nsd->mac_local_faults);
3770         ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3771                            pf->stat_offsets_loaded,
3772                            &osd->mac_remote_faults,
3773                            &nsd->mac_remote_faults);
3774         ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3775                            pf->stat_offsets_loaded,
3776                            &osd->rx_length_errors,
3777                            &nsd->rx_length_errors);
3778
3779         /* Flow control (LFC) stats */
3780         ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3781                            pf->stat_offsets_loaded,
3782                            &osd->link_xon_rx, &nsd->link_xon_rx);
3783         ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3784                            pf->stat_offsets_loaded,
3785                            &osd->link_xon_tx, &nsd->link_xon_tx);
3786         ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3787                            pf->stat_offsets_loaded,
3788                            &osd->link_xoff_rx, &nsd->link_xoff_rx);
3789         ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3790                            pf->stat_offsets_loaded,
3791                            &osd->link_xoff_tx, &nsd->link_xoff_tx);
3792
3793         /* Priority flow control stats */
3794 #if 0
3795         for (int i = 0; i < 8; i++) {
3796                 ixl_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3797                                    pf->stat_offsets_loaded,
3798                                    &osd->priority_xon_rx[i],
3799                                    &nsd->priority_xon_rx[i]);
3800                 ixl_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3801                                    pf->stat_offsets_loaded,
3802                                    &osd->priority_xon_tx[i],
3803                                    &nsd->priority_xon_tx[i]);
3804                 ixl_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3805                                    pf->stat_offsets_loaded,
3806                                    &osd->priority_xoff_tx[i],
3807                                    &nsd->priority_xoff_tx[i]);
3808                 ixl_stat_update32(hw,
3809                                    I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3810                                    pf->stat_offsets_loaded,
3811                                    &osd->priority_xon_2_xoff[i],
3812                                    &nsd->priority_xon_2_xoff[i]);
3813         }
3814 #endif
3815
3816         /* Packet size stats rx */
3817         ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3818                            I40E_GLPRT_PRC64L(hw->port),
3819                            pf->stat_offsets_loaded,
3820                            &osd->rx_size_64, &nsd->rx_size_64);
3821         ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3822                            I40E_GLPRT_PRC127L(hw->port),
3823                            pf->stat_offsets_loaded,
3824                            &osd->rx_size_127, &nsd->rx_size_127);
3825         ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3826                            I40E_GLPRT_PRC255L(hw->port),
3827                            pf->stat_offsets_loaded,
3828                            &osd->rx_size_255, &nsd->rx_size_255);
3829         ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3830                            I40E_GLPRT_PRC511L(hw->port),
3831                            pf->stat_offsets_loaded,
3832                            &osd->rx_size_511, &nsd->rx_size_511);
3833         ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3834                            I40E_GLPRT_PRC1023L(hw->port),
3835                            pf->stat_offsets_loaded,
3836                            &osd->rx_size_1023, &nsd->rx_size_1023);
3837         ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3838                            I40E_GLPRT_PRC1522L(hw->port),
3839                            pf->stat_offsets_loaded,
3840                            &osd->rx_size_1522, &nsd->rx_size_1522);
3841         ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3842                            I40E_GLPRT_PRC9522L(hw->port),
3843                            pf->stat_offsets_loaded,
3844                            &osd->rx_size_big, &nsd->rx_size_big);
3845
3846         /* Packet size stats tx */
3847         ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3848                            I40E_GLPRT_PTC64L(hw->port),
3849                            pf->stat_offsets_loaded,
3850                            &osd->tx_size_64, &nsd->tx_size_64);
3851         ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3852                            I40E_GLPRT_PTC127L(hw->port),
3853                            pf->stat_offsets_loaded,
3854                            &osd->tx_size_127, &nsd->tx_size_127);
3855         ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3856                            I40E_GLPRT_PTC255L(hw->port),
3857                            pf->stat_offsets_loaded,
3858                            &osd->tx_size_255, &nsd->tx_size_255);
3859         ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3860                            I40E_GLPRT_PTC511L(hw->port),
3861                            pf->stat_offsets_loaded,
3862                            &osd->tx_size_511, &nsd->tx_size_511);
3863         ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3864                            I40E_GLPRT_PTC1023L(hw->port),
3865                            pf->stat_offsets_loaded,
3866                            &osd->tx_size_1023, &nsd->tx_size_1023);
3867         ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3868                            I40E_GLPRT_PTC1522L(hw->port),
3869                            pf->stat_offsets_loaded,
3870                            &osd->tx_size_1522, &nsd->tx_size_1522);
3871         ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3872                            I40E_GLPRT_PTC9522L(hw->port),
3873                            pf->stat_offsets_loaded,
3874                            &osd->tx_size_big, &nsd->tx_size_big);
3875
3876         ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
3877                            pf->stat_offsets_loaded,
3878                            &osd->rx_undersize, &nsd->rx_undersize);
3879         ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
3880                            pf->stat_offsets_loaded,
3881                            &osd->rx_fragments, &nsd->rx_fragments);
3882         ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
3883                            pf->stat_offsets_loaded,
3884                            &osd->rx_oversize, &nsd->rx_oversize);
3885         ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
3886                            pf->stat_offsets_loaded,
3887                            &osd->rx_jabber, &nsd->rx_jabber);
3888         pf->stat_offsets_loaded = true;
3889         /* End hw stats */
3890
3891         /* Update vsi stats */
3892         ixl_update_eth_stats(vsi);
3893
3894         /* OS statistics */
3895         // ERJ - these are per-port, update all vsis?
3896         ifp->if_ierrors = nsd->crc_errors + nsd->illegal_bytes;
3897 }
3898
3899 /*
3900 ** Tasklet handler for MSIX Adminq interrupts
3901 **  - run outside the interrupt context since it might sleep
3902 */
3903 static void
3904 ixl_do_adminq(void *context, int pending)
3905 {
3906         struct ixl_pf                   *pf = context;
3907         struct i40e_hw                  *hw = &pf->hw;
3908         struct ixl_vsi                  *vsi = &pf->vsi;
3909         struct i40e_arq_event_info      event;
3910         i40e_status                     ret;
3911         u32                             reg, loop = 0;
3912         u16                             opcode, result;
3913
3914         event.msg_len = IXL_AQ_BUF_SZ;
3915         event.msg_buf = malloc(event.msg_len,
3916             M_DEVBUF, M_NOWAIT | M_ZERO);
3917         if (!event.msg_buf) {
3918                 printf("Unable to allocate adminq memory\n");
3919                 return;
3920         }
3921
3922         /* clean and process any events */
3923         do {
3924                 ret = i40e_clean_arq_element(hw, &event, &result);
3925                 if (ret)
3926                         break;
3927                 opcode = LE16_TO_CPU(event.desc.opcode);
3928                 switch (opcode) {
3929                 case i40e_aqc_opc_get_link_status:
3930                         vsi->link_up = ixl_config_link(hw);
3931                         ixl_update_link_status(pf);
3932                         break;
3933                 case i40e_aqc_opc_send_msg_to_pf:
3934                         /* process pf/vf communication here */
3935                         break;
3936                 case i40e_aqc_opc_event_lan_overflow:
3937                         break;
3938                 default:
3939 #ifdef IXL_DEBUG
3940                         printf("AdminQ unknown event %x\n", opcode);
3941 #endif
3942                         break;
3943                 }
3944
3945         } while (result && (loop++ < IXL_ADM_LIMIT));
3946
3947         reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3948         reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3949         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3950         free(event.msg_buf, M_DEVBUF);
3951
3952         if (pf->msix > 1)
3953                 ixl_enable_adminq(&pf->hw);
3954         else
3955                 ixl_enable_intr(vsi);
3956 }
3957
3958 static int
3959 ixl_debug_info(SYSCTL_HANDLER_ARGS)
3960 {
3961         struct ixl_pf   *pf;
3962         int             error, input = 0;
3963
3964         error = sysctl_handle_int(oidp, &input, 0, req);
3965
3966         if (error || !req->newptr)
3967                 return (error);
3968
3969         if (input == 1) {
3970                 pf = (struct ixl_pf *)arg1;
3971                 ixl_print_debug_info(pf);
3972         }
3973
3974         return (error);
3975 }
3976
3977 static void
3978 ixl_print_debug_info(struct ixl_pf *pf)
3979 {
3980         struct i40e_hw          *hw = &pf->hw;
3981         struct ixl_vsi          *vsi = &pf->vsi;
3982         struct ixl_queue        *que = vsi->queues;
3983         struct rx_ring          *rxr = &que->rxr;
3984         struct tx_ring          *txr = &que->txr;
3985         u32                     reg;    
3986
3987
3988         printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
3989         printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
3990         printf("RX next check = %x\n", rxr->next_check);
3991         printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
3992         printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
3993         printf("TX desc avail = %x\n", txr->avail);
3994
3995         reg = rd32(hw, I40E_GLV_GORCL(0xc));
3996         printf("RX Bytes = %x\n", reg);
3997         reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
3998         printf("Port RX Bytes = %x\n", reg);
3999         reg = rd32(hw, I40E_GLV_RDPC(0xc));
4000         printf("RX discard = %x\n", reg);
4001         reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4002         printf("Port RX discard = %x\n", reg);
4003
4004         reg = rd32(hw, I40E_GLV_TEPC(0xc));
4005         printf("TX errors = %x\n", reg);
4006         reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4007         printf("TX Bytes = %x\n", reg);
4008
4009         reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4010         printf("RX undersize = %x\n", reg);
4011         reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4012         printf("RX fragments = %x\n", reg);
4013         reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4014         printf("RX oversize = %x\n", reg);
4015         reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4016         printf("RX length error = %x\n", reg);
4017         reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4018         printf("mac remote fault = %x\n", reg);
4019         reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4020         printf("mac local fault = %x\n", reg);
4021 }
4022
4023 /**
4024  * Update VSI-specific ethernet statistics counters.
4025  **/
4026 void ixl_update_eth_stats(struct ixl_vsi *vsi)
4027 {
4028         struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4029         struct i40e_hw *hw = &pf->hw;
4030         struct ifnet *ifp = vsi->ifp;
4031         struct i40e_eth_stats *es;
4032         struct i40e_eth_stats *oes;
4033         u16 stat_idx = vsi->info.stat_counter_idx;
4034
4035         es = &vsi->eth_stats;
4036         oes = &vsi->eth_stats_offsets;
4037
4038         /* Gather up the stats that the hw collects */
4039         ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4040                            vsi->stat_offsets_loaded,
4041                            &oes->tx_errors, &es->tx_errors);
4042         ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4043                            vsi->stat_offsets_loaded,
4044                            &oes->rx_discards, &es->rx_discards);
4045
4046         ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4047                            I40E_GLV_GORCL(stat_idx),
4048                            vsi->stat_offsets_loaded,
4049                            &oes->rx_bytes, &es->rx_bytes);
4050         ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4051                            I40E_GLV_UPRCL(stat_idx),
4052                            vsi->stat_offsets_loaded,
4053                            &oes->rx_unicast, &es->rx_unicast);
4054         ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4055                            I40E_GLV_MPRCL(stat_idx),
4056                            vsi->stat_offsets_loaded,
4057                            &oes->rx_multicast, &es->rx_multicast);
4058         ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4059                            I40E_GLV_BPRCL(stat_idx),
4060                            vsi->stat_offsets_loaded,
4061                            &oes->rx_broadcast, &es->rx_broadcast);
4062
4063         ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4064                            I40E_GLV_GOTCL(stat_idx),
4065                            vsi->stat_offsets_loaded,
4066                            &oes->tx_bytes, &es->tx_bytes);
4067         ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4068                            I40E_GLV_UPTCL(stat_idx),
4069                            vsi->stat_offsets_loaded,
4070                            &oes->tx_unicast, &es->tx_unicast);
4071         ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4072                            I40E_GLV_MPTCL(stat_idx),
4073                            vsi->stat_offsets_loaded,
4074                            &oes->tx_multicast, &es->tx_multicast);
4075         ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4076                            I40E_GLV_BPTCL(stat_idx),
4077                            vsi->stat_offsets_loaded,
4078                            &oes->tx_broadcast, &es->tx_broadcast);
4079         vsi->stat_offsets_loaded = true;
4080
4081         /* Update ifnet stats */
4082         ifp->if_ipackets = es->rx_unicast +
4083                            es->rx_multicast +
4084                            es->rx_broadcast;
4085         ifp->if_opackets = es->tx_unicast +
4086                            es->tx_multicast +
4087                            es->tx_broadcast;
4088         ifp->if_ibytes = es->rx_bytes;
4089         ifp->if_obytes = es->tx_bytes;
4090         ifp->if_imcasts = es->rx_multicast;
4091         ifp->if_omcasts = es->tx_multicast;
4092
4093         ifp->if_oerrors = es->tx_errors;
4094         ifp->if_iqdrops = es->rx_discards;
4095         ifp->if_noproto = es->rx_unknown_protocol;
4096         ifp->if_collisions = 0;
4097 }
4098
4099 /**
4100  * Reset all of the stats for the given pf
4101  **/
4102 void ixl_pf_reset_stats(struct ixl_pf *pf)
4103 {
4104         bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4105         bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4106         pf->stat_offsets_loaded = false;
4107 }
4108
4109 /**
4110  * Reset all of the stats for the given vsi
4111  **/
4112 void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4113 {
4114         bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4115         bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4116         vsi->stat_offsets_loaded = false;
4117 }
4118
4119 /**
4120  * Read and update a 48 bit stat from the hw
4121  *
4122  * Since the device stats are not reset at PFReset, they likely will not
4123  * be zeroed when the driver starts.  We'll save the first values read
4124  * and use them as offsets to be subtracted from the raw values in order
4125  * to report stats that count from zero.
4126  **/
4127 static void
4128 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4129         bool offset_loaded, u64 *offset, u64 *stat)
4130 {
4131         u64 new_data;
4132
4133 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4134         new_data = rd64(hw, loreg);
4135 #else
4136         /*
4137          * Use two rd32's instead of one rd64; FreeBSD versions before
4138          * 10 don't support 8 byte bus reads/writes.
4139          */
4140         new_data = rd32(hw, loreg);
4141         new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4142 #endif
4143
4144         if (!offset_loaded)
4145                 *offset = new_data;
4146         if (new_data >= *offset)
4147                 *stat = new_data - *offset;
4148         else
4149                 *stat = (new_data + ((u64)1 << 48)) - *offset;
4150         *stat &= 0xFFFFFFFFFFFFULL;
4151 }
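
/*
 * Worked example of the wrap handling above (values illustrative):
 * a first read of 0xFFFFFFFFFFF0 is saved as the offset and reported
 * as 0.  A later read of 0x20 is below the offset, so the 48-bit
 * counter must have wrapped: (0x20 + 2^48) - 0xFFFFFFFFFFF0 = 0x30,
 * i.e. 48 events since the offset was taken; the final mask keeps
 * the result within 48 bits.
 */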
4152
4153 /**
4154  * Read and update a 32 bit stat from the hw
4155  **/
4156 static void
4157 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4158         bool offset_loaded, u64 *offset, u64 *stat)
4159 {
4160         u32 new_data;
4161
4162         new_data = rd32(hw, reg);
4163         if (!offset_loaded)
4164                 *offset = new_data;
4165         if (new_data >= *offset)
4166                 *stat = (u32)(new_data - *offset);
4167         else
4168                 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4169 }
4170
4171 /*
4172 ** Set flow control using sysctl:
4173 **      0 - off
4174 **      1 - rx pause
4175 **      2 - tx pause
4176 **      3 - full
4177 */
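/*
** A minimal usage sketch from userland; the sysctl node name and unit
** number are assumptions for illustration:
**      sysctl dev.ixl.0.fc=3   # request full (rx + tx) pause
**      sysctl dev.ixl.0.fc     # reading back reports the current mode
*/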
4178 static int
4179 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4180 {
4181         /*
4182          * TODO: ensure flow control is disabled if
4183          * priority flow control is enabled
4184          *
4185          * TODO: ensure hardware tx CRC insertion is enabled
4186          * if tx flow control is enabled.
4187          */
4188         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4189         struct i40e_hw *hw = &pf->hw;
4190         device_t dev = pf->dev;
4191         int requested_fc = 0, error = 0;
4192         enum i40e_status_code aq_error = 0;
4193         u8 fc_aq_err = 0;
4194
4195         aq_error = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
4196         if (aq_error) {
4197                 device_printf(dev,
4198                     "%s: Error retrieving link info from aq, %d\n",
4199                     __func__, aq_error);
4200                 return (EAGAIN);
4201         }
4202
4203         /* Read in new mode */
4204         requested_fc = hw->fc.current_mode;
4205         error = sysctl_handle_int(oidp, &requested_fc, 0, req);
4206         if ((error) || (req->newptr == NULL))
4207                 return (error);
4208         if (requested_fc < 0 || requested_fc > 3) {
4209                 device_printf(dev,
4210                     "Invalid fc mode; valid modes are 0 through 3\n");
4211                 return (EINVAL);
4212         }
4213
4214         /*
4215         ** Changing flow control mode currently does not work on
4216         ** 40GBASE-CR4 PHYs
4217         */
4218         if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4219             || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4220                 device_printf(dev, "Changing flow control mode unsupported"
4221                     " on 40GBase-CR4 media.\n");
4222                 return (ENODEV);
4223         }
4224
4225         /* Set fc ability for port */
4226         hw->fc.requested_mode = requested_fc;
4227         aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4228         if (aq_error) {
4229                 device_printf(dev,
4230                     "%s: Error setting new fc mode %d; fc_err %#x\n",
4231                     __func__, aq_error, fc_aq_err);
4232                 return (EAGAIN);
4233         }
4234
4235         if (hw->fc.current_mode != hw->fc.requested_mode) {
4236                 device_printf(dev, "%s: FC set failure:\n", __func__);
4237                 device_printf(dev, "%s: Current: %s / Requested: %s\n",
4238                     __func__,
4239                     ixl_fc_string[hw->fc.current_mode],
4240                     ixl_fc_string[hw->fc.requested_mode]);
4241         }
4242
4243         return (0);
4244 }
4245
4246 static int
4247 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4248 {
4249         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4250         struct i40e_hw *hw = &pf->hw;
4251         int error = 0, index = 0;
4252
4253         char *speeds[] = {
4254                 "Unknown",
4255                 "100M",
4256                 "1G",
4257                 "10G",
4258                 "40G",
4259                 "20G"
4260         };
4261
4262         ixl_update_link_status(pf);
4263
4264         switch (hw->phy.link_info.link_speed) {
4265         case I40E_LINK_SPEED_100MB:
4266                 index = 1;
4267                 break;
4268         case I40E_LINK_SPEED_1GB:
4269                 index = 2;
4270                 break;
4271         case I40E_LINK_SPEED_10GB:
4272                 index = 3;
4273                 break;
4274         case I40E_LINK_SPEED_40GB:
4275                 index = 4;
4276                 break;
4277         case I40E_LINK_SPEED_20GB:
4278                 index = 5;
4279                 break;
4280         case I40E_LINK_SPEED_UNKNOWN:
4281         default:
4282                 index = 0;
4283                 break;
4284         }
4285
4286         error = sysctl_handle_string(oidp, speeds[index],
4287             strlen(speeds[index]), req);
4288         return (error);
4289 }
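
/*
** Read-only from userland; e.g. (node name assumed for illustration):
**      sysctl -n dev.ixl.0.current_speed      # prints e.g. "10G"
*/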
4290
4291 /*
4292 ** Control link advertise speed:
4293 **      Flags:
4294 **      0x1 - advertise 100 Mb
4295 **      0x2 - advertise 1G
4296 **      0x4 - advertise 10G
4297 **
4298 ** Does not work on 40G devices.
4299 */
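/*
** Usage sketch (node name assumed for illustration): OR the flags to
** advertise 1G and 10G only, 0x2 | 0x4 = 0x6:
**      sysctl dev.ixl.0.advertise_speed=6
*/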
4300 static int
4301 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4302 {
4303         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4304         struct i40e_hw *hw = &pf->hw;
4305         device_t dev = pf->dev;
4306         struct i40e_aq_get_phy_abilities_resp abilities;
4307         struct i40e_aq_set_phy_config config;
4308         int requested_ls = 0;
4309         enum i40e_status_code aq_error = 0;
4310         int error = 0;
4311
4312         /*
4313         ** FW doesn't support changing advertised speed
4314         ** for 40G devices; speed is always 40G.
4315         */
4316         if (i40e_is_40G_device(hw->device_id))
4317                 return (ENODEV);
4318
4319         /* Read in new mode */
4320         requested_ls = pf->advertised_speed;
4321         error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4322         if ((error) || (req->newptr == NULL))
4323                 return (error);
4324         if (requested_ls < 1 || requested_ls > 7) {
4325                 device_printf(dev,
4326                     "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4327                 return (EINVAL);
4328         }
4329
4330         /* Exit if no change */
4331         if (pf->advertised_speed == requested_ls)
4332                 return (0);
4333
4334         /* Get current capability information */
4335         aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL);
4336         if (aq_error) {
4337                 device_printf(dev, "%s: Error getting phy capabilities %d,"
4338                     " aq error: %d\n", __func__, aq_error,
4339                     hw->aq.asq_last_status);
4340                 return (EAGAIN);
4341         }
4342
4343         /* Prepare new config */
4344         bzero(&config, sizeof(config));
4345         config.phy_type = abilities.phy_type;
4346         config.abilities = abilities.abilities
4347             | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4348         config.eee_capability = abilities.eee_capability;
4349         config.eeer = abilities.eeer_val;
4350         config.low_power_ctrl = abilities.d3_lpan;
4351         /* Translate into aq cmd link_speed */
4352         if (requested_ls & 0x4)
4353                 config.link_speed |= I40E_LINK_SPEED_10GB;
4354         if (requested_ls & 0x2)
4355                 config.link_speed |= I40E_LINK_SPEED_1GB;
4356         if (requested_ls & 0x1)
4357                 config.link_speed |= I40E_LINK_SPEED_100MB;
4358
4359         /* Do aq command & restart link */
4360         aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4361         if (aq_error) {
4362                 device_printf(dev, "%s: Error setting new phy config %d,"
4363                     " aq error: %d\n", __func__, aq_error,
4364                     hw->aq.asq_last_status);
4365                 return (EAGAIN);
4366         }
4367
4368         pf->advertised_speed = requested_ls;
4369         ixl_update_link_status(pf);
4370         return (0);
4371 }
4372
4373 /*
4374 ** Get the width and transaction speed of
4375 ** the bus this adapter is plugged into.
4376 */
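/*
** On a Gen3 x8 slot the printf below would report, for example:
**      "PCI Express Bus: Speed 8.0GT/s Width x8"
*/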
4377 static u16
4378 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4379 {
4380         u16                     link;
4381         u32                     offset;
4382                 
4383                 
4384         /* Get the PCI Express Capabilities offset */
4385         pci_find_cap(dev, PCIY_EXPRESS, &offset);
4386
4387         /* ...and read the Link Status Register */
4388         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4389
4390         switch (link & I40E_PCI_LINK_WIDTH) {
4391         case I40E_PCI_LINK_WIDTH_1:
4392                 hw->bus.width = i40e_bus_width_pcie_x1;
4393                 break;
4394         case I40E_PCI_LINK_WIDTH_2:
4395                 hw->bus.width = i40e_bus_width_pcie_x2;
4396                 break;
4397         case I40E_PCI_LINK_WIDTH_4:
4398                 hw->bus.width = i40e_bus_width_pcie_x4;
4399                 break;
4400         case I40E_PCI_LINK_WIDTH_8:
4401                 hw->bus.width = i40e_bus_width_pcie_x8;
4402                 break;
4403         default:
4404                 hw->bus.width = i40e_bus_width_unknown;
4405                 break;
4406         }
4407
4408         switch (link & I40E_PCI_LINK_SPEED) {
4409         case I40E_PCI_LINK_SPEED_2500:
4410                 hw->bus.speed = i40e_bus_speed_2500;
4411                 break;
4412         case I40E_PCI_LINK_SPEED_5000:
4413                 hw->bus.speed = i40e_bus_speed_5000;
4414                 break;
4415         case I40E_PCI_LINK_SPEED_8000:
4416                 hw->bus.speed = i40e_bus_speed_8000;
4417                 break;
4418         default:
4419                 hw->bus.speed = i40e_bus_speed_unknown;
4420                 break;
4421         }
4422
4423
4424         device_printf(dev, "PCI Express Bus: Speed %s %s\n",
4425             ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4426             (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4427             (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4428             (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4429             (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4430             (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4431             ("Unknown"));
4432
4433         if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4434             (hw->bus.speed < i40e_bus_speed_8000)) {
4435                 device_printf(dev, "PCI-Express bandwidth available"
4436                     " for this device is not sufficient for"
4437                     " normal operation.\n");
4438                 device_printf(dev, "For expected performance a x8 "
4439                     "PCIE Gen3 slot is required.\n");
4440         }
4441
4442         return (link);
4443 }
4444
4445 #ifdef IXL_DEBUG
4446 static int
4447 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4448 {
4449         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4450         struct i40e_hw *hw = &pf->hw;
4451         struct i40e_link_status link_status;
4452         char buf[512];
4453
4454         enum i40e_status_code aq_error = 0;
4455
4456         aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4457         if (aq_error) {
4458                 printf("i40e_aq_get_link_info() error %d\n", aq_error);
4459                 return (EPERM);
4460         }
4461
4462         sprintf(buf, "\n"
4463             "PHY Type : %#04x\n"
4464             "Speed    : %#04x\n" 
4465             "Link info: %#04x\n" 
4466             "AN info  : %#04x\n" 
4467             "Ext info : %#04x", 
4468             link_status.phy_type, link_status.link_speed, 
4469             link_status.link_info, link_status.an_info,
4470             link_status.ext_info);
4471
4472         return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4473 }
4474
4475 static int
4476 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4477 {
4478         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4479         struct i40e_hw *hw = &pf->hw;
4480         struct i40e_aq_get_phy_abilities_resp abilities_resp;
4481         char buf[512];
4482
4483         enum i40e_status_code aq_error = 0;
4484
4485         // TODO: Print out list of qualified modules as well?
4486         aq_error = i40e_aq_get_phy_capabilities(hw, TRUE, FALSE, &abilities_resp, NULL);
4487         if (aq_error) {
4488                 printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4489                 return (EPERM);
4490         }
4491
4492         sprintf(buf, "\n"
4493             "PHY Type : %#010x\n"
4494             "Speed    : %#04x\n" 
4495             "Abilities: %#04x\n" 
4496             "EEE cap  : %#06x\n" 
4497             "EEER reg : %#010x\n" 
4498             "D3 Lpan  : %#04x",
4499             abilities_resp.phy_type, abilities_resp.link_speed, 
4500             abilities_resp.abilities, abilities_resp.eee_capability,
4501             abilities_resp.eeer_val, abilities_resp.d3_lpan);
4502
4503         return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4504 }
4505
4506 static int
4507 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4508 {
4509         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4510         struct ixl_vsi *vsi = &pf->vsi;
4511         struct ixl_mac_filter *f;
4512         char *buf, *buf_i;
4513
4514         int error = 0;
4515         int ftl_len = 0;
4516         int ftl_counter = 0;
4517         int buf_len = 0;
4518         int entry_len = 42;
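        /* 42 = 17 (MAC_FORMAT) + 11 (", vlan %4d") + 14 (", flags %#06x") */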
4519
4520         SLIST_FOREACH(f, &vsi->ftl, next) {
4521                 ftl_len++;
4522         }
4523
4524         if (ftl_len < 1) {
4525                 sysctl_handle_string(oidp, "(none)", 6, req);
4526                 return (0);
4527         }
4528
4529         buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
4530         buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
4531         if (buf == NULL) return (ENOMEM);       /* M_NOWAIT allocation can fail */
4532         sprintf(buf_i++, "\n");
4533         SLIST_FOREACH(f, &vsi->ftl, next) {
4534                 sprintf(buf_i,
4535                     MAC_FORMAT ", vlan %4d, flags %#06x",
4536                     MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4537                 buf_i += entry_len;
4538                 /* don't print '\n' for last entry */
4539                 if (++ftl_counter != ftl_len) {
4540                         sprintf(buf_i, "\n");
4541                         buf_i++;
4542                 }
4543         }
4544
4545         error = sysctl_handle_string(oidp, buf, strlen(buf), req);
4546         if (error)
4547                 printf("sysctl error: %d\n", error);
4548         free(buf, M_DEVBUF);
4549         return (error);
4550 }
4551
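/* Room for up to 0x14 (20) switch resource elements in the AQ response below */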
4552 #define IXL_SW_RES_SIZE 0x14
4553 static int
4554 ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS)
4555 {
4556         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4557         struct i40e_hw *hw = &pf->hw;
4558         device_t dev = pf->dev;
4559         struct sbuf *buf;
4560         int error = 0;
4561
4562         u8 num_entries;
4563         struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4564
4565         buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4566         if (!buf) {
4567                 device_printf(dev, "Could not allocate sbuf for output.\n");
4568                 return (ENOMEM);
4569         }
4570
4571         error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4572                                 resp,
4573                                 IXL_SW_RES_SIZE,
4574                                 NULL);
4575         if (error) {
4576                 device_printf(dev, "%s: get_switch_resource_alloc() error %d, aq error %d\n",
4577                     __func__, error, hw->aq.asq_last_status);
4578                 sbuf_delete(buf);
4579                 return (error);
4580         }
4581         device_printf(dev, "Num_entries: %d\n", num_entries);
4582
4583         sbuf_cat(buf, "\n");
4584         sbuf_printf(buf,
4585             "Type | Guaranteed | Total | Used   | Un-allocated\n"
4586             "     | (this)     | (all) | (this) | (all)       \n");
4587         for (int i = 0; i < num_entries; i++) {
4588                 sbuf_printf(buf,
4589                     "%#4x | %10d   %5d   %6d   %12d",
4590                     resp[i].resource_type,
4591                     resp[i].guaranteed,
4592                     resp[i].total,
4593                     resp[i].used,
4594                     resp[i].total_unalloced);
4595                 if (i < num_entries - 1)
4596                         sbuf_cat(buf, "\n");
4597         }
4598
4599         error = sbuf_finish(buf);
4600         if (error) {
4601                 device_printf(dev, "Error finishing sbuf: %d\n", error);
4602                 sbuf_delete(buf);
4603                 return (error);
4604         }
4605
4606         error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4607         if (error)
4608                 device_printf(dev, "sysctl error: %d\n", error);
4609         sbuf_delete(buf);
4610         return (error);
4611
4612 }
4613
4614 /*
4615 ** Dump the TX descriptor at a given index.
4616 ** Doesn't work; don't use.
4617 ** TODO: Also needs a queue index input!
4618 **/
4619 static int
4620 ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS)
4621 {
4622         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4623         device_t dev = pf->dev;
4624         struct sbuf *buf;
4625         int error = 0;
4626
4627         int desc_idx = 0;       /* int, not u16: sysctl_handle_int() stores a full int */
4628
4629         /* Read in index */
4630         error = sysctl_handle_int(oidp, &desc_idx, 0, req);
4631         if (error)
4632                 return (error);
4633         if (req->newptr == NULL)
4634                 return (EIO); // fix
4635         if (desc_idx >= 1024) { // fix
4636                 device_printf(dev,
4637                     "Invalid descriptor index, needs to be < 1024\n"); // fix
4638                 return (EINVAL);
4639         }
4640
4641         // Don't use this sysctl yet
4642         if (TRUE)
4643                 return (ENODEV);
4644
4645         /* Allocate the sbuf after the early returns above so they cannot leak it */
4646         buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4647         if (!buf) {
4648                 device_printf(dev, "Could not allocate sbuf for output.\n");
4649                 return (ENOMEM);
4650         }
4651         sbuf_cat(buf, "\n");
4652
4653         // set to queue 1?
4654         struct ixl_queue *que = pf->vsi.queues;
4655         struct tx_ring *txr = &(que[1].txr);
4656         struct i40e_tx_desc *txd = &txr->base[desc_idx];
4657
4658         sbuf_printf(buf, "Que: %d, Desc: %d\n", que->me, desc_idx);
4659         sbuf_printf(buf, "Addr: %#18lx\n", txd->buffer_addr);
4660         sbuf_printf(buf, "Opts: %#18lx\n", txd->cmd_type_offset_bsz);
4661
4662         error = sbuf_finish(buf);
4663         if (error) {
4664                 device_printf(dev, "Error finishing sbuf: %d\n", error);
4665                 sbuf_delete(buf);
4666                 return (error);
4667         }
4668
4669         error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4670         if (error)
4671                 device_printf(dev, "sysctl error: %d\n", error);
4672         sbuf_delete(buf);
4673         return (error);
4674 }
4675 #endif
4676