]> CyberLeo.Net >> Repos - FreeBSD/stable/10.git/blob - sys/dev/ixl/if_ixl.c
MFC r277084,r277088,r277130,r277143,r277151,r277262
[FreeBSD/stable/10.git] / sys / dev / ixl / if_ixl.c
1 /******************************************************************************
2
3   Copyright (c) 2013-2014, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
37 #include "ixl.h"
38 #include "ixl_pf.h"
39
40 #ifdef RSS
41 #include <net/rss_config.h>
42 #endif
43
/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixl_driver_version[] = "1.3.1";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixl_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *
 *  A zero SubVendor/SubDevice ID acts as a wildcard: ixl_probe()
 *  accepts any subsystem ID when the table entry holds 0.
 *********************************************************************/

static ixl_vendor_info_t ixl_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *
 *  Indexed by the "String Index" field of ixl_vendor_info_array.
 *********************************************************************/

static char    *ixl_strings[] = {
	"Intel(R) Ethernet Connection XL710 Driver"
};
80
81
/*********************************************************************
 *  Function prototypes
 *
 *  Forward declarations for every static function in this file,
 *  grouped by role (newbus entry points, init/teardown, interrupt
 *  setup, filter management, sysctl handlers, statistics).
 *********************************************************************/
static int      ixl_probe(device_t);
static int      ixl_attach(device_t);
static int      ixl_detach(device_t);
static int      ixl_shutdown(device_t);
static int      ixl_get_hw_capabilities(struct ixl_pf *);
static void     ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
static int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void     ixl_init(void *);
static void     ixl_init_locked(struct ixl_pf *);
static void     ixl_stop(struct ixl_pf *);
static void     ixl_media_status(struct ifnet *, struct ifmediareq *);
static int      ixl_media_change(struct ifnet *);
static void     ixl_update_link_status(struct ixl_pf *);
static int      ixl_allocate_pci_resources(struct ixl_pf *);
static u16      ixl_get_bus_info(struct i40e_hw *, device_t);
static int      ixl_setup_stations(struct ixl_pf *);
static int      ixl_setup_vsi(struct ixl_vsi *);
static int      ixl_initialize_vsi(struct ixl_vsi *);
static int      ixl_assign_vsi_msix(struct ixl_pf *);
static int      ixl_assign_vsi_legacy(struct ixl_pf *);
static int      ixl_init_msix(struct ixl_pf *);
static void     ixl_configure_msix(struct ixl_pf *);
static void     ixl_configure_itr(struct ixl_pf *);
static void     ixl_configure_legacy(struct ixl_pf *);
static void     ixl_free_pci_resources(struct ixl_pf *);
static void     ixl_local_timer(void *);
static int      ixl_setup_interface(device_t, struct ixl_vsi *);
static bool     ixl_config_link(struct i40e_hw *);
static void     ixl_config_rss(struct ixl_vsi *);
static void     ixl_set_queue_rx_itr(struct ixl_queue *);
static void     ixl_set_queue_tx_itr(struct ixl_queue *);
static int      ixl_set_advertised_speeds(struct ixl_pf *, int);

/* Ring/interrupt enable-disable pairs */
static void     ixl_enable_rings(struct ixl_vsi *);
static void     ixl_disable_rings(struct ixl_vsi *);
static void     ixl_enable_intr(struct ixl_vsi *);
static void     ixl_disable_intr(struct ixl_vsi *);

static void     ixl_enable_adminq(struct i40e_hw *);
static void     ixl_disable_adminq(struct i40e_hw *);
static void     ixl_enable_queue(struct i40e_hw *, int);
static void     ixl_disable_queue(struct i40e_hw *, int);
static void     ixl_enable_legacy(struct i40e_hw *);
static void     ixl_disable_legacy(struct i40e_hw *);

/* Multicast/VLAN filter maintenance */
static void     ixl_set_promisc(struct ixl_vsi *);
static void     ixl_add_multi(struct ixl_vsi *);
static void     ixl_del_multi(struct ixl_vsi *);
static void     ixl_register_vlan(void *, struct ifnet *, u16);
static void     ixl_unregister_vlan(void *, struct ifnet *, u16);
static void     ixl_setup_vlan_filters(struct ixl_vsi *);

/* MAC filter list (software shadow + hardware programming) */
static void     ixl_init_filters(struct ixl_vsi *);
static void     ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void     ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void     ixl_add_hw_filters(struct ixl_vsi *, int, int);
static void     ixl_del_hw_filters(struct ixl_vsi *, int);
static struct ixl_mac_filter *
                ixl_find_filter(struct ixl_vsi *, u8 *, s16);
static void     ixl_add_mc_filter(struct ixl_vsi *, u8 *);

/* Sysctl debug interface */
static int      ixl_debug_info(SYSCTL_HANDLER_ARGS);
static void     ixl_print_debug_info(struct ixl_pf *);

/* The MSI/X Interrupt handlers */
static void     ixl_intr(void *);
static void     ixl_msix_que(void *);
static void     ixl_msix_adminq(void *);
static void     ixl_handle_mdd_event(struct ixl_pf *);

/* Deferred interrupt tasklets */
static void     ixl_do_adminq(void *, int);

/* Sysctl handlers */
static int      ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int      ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int      ixl_current_speed(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);

/* Statistics */
static void     ixl_add_hw_stats(struct ixl_pf *);
static void     ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
                    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
static void     ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
                    struct sysctl_oid_list *,
                    struct i40e_eth_stats *);
static void     ixl_update_stats_counters(struct ixl_pf *);
static void     ixl_update_eth_stats(struct ixl_vsi *);
static void     ixl_pf_reset_stats(struct ixl_pf *);
static void     ixl_vsi_reset_stats(struct ixl_vsi *);
static void     ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
                    u64 *, u64 *);
static void     ixl_stat_update32(struct i40e_hw *, u32, bool,
                    u64 *, u64 *);

#ifdef IXL_DEBUG_SYSCTL
static int      ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS);
#endif
189
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixl_probe),
	DEVMETHOD(device_attach, ixl_attach),
	DEVMETHOD(device_detach, ixl_detach),
	DEVMETHOD(device_shutdown, ixl_shutdown),
	{0, 0}
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);

/*
** Global reset mutex
**
** Serializes the one-time CORE reset performed in ixl_attach();
** lazily initialized on the first successful ixl_probe().
*/
static struct mtx ixl_reset_mtx;

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
                   "IXL driver parameters");

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixl_enable_msix = 1;
TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixl_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixl_ringsz, 0, "Descriptor Ring Size");

/* 
** This can be set manually, if left as 0 the
** number of queues will be calculated based
** on cpus and msix vectors available.
*/
int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixl_max_queues, 0, "Number of Queues");

/*
** Controls for Interrupt Throttling 
**      - true/false for dynamic adjustment
**      - default values for static ITR
*/
int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

#ifdef IXL_FDIR
static int ixl_enable_fdir = 1;
TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
/* Rate at which we sample */
int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif


/*
 * Human-readable flow-control mode names.
 * NOTE(review): presumably indexed by the fc value used in the
 * ixl_set_flowcntl sysctl handler — confirm against that handler.
 */
static char *ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};
295
296
297 /*********************************************************************
298  *  Device identification routine
299  *
300  *  ixl_probe determines if the driver should be loaded on
301  *  the hardware based on PCI vendor/device id of the device.
302  *
303  *  return BUS_PROBE_DEFAULT on success, positive on failure
304  *********************************************************************/
305
306 static int
307 ixl_probe(device_t dev)
308 {
309         ixl_vendor_info_t *ent;
310
311         u16     pci_vendor_id, pci_device_id;
312         u16     pci_subvendor_id, pci_subdevice_id;
313         char    device_name[256];
314         static bool lock_init = FALSE;
315
316         INIT_DEBUGOUT("ixl_probe: begin");
317
318         pci_vendor_id = pci_get_vendor(dev);
319         if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
320                 return (ENXIO);
321
322         pci_device_id = pci_get_device(dev);
323         pci_subvendor_id = pci_get_subvendor(dev);
324         pci_subdevice_id = pci_get_subdevice(dev);
325
326         ent = ixl_vendor_info_array;
327         while (ent->vendor_id != 0) {
328                 if ((pci_vendor_id == ent->vendor_id) &&
329                     (pci_device_id == ent->device_id) &&
330
331                     ((pci_subvendor_id == ent->subvendor_id) ||
332                      (ent->subvendor_id == 0)) &&
333
334                     ((pci_subdevice_id == ent->subdevice_id) ||
335                      (ent->subdevice_id == 0))) {
336                         sprintf(device_name, "%s, Version - %s",
337                                 ixl_strings[ent->index],
338                                 ixl_driver_version);
339                         device_set_desc_copy(dev, device_name);
340                         /* One shot mutex init */
341                         if (lock_init == FALSE) {
342                                 lock_init = TRUE;
343                                 mtx_init(&ixl_reset_mtx,
344                                     "ixl_reset",
345                                     "IXL RESET Lock", MTX_DEF);
346                         }
347                         return (BUS_PROBE_DEFAULT);
348                 }
349                 ent++;
350         }
351         return (ENXIO);
352 }
353
354 /*********************************************************************
355  *  Device initialization routine
356  *
357  *  The attach entry point is called when the driver is being loaded.
358  *  This routine identifies the type of hardware, allocates all resources
359  *  and initializes the hardware.
360  *
361  *  return 0 on success, positive on failure
362  *********************************************************************/
363
364 static int
365 ixl_attach(device_t dev)
366 {
367         struct ixl_pf   *pf;
368         struct i40e_hw  *hw;
369         struct ixl_vsi *vsi;
370         u16             bus;
371         int             error = 0;
372
373         INIT_DEBUGOUT("ixl_attach: begin");
374
375         /* Allocate, clear, and link in our primary soft structure */
376         pf = device_get_softc(dev);
377         pf->dev = pf->osdep.dev = dev;
378         hw = &pf->hw;
379
380         /*
381         ** Note this assumes we have a single embedded VSI,
382         ** this could be enhanced later to allocate multiple
383         */
384         vsi = &pf->vsi;
385         vsi->dev = pf->dev;
386
387         /* Core Lock Init*/
388         IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
389
390         /* Set up the timer callout */
391         callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
392
393         /* Set up sysctls */
394         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
395             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
396             OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
397             pf, 0, ixl_set_flowcntl, "I", "Flow Control");
398
399         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
400             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
401             OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
402             pf, 0, ixl_set_advertise, "I", "Advertised Speed");
403
404         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
405             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
406             OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
407             pf, 0, ixl_current_speed, "A", "Current Port Speed");
408
409         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
410             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
411             OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
412             pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
413
414         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
415             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
416             OID_AUTO, "rx_itr", CTLFLAG_RW,
417             &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
418
419         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
420             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
421             OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
422             &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
423
424         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
425             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
426             OID_AUTO, "tx_itr", CTLFLAG_RW,
427             &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
428
429         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
430             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
431             OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
432             &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
433
434 #ifdef IXL_DEBUG_SYSCTL
435         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
436             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
437             OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
438             pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");
439
440         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
441             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
442             OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
443             pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
444
445         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
446             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
447             OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
448             pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
449
450         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
451             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
452             OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
453             pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
454
455         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
456             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
457             OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
458             pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
459
460         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
461             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
462             OID_AUTO, "dump_desc", CTLTYPE_INT | CTLFLAG_WR,
463             pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump");
464 #endif
465
466         /* Save off the PCI information */
467         hw->vendor_id = pci_get_vendor(dev);
468         hw->device_id = pci_get_device(dev);
469         hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
470         hw->subsystem_vendor_id =
471             pci_read_config(dev, PCIR_SUBVEND_0, 2);
472         hw->subsystem_device_id =
473             pci_read_config(dev, PCIR_SUBDEV_0, 2);
474
475         hw->bus.device = pci_get_slot(dev);
476         hw->bus.func = pci_get_function(dev);
477
478         /* Do PCI setup - map BAR0, etc */
479         if (ixl_allocate_pci_resources(pf)) {
480                 device_printf(dev, "Allocation of PCI resources failed\n");
481                 error = ENXIO;
482                 goto err_out;
483         }
484
485         /* Create for initial debugging use */
486         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
487             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
488             OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
489             ixl_debug_info, "I", "Debug Information");
490
491
492         /* Establish a clean starting point */
493         i40e_clear_hw(hw);
494         error = i40e_pf_reset(hw);
495         if (error) {
496                 device_printf(dev,"PF reset failure %x\n", error);
497                 error = EIO;
498                 goto err_out;
499         }
500
501         /* For now always do an initial CORE reset on first device */
502         {
503                 static int      ixl_dev_count;
504                 static int      ixl_dev_track[32];
505                 u32             my_dev;
506                 int             i, found = FALSE;
507                 u16             bus = pci_get_bus(dev);
508
509                 mtx_lock(&ixl_reset_mtx);
510                 my_dev = (bus << 8) | hw->bus.device;
511
512                 for (i = 0; i < ixl_dev_count; i++) {
513                         if (ixl_dev_track[i] == my_dev)
514                                 found = TRUE;
515                 }
516
517                 if (!found) {
518                         u32 reg;
519
520                         ixl_dev_track[ixl_dev_count] = my_dev;
521                         ixl_dev_count++;
522
523                         INIT_DEBUGOUT("Initial CORE RESET\n");
524                         wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
525                         ixl_flush(hw);
526                         i = 50;
527                         do {
528                                 i40e_msec_delay(50);
529                                 reg = rd32(hw, I40E_GLGEN_RSTAT);
530                                 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
531                                         break;
532                         } while (i--);
533
534                         /* paranoia */
535                         wr32(hw, I40E_PF_ATQLEN, 0);
536                         wr32(hw, I40E_PF_ATQBAL, 0);
537                         wr32(hw, I40E_PF_ATQBAH, 0);
538                         i40e_clear_pxe_mode(hw);
539                 }
540                 mtx_unlock(&ixl_reset_mtx);
541         }
542
543         /* Set admin queue parameters */
544         hw->aq.num_arq_entries = IXL_AQ_LEN;
545         hw->aq.num_asq_entries = IXL_AQ_LEN;
546         hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
547         hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
548
549         /* Initialize the shared code */
550         error = i40e_init_shared_code(hw);
551         if (error) {
552                 device_printf(dev,"Unable to initialize the shared code\n");
553                 error = EIO;
554                 goto err_out;
555         }
556
557         /* Set up the admin queue */
558         error = i40e_init_adminq(hw);
559         if (error) {
560                 device_printf(dev, "The driver for the device stopped "
561                     "because the NVM image is newer than expected.\n"
562                     "You must install the most recent version of "
563                     " the network driver.\n");
564                 goto err_out;
565         }
566         device_printf(dev, "%s\n", ixl_fw_version_str(hw));
567
568         if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
569             hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
570                 device_printf(dev, "The driver for the device detected "
571                     "a newer version of the NVM image than expected.\n"
572                     "Please install the most recent version of the network driver.\n");
573         else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
574             hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
575                 device_printf(dev, "The driver for the device detected "
576                     "an older version of the NVM image than expected.\n"
577                     "Please update the NVM image.\n");
578
579         /* Clear PXE mode */
580         i40e_clear_pxe_mode(hw);
581
582         /* Get capabilities from the device */
583         error = ixl_get_hw_capabilities(pf);
584         if (error) {
585                 device_printf(dev, "HW capabilities failure!\n");
586                 goto err_get_cap;
587         }
588
589         /* Set up host memory cache */
590         error = i40e_init_lan_hmc(hw, vsi->num_queues, vsi->num_queues, 0, 0);
591         if (error) {
592                 device_printf(dev, "init_lan_hmc failed: %d\n", error);
593                 goto err_get_cap;
594         }
595
596         error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
597         if (error) {
598                 device_printf(dev, "configure_lan_hmc failed: %d\n", error);
599                 goto err_mac_hmc;
600         }
601
602         /* Disable LLDP from the firmware */
603         i40e_aq_stop_lldp(hw, TRUE, NULL);
604
605         i40e_get_mac_addr(hw, hw->mac.addr);
606         error = i40e_validate_mac_addr(hw->mac.addr);
607         if (error) {
608                 device_printf(dev, "validate_mac_addr failed: %d\n", error);
609                 goto err_mac_hmc;
610         }
611         bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
612         i40e_get_port_mac_addr(hw, hw->mac.port_addr);
613
614         /* Set up VSI and queues */
615         if (ixl_setup_stations(pf) != 0) { 
616                 device_printf(dev, "setup stations failed!\n");
617                 error = ENOMEM;
618                 goto err_mac_hmc;
619         }
620
621         /* Initialize mac filter list for VSI */
622         SLIST_INIT(&vsi->ftl);
623
624         /* Set up interrupt routing here */
625         if (pf->msix > 1)
626                 error = ixl_assign_vsi_msix(pf);
627         else
628                 error = ixl_assign_vsi_legacy(pf);
629         if (error) 
630                 goto err_late;
631
632         i40e_msec_delay(75);
633         error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
634         if (error) {
635                 device_printf(dev, "link restart failed, aq_err=%d\n",
636                     pf->hw.aq.asq_last_status);
637         }
638         
639         /* Determine link state */
640         vsi->link_up = ixl_config_link(hw);
641
642         /* Report if Unqualified modules are found */
643         if ((vsi->link_up == FALSE) &&
644             (pf->hw.phy.link_info.link_info &
645             I40E_AQ_MEDIA_AVAILABLE) &&
646             (!(pf->hw.phy.link_info.an_info &
647             I40E_AQ_QUALIFIED_MODULE)))
648                 device_printf(dev, "Link failed because "
649                     "an unqualified module was detected\n");
650
651         /* Setup OS specific network interface */
652         if (ixl_setup_interface(dev, vsi) != 0) {
653                 device_printf(dev, "interface setup failed!\n");
654                 error = EIO;
655                 goto err_late;
656         }
657
658         /* Get the bus configuration and set the shared code */
659         bus = ixl_get_bus_info(hw, dev);
660         i40e_set_pci_config_data(hw, bus);
661
662         /* Initialize statistics */
663         ixl_pf_reset_stats(pf);
664         ixl_update_stats_counters(pf);
665         ixl_add_hw_stats(pf);
666
667         /* Reset port's advertised speeds */
668         if (!i40e_is_40G_device(hw->device_id)) {
669                 pf->advertised_speed =
670                     (hw->device_id == I40E_DEV_ID_10G_BASE_T) ? 0x7 : 0x6;
671                 ixl_set_advertised_speeds(pf, pf->advertised_speed);
672         }
673
674         /* Register for VLAN events */
675         vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
676             ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
677         vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
678             ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
679
680
681         INIT_DEBUGOUT("ixl_attach: end");
682         return (0);
683
684 err_late:
685         if (vsi->ifp != NULL)
686                 if_free(vsi->ifp);
687 err_mac_hmc:
688         i40e_shutdown_lan_hmc(hw);
689 err_get_cap:
690         i40e_shutdown_adminq(hw);
691 err_out:
692         ixl_free_pci_resources(pf);
693         ixl_free_vsi(vsi);
694         IXL_PF_LOCK_DESTROY(pf);
695         return (error);
696 }
697
698 /*********************************************************************
699  *  Device removal routine
700  *
701  *  The detach entry point is called when the driver is being removed.
702  *  This routine stops the adapter and deallocates all the resources
703  *  that were allocated for driver operation.
704  *
705  *  return 0 on success, positive on failure
706  *********************************************************************/
707
708 static int
709 ixl_detach(device_t dev)
710 {
711         struct ixl_pf           *pf = device_get_softc(dev);
712         struct i40e_hw          *hw = &pf->hw;
713         struct ixl_vsi          *vsi = &pf->vsi;
714         struct ixl_queue        *que = vsi->queues;
715         i40e_status             status;
716
717         INIT_DEBUGOUT("ixl_detach: begin");
718
719         /* Make sure VLANS are not using driver */
720         if (vsi->ifp->if_vlantrunk != NULL) {
721                 device_printf(dev,"Vlan in use, detach first\n");
722                 return (EBUSY);
723         }
724
725         IXL_PF_LOCK(pf);
726         ixl_stop(pf);
727         IXL_PF_UNLOCK(pf);
728
729         for (int i = 0; i < vsi->num_queues; i++, que++) {
730                 if (que->tq) {
731                         taskqueue_drain(que->tq, &que->task);
732                         taskqueue_drain(que->tq, &que->tx_task);
733                         taskqueue_free(que->tq);
734                 }
735         }
736
737         /* Shutdown LAN HMC */
738         status = i40e_shutdown_lan_hmc(hw);
739         if (status)
740                 device_printf(dev,
741                     "Shutdown LAN HMC failed with code %d\n", status);
742
743         /* Shutdown admin queue */
744         status = i40e_shutdown_adminq(hw);
745         if (status)
746                 device_printf(dev,
747                     "Shutdown Admin queue failed with code %d\n", status);
748
749         /* Unregister VLAN events */
750         if (vsi->vlan_attach != NULL)
751                 EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
752         if (vsi->vlan_detach != NULL)
753                 EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
754
755         ether_ifdetach(vsi->ifp);
756         callout_drain(&pf->timer);
757
758
759         ixl_free_pci_resources(pf);
760         bus_generic_detach(dev);
761         if_free(vsi->ifp);
762         ixl_free_vsi(vsi);
763         IXL_PF_LOCK_DESTROY(pf);
764         return (0);
765 }
766
767 /*********************************************************************
768  *
769  *  Shutdown entry point
770  *
771  **********************************************************************/
772
773 static int
774 ixl_shutdown(device_t dev)
775 {
776         struct ixl_pf *pf = device_get_softc(dev);
777         IXL_PF_LOCK(pf);
778         ixl_stop(pf);
779         IXL_PF_UNLOCK(pf);
780         return (0);
781 }
782
783
784 /*********************************************************************
785  *
786  *  Get the hardware capabilities
787  *
788  **********************************************************************/
789
790 static int
791 ixl_get_hw_capabilities(struct ixl_pf *pf)
792 {
793         struct i40e_aqc_list_capabilities_element_resp *buf;
794         struct i40e_hw  *hw = &pf->hw;
795         device_t        dev = pf->dev;
796         int             error, len;
797         u16             needed;
798         bool            again = TRUE;
799
800         len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
801 retry:
802         if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
803             malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
804                 device_printf(dev, "Unable to allocate cap memory\n");
805                 return (ENOMEM);
806         }
807
808         /* This populates the hw struct */
809         error = i40e_aq_discover_capabilities(hw, buf, len,
810             &needed, i40e_aqc_opc_list_func_capabilities, NULL);
811         free(buf, M_DEVBUF);
812         if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
813             (again == TRUE)) {
814                 /* retry once with a larger buffer */
815                 again = FALSE;
816                 len = needed;
817                 goto retry;
818         } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
819                 device_printf(dev, "capability discovery failed: %d\n",
820                     pf->hw.aq.asq_last_status);
821                 return (ENODEV);
822         }
823
824         /* Capture this PF's starting queue pair */
825         pf->qbase = hw->func_caps.base_queue;
826
827 #ifdef IXL_DEBUG
828         device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
829             "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
830             hw->pf_id, hw->func_caps.num_vfs,
831             hw->func_caps.num_msix_vectors,
832             hw->func_caps.num_msix_vectors_vf,
833             hw->func_caps.fd_filters_guaranteed,
834             hw->func_caps.fd_filters_best_effort,
835             hw->func_caps.num_tx_qp,
836             hw->func_caps.num_rx_qp,
837             hw->func_caps.base_queue);
838 #endif
839         return (error);
840 }
841
/*
 * Enforce the TXCSUM/TSO dependency when the user toggles interface
 * capabilities: TSO requires the matching TX checksum offload, so the
 * pair is enabled or disabled together.  The IXL_FLAGS_KEEP_TSO{4,6}
 * bits in vsi->flags remember that TSO was forced off only because
 * checksumming was disabled, so TSO is restored when checksumming
 * comes back on.  'mask' holds the capability bits the caller toggled.
 */
static void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
        device_t        dev = vsi->dev;

        /* Enable/disable TXCSUM/TSO4 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                /* Neither enabled: turning TXCSUM on may also restore TSO4 */
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable |= IFCAP_TXCSUM;
                        /* enable TXCSUM, restore TSO if previously enabled */
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                                ifp->if_capenable |= IFCAP_TSO4;
                        }
                }
                else if (mask & IFCAP_TSO4) {
                        /* TSO4 alone requested: drag TXCSUM on with it */
                        ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                        device_printf(dev,
                            "TSO4 requires txcsum, enabling both...\n");
                }
        } else if((ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                /* TXCSUM on, TSO4 off: each bit toggles independently */
                if (mask & IFCAP_TXCSUM)
                        ifp->if_capenable &= ~IFCAP_TXCSUM;
                else if (mask & IFCAP_TSO4)
                        ifp->if_capenable |= IFCAP_TSO4;
        } else if((ifp->if_capenable & IFCAP_TXCSUM)
            && (ifp->if_capenable & IFCAP_TSO4)) {
                /* Both on: disabling TXCSUM takes TSO4 down too, but
                 * remember that TSO4 was on so it can be re-enabled later */
                if (mask & IFCAP_TXCSUM) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO4;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
                        device_printf(dev, 
                            "TSO4 requires txcsum, disabling both...\n");
                } else if (mask & IFCAP_TSO4)
                        ifp->if_capenable &= ~IFCAP_TSO4;
        }

        /* Enable/disable TXCSUM_IPV6/TSO6 — same state machine as above,
         * applied to the IPv6 checksum/TSO capability pair */
        if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                                ifp->if_capenable |= IFCAP_TSO6;
                        }
                } else if (mask & IFCAP_TSO6) {
                        ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                        device_printf(dev,
                            "TSO6 requires txcsum6, enabling both...\n");
                }
        } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6)
                        ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
                else if (mask & IFCAP_TSO6)
                        ifp->if_capenable |= IFCAP_TSO6;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && (ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO6;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        device_printf(dev,
                            "TSO6 requires txcsum6, disabling both...\n");
                } else if (mask & IFCAP_TSO6)
                        ifp->if_capenable &= ~IFCAP_TSO6;
        }
}
913
914 /*********************************************************************
915  *  Ioctl entry point
916  *
917  *  ixl_ioctl is called when the user wants to configure the
918  *  interface.
919  *
920  *  return 0 on success, positive on failure
921  **********************************************************************/
922
/*
 * Interface ioctl entry point: handles address, MTU, flags, multicast,
 * media and capability requests from the stack; everything else is
 * delegated to ether_ioctl().  Returns 0 on success, errno on failure.
 */
static int
ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
        struct ixl_vsi  *vsi = ifp->if_softc;
        struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
        struct ifreq    *ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
        struct ifaddr *ifa = (struct ifaddr *)data;
        bool            avoid_reset = FALSE;
#endif
        int             error = 0;

        switch (command) {

        case SIOCSIFADDR:
#ifdef INET
                if (ifa->ifa_addr->sa_family == AF_INET)
                        avoid_reset = TRUE;
#endif
#ifdef INET6
                if (ifa->ifa_addr->sa_family == AF_INET6)
                        avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
                /*
                ** Calling init results in link renegotiation,
                ** so we avoid doing it when possible.
                */
                if (avoid_reset) {
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                                ixl_init(pf);
#ifdef INET
                        if (!(ifp->if_flags & IFF_NOARP))
                                arp_ifinit(ifp, ifa);
#endif
                } else
                        error = ether_ioctl(ifp, command, data);
                break;
#endif
        /*
         * NOTE(review): the 'break' above is inside the INET/INET6
         * conditional, so with neither option defined SIOCSIFADDR
         * falls through into SIOCSIFMTU — verify this is intended.
         */
        case SIOCSIFMTU:
                IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
                /* Reject MTUs whose resulting frame exceeds IXL_MAX_FRAME */
                if (ifr->ifr_mtu > IXL_MAX_FRAME -
                   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
                        error = EINVAL;
                } else {
                        IXL_PF_LOCK(pf);
                        ifp->if_mtu = ifr->ifr_mtu;
                        vsi->max_frame_size =
                                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
                            + ETHER_VLAN_ENCAP_LEN;
                        /* Reinitialize so the hardware picks up the new size */
                        ixl_init_locked(pf);
                        IXL_PF_UNLOCK(pf);
                }
                break;
        case SIOCSIFFLAGS:
                IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
                IXL_PF_LOCK(pf);
                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                /* Only promisc/allmulti changes need action
                                 * while already running */
                                if ((ifp->if_flags ^ pf->if_flags) &
                                    (IFF_PROMISC | IFF_ALLMULTI)) {
                                        ixl_set_promisc(vsi);
                                }
                        } else
                                ixl_init_locked(pf);
                } else
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                ixl_stop(pf);
                /* Remember flags to detect future promisc/allmulti changes */
                pf->if_flags = ifp->if_flags;
                IXL_PF_UNLOCK(pf);
                break;
        case SIOCADDMULTI:
                IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        IXL_PF_LOCK(pf);
                        /* Quiesce interrupts while filters are rewritten */
                        ixl_disable_intr(vsi);
                        ixl_add_multi(vsi);
                        ixl_enable_intr(vsi);
                        IXL_PF_UNLOCK(pf);
                }
                break;
        case SIOCDELMULTI:
                IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        IXL_PF_LOCK(pf);
                        ixl_disable_intr(vsi);
                        ixl_del_multi(vsi);
                        ixl_enable_intr(vsi);
                        IXL_PF_UNLOCK(pf);
                }
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
                error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
                break;
        case SIOCSIFCAP:
        {
                /* Bits that differ between requested and current caps */
                int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

                /* TXCSUM/TSO have a dependency; handled separately */
                ixl_cap_txcsum_tso(vsi, ifp, mask);

                if (mask & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                if (mask & IFCAP_RXCSUM_IPV6)
                        ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
                if (mask & IFCAP_LRO)
                        ifp->if_capenable ^= IFCAP_LRO;
                if (mask & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                if (mask & IFCAP_VLAN_HWFILTER)
                        ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                /* Reinitialize so the hardware reflects the new caps */
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        IXL_PF_LOCK(pf);
                        ixl_init_locked(pf);
                        IXL_PF_UNLOCK(pf);
                }
                VLAN_CAPABILITIES(ifp);

                break;
        }

        default:
                IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}
1057
1058
1059 /*********************************************************************
1060  *  Init entry point
1061  *
1062  *  This routine is used in two ways. It is used by the stack as
1063  *  init entry point in network interface structure. It is also used
1064  *  by the driver as a hw/sw initialization routine to get to a
1065  *  consistent state.
1066  *
1067  *  return 0 on success, positive on failure
1068  **********************************************************************/
1069
1070 static void
1071 ixl_init_locked(struct ixl_pf *pf)
1072 {
1073         struct i40e_hw  *hw = &pf->hw;
1074         struct ixl_vsi  *vsi = &pf->vsi;
1075         struct ifnet    *ifp = vsi->ifp;
1076         device_t        dev = pf->dev;
1077         struct i40e_filter_control_settings     filter;
1078         u8              tmpaddr[ETHER_ADDR_LEN];
1079         int             ret;
1080
1081         mtx_assert(&pf->pf_mtx, MA_OWNED);
1082         INIT_DEBUGOUT("ixl_init: begin");
1083         ixl_stop(pf);
1084
1085         /* Get the latest mac address... User might use a LAA */
1086         bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
1087               I40E_ETH_LENGTH_OF_ADDRESS);
1088         if (!cmp_etheraddr(hw->mac.addr, tmpaddr) && 
1089             i40e_validate_mac_addr(tmpaddr)) {
1090                 bcopy(tmpaddr, hw->mac.addr,
1091                     I40E_ETH_LENGTH_OF_ADDRESS);
1092                 ret = i40e_aq_mac_address_write(hw,
1093                     I40E_AQC_WRITE_TYPE_LAA_ONLY,
1094                     hw->mac.addr, NULL);
1095                 if (ret) {
1096                         device_printf(dev, "LLA address"
1097                          "change failed!!\n");
1098                         return;
1099                 }
1100         }
1101
1102         /* Set the various hardware offload abilities */
1103         ifp->if_hwassist = 0;
1104         if (ifp->if_capenable & IFCAP_TSO)
1105                 ifp->if_hwassist |= CSUM_TSO;
1106         if (ifp->if_capenable & IFCAP_TXCSUM)
1107                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1108         if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1109                 ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
1110
1111         /* Set up the device filtering */
1112         bzero(&filter, sizeof(filter));
1113         filter.enable_ethtype = TRUE;
1114         filter.enable_macvlan = TRUE;
1115 #ifdef IXL_FDIR
1116         filter.enable_fdir = TRUE;
1117 #endif
1118         if (i40e_set_filter_control(hw, &filter))
1119                 device_printf(dev, "set_filter_control() failed\n");
1120
1121         /* Set up RSS */
1122         ixl_config_rss(vsi);
1123
1124         /* Setup the VSI */
1125         ixl_setup_vsi(vsi);
1126
1127         /*
1128         ** Prepare the rings, hmc contexts, etc...
1129         */
1130         if (ixl_initialize_vsi(vsi)) {
1131                 device_printf(dev, "initialize vsi failed!!\n");
1132                 return;
1133         }
1134
1135         /* Add protocol filters to list */
1136         ixl_init_filters(vsi);
1137
1138         /* Setup vlan's if needed */
1139         ixl_setup_vlan_filters(vsi);
1140
1141         /* Start the local timer */
1142         callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1143
1144         /* Set up MSI/X routing and the ITR settings */
1145         if (ixl_enable_msix) {
1146                 ixl_configure_msix(pf);
1147                 ixl_configure_itr(pf);
1148         } else
1149                 ixl_configure_legacy(pf);
1150
1151         ixl_enable_rings(vsi);
1152
1153         i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
1154
1155         /* Set MTU in hardware*/
1156         int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
1157             TRUE, 0, NULL);
1158         if (aq_error)
1159                 device_printf(vsi->dev,
1160                         "aq_set_mac_config in init error, code %d\n",
1161                     aq_error);
1162
1163         /* And now turn on interrupts */
1164         ixl_enable_intr(vsi);
1165
1166         /* Now inform the stack we're ready */
1167         ifp->if_drv_flags |= IFF_DRV_RUNNING;
1168         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1169
1170         return;
1171 }
1172
/*
 * Stack-facing init entry point: acquire the PF lock and run the
 * locked initialization routine.
 */
static void
ixl_init(void *arg)
{
	struct ixl_pf *pf = arg;

	IXL_PF_LOCK(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);
}
1183
1184 /*
1185 **
1186 ** MSIX Interrupt Handlers and Tasklets
1187 **
1188 */
1189 static void
1190 ixl_handle_que(void *context, int pending)
1191 {
1192         struct ixl_queue *que = context;
1193         struct ixl_vsi *vsi = que->vsi;
1194         struct i40e_hw  *hw = vsi->hw;
1195         struct tx_ring  *txr = &que->txr;
1196         struct ifnet    *ifp = vsi->ifp;
1197         bool            more;
1198
1199         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1200                 more = ixl_rxeof(que, IXL_RX_LIMIT);
1201                 IXL_TX_LOCK(txr);
1202                 ixl_txeof(que);
1203                 if (!drbr_empty(ifp, txr->br))
1204                         ixl_mq_start_locked(ifp, txr);
1205                 IXL_TX_UNLOCK(txr);
1206                 if (more) {
1207                         taskqueue_enqueue(que->tq, &que->task);
1208                         return;
1209                 }
1210         }
1211
1212         /* Reenable this interrupt - hmmm */
1213         ixl_enable_queue(hw, que->me);
1214         return;
1215 }
1216
1217
1218 /*********************************************************************
1219  *
1220  *  Legacy Interrupt Service routine
1221  *
1222  **********************************************************************/
/*
 * Legacy (INTx/single-vector) interrupt handler.  Services admin-queue
 * events via the PF taskqueue, cleans RX/TX on queue 0, then re-enables
 * the shared interrupt causes and queue interrupts.
 */
void
ixl_intr(void *arg)
{
        struct ixl_pf           *pf = arg;
        struct i40e_hw          *hw =  &pf->hw;
        struct ixl_vsi          *vsi = &pf->vsi;
        struct ixl_queue        *que = vsi->queues;
        struct ifnet            *ifp = vsi->ifp;
        struct tx_ring          *txr = &que->txr;
        u32                     reg, icr0, mask;
        bool                    more_tx, more_rx;

        ++que->irqs;

        /* Protect against spurious interrupts */
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                return;

        /* Read (and thereby acknowledge) the interrupt causes */
        icr0 = rd32(hw, I40E_PFINT_ICR0);

        reg = rd32(hw, I40E_PFINT_DYN_CTL0);
        reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
        wr32(hw, I40E_PFINT_DYN_CTL0, reg);

        mask = rd32(hw, I40E_PFINT_ICR0_ENA);

        /* Admin-queue event: defer to the PF task and return early
         * (queue cleaning is skipped until the next interrupt) */
        if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
                taskqueue_enqueue(pf->tq, &pf->adminq);
                return;
        }

        more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

        IXL_TX_LOCK(txr);
        more_tx = ixl_txeof(que);
        if (!drbr_empty(vsi->ifp, txr->br))
                more_tx = 1;
        IXL_TX_UNLOCK(txr);

        /* re-enable other interrupt causes */
        wr32(hw, I40E_PFINT_ICR0_ENA, mask);

        /* And now the queues */
        reg = rd32(hw, I40E_QINT_RQCTL(0));
        reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
        wr32(hw, I40E_QINT_RQCTL(0), reg);

        reg = rd32(hw, I40E_QINT_TQCTL(0));
        reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
        /* NOTE(review): this clears a bit defined for the ICR0 register
         * in a QINT_TQCTL value — verify the intended mask against the
         * XL710 datasheet */
        reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
        wr32(hw, I40E_QINT_TQCTL(0), reg);

        ixl_enable_legacy(hw);

        return;
}
1279
1280
1281 /*********************************************************************
1282  *
1283  *  MSIX VSI Interrupt Service routine
1284  *
1285  **********************************************************************/
1286 void
1287 ixl_msix_que(void *arg)
1288 {
1289         struct ixl_queue        *que = arg;
1290         struct ixl_vsi  *vsi = que->vsi;
1291         struct i40e_hw  *hw = vsi->hw;
1292         struct tx_ring  *txr = &que->txr;
1293         bool            more_tx, more_rx;
1294
1295         /* Protect against spurious interrupts */
1296         if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
1297                 return;
1298
1299         ++que->irqs;
1300
1301         more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1302
1303         IXL_TX_LOCK(txr);
1304         more_tx = ixl_txeof(que);
1305         /*
1306         ** Make certain that if the stack 
1307         ** has anything queued the task gets
1308         ** scheduled to handle it.
1309         */
1310         if (!drbr_empty(vsi->ifp, txr->br))
1311                 more_tx = 1;
1312         IXL_TX_UNLOCK(txr);
1313
1314         ixl_set_queue_rx_itr(que);
1315         ixl_set_queue_tx_itr(que);
1316
1317         if (more_tx || more_rx)
1318                 taskqueue_enqueue(que->tq, &que->task);
1319         else
1320                 ixl_enable_queue(hw, que->me);
1321
1322         return;
1323 }
1324
1325
1326 /*********************************************************************
1327  *
1328  *  MSIX Admin Queue Interrupt Service routine
1329  *
1330  **********************************************************************/
/*
 * MSI-X admin-queue interrupt handler.  Masks the causes that fired
 * (so they stay quiet until serviced), clears the PBA bit, and defers
 * the actual admin-queue processing to the PF taskqueue.
 */
static void
ixl_msix_adminq(void *arg)
{
        struct ixl_pf   *pf = arg;
        struct i40e_hw  *hw = &pf->hw;
        u32             reg, mask;

        ++pf->admin_irq;

        reg = rd32(hw, I40E_PFINT_ICR0);
        mask = rd32(hw, I40E_PFINT_ICR0_ENA);

        /* Check on the cause */
        if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
                /* Leave adminq cause masked until the task services it */
                mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

        if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
                /* Malicious driver detection: handle inline, then mask */
                ixl_handle_mdd_event(pf);
                mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
        }

        if (reg & I40E_PFINT_ICR0_VFLR_MASK)
                /* VF level reset indication: mask until processed */
                mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;

        /* Clear the PBA bit to allow the next interrupt */
        reg = rd32(hw, I40E_PFINT_DYN_CTL0);
        reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
        wr32(hw, I40E_PFINT_DYN_CTL0, reg);

        /* NOTE(review): 'mask' is computed but never written back to
         * I40E_PFINT_ICR0_ENA here — confirm the task re-enables causes */
        taskqueue_enqueue(pf->tq, &pf->adminq);
        return;
}
1362
1363 /*********************************************************************
1364  *
1365  *  Media Ioctl callback
1366  *
1367  *  This routine is called whenever the user queries the status of
1368  *  the interface using ifconfig.
1369  *
1370  **********************************************************************/
/*
 * ifmedia status callback (e.g. for ifconfig): refresh the link state
 * under the PF lock, then translate the hardware PHY type into the
 * corresponding ifmedia type and report duplex and flow-control state.
 */
static void
ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
        struct ixl_vsi  *vsi = ifp->if_softc;
        struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
        struct i40e_hw  *hw = &pf->hw;

        INIT_DEBUGOUT("ixl_media_status: begin");
        IXL_PF_LOCK(pf);

        ixl_update_link_status(pf);

        ifmr->ifm_status = IFM_AVALID;
        ifmr->ifm_active = IFM_ETHER;

        /* No link: report valid-but-inactive media and stop */
        if (!vsi->link_up) {
                IXL_PF_UNLOCK(pf);
                return;
        }

        ifmr->ifm_status |= IFM_ACTIVE;
        /* Hardware is always full-duplex */
        ifmr->ifm_active |= IFM_FDX;

        /* Map the PHY type reported by firmware to an ifmedia subtype */
        switch (hw->phy.link_info.phy_type) {
                /* 100 M */
                case I40E_PHY_TYPE_100BASE_TX:
                        ifmr->ifm_active |= IFM_100_TX;
                        break;
                /* 1 G */
                case I40E_PHY_TYPE_1000BASE_T:
                        ifmr->ifm_active |= IFM_1000_T;
                        break;
                case I40E_PHY_TYPE_1000BASE_SX:
                        ifmr->ifm_active |= IFM_1000_SX;
                        break;
                case I40E_PHY_TYPE_1000BASE_LX:
                        ifmr->ifm_active |= IFM_1000_LX;
                        break;
                /* 10 G */
                case I40E_PHY_TYPE_10GBASE_CR1_CU:
                case I40E_PHY_TYPE_10GBASE_SFPP_CU:
                        ifmr->ifm_active |= IFM_10G_TWINAX;
                        break;
                case I40E_PHY_TYPE_10GBASE_KR:
                        /* 
                        ** this is not technically correct
                        ** but FreeBSD does not have the media
                        ** type defined yet, so its a compromise.
                        */
                case I40E_PHY_TYPE_10GBASE_SR:
                        ifmr->ifm_active |= IFM_10G_SR;
                        break;
                case I40E_PHY_TYPE_10GBASE_LR:
                        ifmr->ifm_active |= IFM_10G_LR;
                        break;
                case I40E_PHY_TYPE_10GBASE_T:
                        ifmr->ifm_active |= IFM_10G_T;
                        break;
                /* 40 G */
                case I40E_PHY_TYPE_40GBASE_CR4:
                case I40E_PHY_TYPE_40GBASE_CR4_CU:
                        ifmr->ifm_active |= IFM_40G_CR4;
                        break;
                case I40E_PHY_TYPE_40GBASE_SR4:
                        ifmr->ifm_active |= IFM_40G_SR4;
                        break;
                case I40E_PHY_TYPE_40GBASE_LR4:
                        ifmr->ifm_active |= IFM_40G_LR4;
                        break;
                default:
                        ifmr->ifm_active |= IFM_UNKNOWN;
                        break;
        }
        /* Report flow control status as well */
        if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
                ifmr->ifm_active |= IFM_ETH_TXPAUSE;
        if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
                ifmr->ifm_active |= IFM_ETH_RXPAUSE;

        IXL_PF_UNLOCK(pf);

        return;
}
1455
1456 /*********************************************************************
1457  *
1458  *  Media Ioctl callback
1459  *
1460  *  This routine is called when the user changes speed/duplex using
1461  *  media/mediopt option with ifconfig.
1462  *
1463  **********************************************************************/
1464 static int
1465 ixl_media_change(struct ifnet * ifp)
1466 {
1467         struct ixl_vsi *vsi = ifp->if_softc;
1468         struct ifmedia *ifm = &vsi->media;
1469
1470         INIT_DEBUGOUT("ixl_media_change: begin");
1471
1472         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1473                 return (EINVAL);
1474
1475         if_printf(ifp, "Media change is currently not supported.\n");
1476
1477         return (ENODEV);
1478 }
1479
1480
1481 #ifdef IXL_FDIR
1482 /*
1483 ** ATR: Application Targetted Receive - creates a filter
1484 **      based on TX flow info that will keep the receive
1485 **      portion of the flow on the same queue. Based on the
1486 **      implementation this is only available for TCP connections
1487 */
1488 void
1489 ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
1490 {
1491         struct ixl_vsi                  *vsi = que->vsi;
1492         struct tx_ring                  *txr = &que->txr;
1493         struct i40e_filter_program_desc *FDIR;
1494         u32                             ptype, dtype;
1495         int                             idx;
1496
1497         /* check if ATR is enabled and sample rate */
1498         if ((!ixl_enable_fdir) || (!txr->atr_rate))
1499                 return;
1500         /*
1501         ** We sample all TCP SYN/FIN packets,
1502         ** or at the selected sample rate 
1503         */
1504         txr->atr_count++;
1505         if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
1506             (txr->atr_count < txr->atr_rate))
1507                 return;
1508         txr->atr_count = 0;
1509
1510         /* Get a descriptor to use */
1511         idx = txr->next_avail;
1512         FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
1513         if (++idx == que->num_desc)
1514                 idx = 0;
1515         txr->avail--;
1516         txr->next_avail = idx;
1517
1518         ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1519             I40E_TXD_FLTR_QW0_QINDEX_MASK;
1520
1521         ptype |= (etype == ETHERTYPE_IP) ?
1522             (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1523             I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1524             (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1525             I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1526
1527         ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1528
1529         dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
1530
1531         /*
1532         ** We use the TCP TH_FIN as a trigger to remove
1533         ** the filter, otherwise its an update.
1534         */
1535         dtype |= (th->th_flags & TH_FIN) ?
1536             (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1537             I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1538             (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1539             I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1540
1541         dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1542             I40E_TXD_FLTR_QW1_DEST_SHIFT;
1543
1544         dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1545             I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1546
1547         FDIR->qindex_flex_ptype_vsi = htole32(ptype);
1548         FDIR->dtype_cmd_cntindex = htole32(dtype);
1549         return;
1550 }
1551 #endif
1552
1553
1554 static void
1555 ixl_set_promisc(struct ixl_vsi *vsi)
1556 {
1557         struct ifnet    *ifp = vsi->ifp;
1558         struct i40e_hw  *hw = vsi->hw;
1559         int             err, mcnt = 0;
1560         bool            uni = FALSE, multi = FALSE;
1561
1562         if (ifp->if_flags & IFF_ALLMULTI)
1563                 multi = TRUE;
1564         else { /* Need to count the multicast addresses */
1565                 struct  ifmultiaddr *ifma;
1566                 if_maddr_rlock(ifp);
1567                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1568                         if (ifma->ifma_addr->sa_family != AF_LINK)
1569                                 continue;
1570                         if (mcnt == MAX_MULTICAST_ADDR)
1571                                 break;
1572                         mcnt++;
1573                 }
1574                 if_maddr_runlock(ifp);
1575         }
1576
1577         if (mcnt >= MAX_MULTICAST_ADDR)
1578                 multi = TRUE;
1579         if (ifp->if_flags & IFF_PROMISC)
1580                 uni = TRUE;
1581
1582         err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1583             vsi->seid, uni, NULL);
1584         err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1585             vsi->seid, multi, NULL);
1586         return;
1587 }
1588
1589 /*********************************************************************
1590  *      Filter Routines
1591  *
1592  *      Routines for multicast and vlan filter management.
1593  *
1594  *********************************************************************/
1595 static void
1596 ixl_add_multi(struct ixl_vsi *vsi)
1597 {
1598         struct  ifmultiaddr     *ifma;
1599         struct ifnet            *ifp = vsi->ifp;
1600         struct i40e_hw          *hw = vsi->hw;
1601         int                     mcnt = 0, flags;
1602
1603         IOCTL_DEBUGOUT("ixl_add_multi: begin");
1604
1605         if_maddr_rlock(ifp);
1606         /*
1607         ** First just get a count, to decide if we
1608         ** we simply use multicast promiscuous.
1609         */
1610         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1611                 if (ifma->ifma_addr->sa_family != AF_LINK)
1612                         continue;
1613                 mcnt++;
1614         }
1615         if_maddr_runlock(ifp);
1616
1617         if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1618                 /* delete existing MC filters */
1619                 ixl_del_hw_filters(vsi, mcnt);
1620                 i40e_aq_set_vsi_multicast_promiscuous(hw,
1621                     vsi->seid, TRUE, NULL);
1622                 return;
1623         }
1624
1625         mcnt = 0;
1626         if_maddr_rlock(ifp);
1627         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1628                 if (ifma->ifma_addr->sa_family != AF_LINK)
1629                         continue;
1630                 ixl_add_mc_filter(vsi,
1631                     (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1632                 mcnt++;
1633         }
1634         if_maddr_runlock(ifp);
1635         if (mcnt > 0) {
1636                 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1637                 ixl_add_hw_filters(vsi, flags, mcnt);
1638         }
1639
1640         IOCTL_DEBUGOUT("ixl_add_multi: end");
1641         return;
1642 }
1643
/*
 * ixl_del_multi: walk the VSI's software filter list and mark for
 * deletion every multicast filter whose address no longer appears on
 * the interface's multicast list, then push the deletions to hardware.
 */
static void
ixl_del_multi(struct ixl_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct ifmultiaddr	*ifma;
	struct ixl_mac_filter	*f;
	int			mcnt = 0;
	bool		match = FALSE;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, &vsi->ftl, next) {
		/* Only active multicast filters are candidates */
		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
			match = FALSE;
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			/* Not found on the interface list: flag for removal */
			if (match == FALSE) {
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
			}
		}
	}
	if_maddr_runlock(ifp);

	/* Commit the flagged deletions to the hardware */
	if (mcnt > 0)
		ixl_del_hw_filters(vsi, mcnt);
}
1680
1681
1682 /*********************************************************************
1683  *  Timer routine
1684  *
1685  *  This routine checks for link status,updates statistics,
1686  *  and runs the watchdog check.
1687  *
1688  **********************************************************************/
1689
/*
 * ixl_local_timer: periodic (1 Hz) watchdog.  Kicks the adminq task,
 * refreshes statistics, nudges busy queues with a software interrupt,
 * and reinitializes the interface if every queue appears hung.
 * Called with the PF mutex held (callout is mutex-protected).
 */
static void
ixl_local_timer(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = pf->dev;
	int			hung = 0;
	u32			mask;

	mtx_assert(&pf->pf_mtx, MA_OWNED);

	/* Fire off the adminq task */
	taskqueue_enqueue(pf->tq, &pf->adminq);

	/* Update stats */
	ixl_update_stats_counters(pf);

	/*
	** Check status of the queues
	*/
	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
 
	for (int i = 0; i < vsi->num_queues; i++,que++) {
		/* Any queues with outstanding work get a sw irq */
		if (que->busy)
			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to 5 we declare it hung.
		*/
		if (que->busy == IXL_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			vsi->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
				vsi->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXL_MAX_TX_BUSY) {
#ifdef IXL_DEBUG
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
#endif
			que->busy = IXL_QUEUE_HUNG;
			++hung;
		}
	}
	/* Only reinit if all queues show hung */
	if (hung == vsi->num_queues)
		goto hung;

	/* Re-arm ourselves for the next second */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
	return;

hung:
	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
	ixl_init_locked(pf);
}
1754
1755 /*
1756 ** Note: this routine updates the OS on the link state
1757 **      the real check of the hardware only happens with
1758 **      a link interrupt.
1759 */
1760 static void
1761 ixl_update_link_status(struct ixl_pf *pf)
1762 {
1763         struct ixl_vsi          *vsi = &pf->vsi;
1764         struct i40e_hw          *hw = &pf->hw;
1765         struct ifnet            *ifp = vsi->ifp;
1766         device_t                dev = pf->dev;
1767         enum i40e_fc_mode       fc;
1768
1769
1770         if (vsi->link_up){ 
1771                 if (vsi->link_active == FALSE) {
1772                         i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
1773                         if (bootverbose) {
1774                                 fc = hw->fc.current_mode;
1775                                 device_printf(dev,"Link is up %d Gbps %s,"
1776                                     " Flow Control: %s\n",
1777                                     ((vsi->link_speed == I40E_LINK_SPEED_40GB)? 40:10),
1778                                     "Full Duplex", ixl_fc_string[fc]);
1779                         }
1780                         vsi->link_active = TRUE;
1781                         /*
1782                         ** Warn user if link speed on NPAR enabled
1783                         ** partition is not at least 10GB
1784                         */
1785                         if (hw->func_caps.npar_enable &&
1786                            (hw->phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
1787                            hw->phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
1788                                 device_printf(dev, "The partition detected link"
1789                                     "speed that is less than 10Gbps\n");
1790                         if_link_state_change(ifp, LINK_STATE_UP);
1791                 }
1792         } else { /* Link down */
1793                 if (vsi->link_active == TRUE) {
1794                         if (bootverbose)
1795                                 device_printf(dev,"Link is Down\n");
1796                         if_link_state_change(ifp, LINK_STATE_DOWN);
1797                         vsi->link_active = FALSE;
1798                 }
1799         }
1800
1801         return;
1802 }
1803
1804 /*********************************************************************
1805  *
1806  *  This routine disables all traffic on the adapter by issuing a
1807  *  global reset on the MAC and deallocates TX/RX buffers.
1808  *
1809  **********************************************************************/
1810
1811 static void
1812 ixl_stop(struct ixl_pf *pf)
1813 {
1814         struct ixl_vsi  *vsi = &pf->vsi;
1815         struct ifnet    *ifp = vsi->ifp;
1816
1817         mtx_assert(&pf->pf_mtx, MA_OWNED);
1818
1819         INIT_DEBUGOUT("ixl_stop: begin\n");
1820         ixl_disable_intr(vsi);
1821         ixl_disable_rings(vsi);
1822
1823         /* Tell the stack that the interface is no longer active */
1824         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1825
1826         /* Stop the local timer */
1827         callout_stop(&pf->timer);
1828
1829         return;
1830 }
1831
1832
1833 /*********************************************************************
1834  *
1835  *  Setup MSIX Interrupt resources and handlers for the VSI
1836  *
1837  **********************************************************************/
/*
 * ixl_assign_vsi_legacy: allocate and wire up a single shared
 * legacy/MSI interrupt for the whole VSI, and create the taskqueues
 * used for deferred TX, queue servicing, and adminq processing.
 * Returns 0 on success or a bus error (ENXIO/errno).
 */
static int
ixl_assign_vsi_legacy(struct ixl_pf *pf)
{
	device_t	dev = pf->dev;
	struct		ixl_vsi *vsi = &pf->vsi;
	struct		ixl_queue *que = vsi->queues;
	int		error, rid = 0;

	/* MSI uses rid 1; legacy INTx uses rid 0 */
	if (pf->msix == 1)
		rid = 1;
	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (pf->res == NULL) {
		device_printf(dev,"Unable to allocate"
		    " bus resource: vsi legacy/msi interrupt\n");
		return (ENXIO);
	}

	/* Set the handler function */
	error = bus_setup_intr(dev, pf->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixl_intr, pf, &pf->tag);
	if (error) {
		pf->res = NULL;
		device_printf(dev, "Failed to register legacy/msi handler");
		return (error);
	}
	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
	/* Deferred start + queue service tasks for the single queue */
	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
	TASK_INIT(&que->task, 0, ixl_handle_que, que);
	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
	    taskqueue_thread_enqueue, &que->tq);
	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
	    device_get_nameunit(dev));
	/* Separate taskqueue for admin queue processing */
	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
	    taskqueue_thread_enqueue, &pf->tq);
	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
	    device_get_nameunit(dev));

	return (0);
}
1880
1881
1882 /*********************************************************************
1883  *
1884  *  Setup MSIX Interrupt resources and handlers for the VSI
1885  *
1886  **********************************************************************/
/*
 * ixl_assign_vsi_msix: allocate and wire up MSI-X vectors — vector 0
 * for the admin queue, then one vector per RX/TX queue pair.  Each
 * queue vector is bound to a CPU (RSS bucket CPU when RSS is built in)
 * and gets its own fast taskqueue.  Returns 0 or a bus error.
 */
static int
ixl_assign_vsi_msix(struct ixl_pf *pf)
{
	device_t	dev = pf->dev;
	struct		ixl_vsi *vsi = &pf->vsi;
	struct		ixl_queue *que = vsi->queues;
	struct		tx_ring	 *txr;
	int		error, rid, vector = 0;

	/* Admin Que is vector 0*/
	rid = vector + 1;	/* SYS_RES_IRQ rids are 1-based for MSI-X */
	pf->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (!pf->res) {
		device_printf(dev,"Unable to allocate"
	    " bus resource: Adminq interrupt [%d]\n", rid);
		return (ENXIO);
	}
	/* Set the adminq vector and handler */
	error = bus_setup_intr(dev, pf->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixl_msix_adminq, pf, &pf->tag);
	if (error) {
		pf->res = NULL;
		device_printf(dev, "Failed to register Admin que handler");
		return (error);
	}
	bus_describe_intr(dev, pf->res, pf->tag, "aq");
	pf->admvec = vector;
	/* Tasklet for Admin Queue */
	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
	    taskqueue_thread_enqueue, &pf->tq);
	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
	    device_get_nameunit(pf->dev));
	++vector;

	/* Now set up the stations */
	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
		int cpu_id = i;
		rid = vector + 1;
		txr = &que->txr;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev,"Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixl_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register que handler");
			return (error);
		}
		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
		/* Bind the vector to a CPU */
#ifdef RSS
		/* With RSS, bind to the CPU owning this RSS bucket */
		cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
		bus_bind_intr(dev, que->res, cpu_id);
		que->msix = vector;
		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
		TASK_INIT(&que->task, 0, ixl_handle_que, que);
		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
		taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
		    cpu_id, "%s (bucket %d)",
		    device_get_nameunit(dev), cpu_id);
#else
		taskqueue_start_threads(&que->tq, 1, PI_NET,
		    "%s que", device_get_nameunit(dev));
#endif
	}

	return (0);
}
1968
1969
1970 /*
1971  * Allocate MSI/X vectors
1972  */
/*
 * Allocate MSI/X vectors
 *
 * Tries MSI-X first (one vector per queue plus one for the adminq),
 * falling back to MSI and finally legacy INTx.  Returns the number of
 * vectors obtained (0 means legacy), and sets pf->msix,
 * pf->vsi.num_queues, and — on fallback — the ixl_max_queues /
 * ixl_enable_msix globals as side effects.
 */
static int
ixl_init_msix(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int rid, want, vectors, queues, available;

	/* Override by tuneable */
	if (ixl_enable_msix == 0)
		goto msi;

	/*
	** When used in a virtualized environment 
	** PCI BUSMASTER capability may not be set
	** so explicity set it here and rewrite
	** the ENABLE in the MSIX control register
	** at this point to cause the host to
	** successfully initialize us.
	*/
	{
		u16 pci_cmd_word;
		int msix_ctrl;
		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	/* First try MSI/X */
	rid = PCIR_BAR(IXL_BAR);
	pf->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!pf->msix_mem) {
		/* May not be enabled */
		device_printf(pf->dev,
		    "Unable to map MSIX table \n");
		goto msi;
	}

	available = pci_msix_count(dev); 
	if (available == 0) { /* system has msix disabled */
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, pf->msix_mem);
		pf->msix_mem = NULL;
		goto msi;
	}

	/* Figure out a reasonable auto config value */
	/* (one vector is reserved for the adminq, hence available - 1) */
	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;

	/* Override with hardcoded value if sane */
	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues)) 
		queues = ixl_max_queues;

#ifdef	RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for the admin queue.
	*/
	want = queues + 1;
	if (want <= available)	/* Have enough */
		vectors = want;
	else {
		device_printf(pf->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors available but %d wanted!\n",
		    available, want);
		return (0); /* Will go to Legacy setup */
	}

	if (pci_alloc_msix(dev, &vectors) == 0) {
		device_printf(pf->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		pf->msix = vectors;
		pf->vsi.num_queues = queues;
#ifdef RSS
		/*
		 * If we're doing RSS, the number of queues needs to
		 * match the number of RSS buckets that are configured.
		 *
		 * + If there's more queues than RSS buckets, we'll end
		 *   up with queues that get no traffic.
		 *
		 * + If there's more RSS buckets than queues, we'll end
		 *   up having multiple RSS buckets map to the same queue,
		 *   so there'll be some contention.
		 */
		if (queues != rss_getnumbuckets()) {
			device_printf(dev,
			    "%s: queues (%d) != RSS buckets (%d)"
			    "; performance will be impacted.\n",
			    __func__, queues, rss_getnumbuckets());
		}
#endif
		return (vectors);
	}
msi:
	/* MSI/legacy fallback: single queue, update globals to match */
	vectors = pci_msi_count(dev);
	pf->vsi.num_queues = 1;
	pf->msix = 1;
	ixl_max_queues = 1;
	ixl_enable_msix = 0;
	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
		device_printf(pf->dev,"Using an MSI interrupt\n");
	else {
		pf->msix = 0;
		device_printf(pf->dev,"Using a Legacy interrupt\n");
	}
	return (vectors);
}
2091
2092
2093 /*
2094  * Plumb MSI/X vectors
2095  */
/*
 * Plumb MSI/X vectors
 *
 * Programs the hardware interrupt linkage: the adminq/"other" causes
 * on vector 0, then links each queue pair (RX -> TX) to its own MSI-X
 * vector via the QINT_RQCTL/QINT_TQCTL cause-linkage registers.
 */
static void
ixl_configure_msix(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	u32		reg;
	u16		vector = 1;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);	   /* read to clear */

	/*
	 * NOTE(review): the HMC entry uses I40E_PFINT_ICR0_HMC_ERR_MASK
	 * while its neighbors use the _ENA_ variants; presumably the bit
	 * positions match, but confirm against the i40e register header.
	 */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/* 0x7FF terminates vector 0's cause list; seed its RX ITR */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);

	/* Next configure the queues */
	for (int i = 0; i < vsi->num_queues; i++, vector++) {
		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
		wr32(hw, I40E_PFINT_LNKLSTN(i), i);

		/* RX cause: route to this vector, chain to the TX queue */
		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_RQCTL(i), reg);

		/* TX cause: chain to the next RX queue, EOL on the last */
		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
		if (i == (vsi->num_queues - 1))
			reg |= (IXL_QUEUE_EOL
			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
		wr32(hw, I40E_QINT_TQCTL(i), reg);
	}
}
2149
2150 /*
2151  * Configure for MSI single vector operation 
2152  */
/*
 * Configure for MSI single vector operation 
 *
 * Programs vector 0 to carry both the "other" causes and the single
 * queue pair's interrupts, then enables queue pair 0.
 */
static void
ixl_configure_legacy(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	u32		reg;

	/* Zero the RX and TX ITRs on vector 0 */
	wr32(hw, I40E_PFINT_ITR0(0), 0);
	wr32(hw, I40E_PFINT_ITR0(1), 0);


	/* Setup "other" causes */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
	    | I40E_PFINT_ICR0_ENA_GRST_MASK
	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
	    ;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
	wr32(hw, I40E_PFINT_STAT_CTL0, 0);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the q int */
	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
	wr32(hw, I40E_QINT_TQCTL(0), reg);

	/* Next enable the queue pair */
	reg = rd32(hw, I40E_QTX_ENA(0));
	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QTX_ENA(0), reg);

	reg = rd32(hw, I40E_QRX_ENA(0));
	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QRX_ENA(0), reg);
}
2208
2209
2210 /*
2211  * Set the Initial ITR state
2212  */
2213 static void
2214 ixl_configure_itr(struct ixl_pf *pf)
2215 {
2216         struct i40e_hw          *hw = &pf->hw;
2217         struct ixl_vsi          *vsi = &pf->vsi;
2218         struct ixl_queue        *que = vsi->queues;
2219
2220         vsi->rx_itr_setting = ixl_rx_itr;
2221         if (ixl_dynamic_rx_itr)
2222                 vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2223         vsi->tx_itr_setting = ixl_tx_itr;
2224         if (ixl_dynamic_tx_itr)
2225                 vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2226         
2227         for (int i = 0; i < vsi->num_queues; i++, que++) {
2228                 struct tx_ring  *txr = &que->txr;
2229                 struct rx_ring  *rxr = &que->rxr;
2230
2231                 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2232                     vsi->rx_itr_setting);
2233                 rxr->itr = vsi->rx_itr_setting;
2234                 rxr->latency = IXL_AVE_LATENCY;
2235                 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2236                     vsi->tx_itr_setting);
2237                 txr->itr = vsi->tx_itr_setting;
2238                 txr->latency = IXL_AVE_LATENCY;
2239         }
2240 }
2241
2242
2243 static int
2244 ixl_allocate_pci_resources(struct ixl_pf *pf)
2245 {
2246         int             rid;
2247         device_t        dev = pf->dev;
2248
2249         rid = PCIR_BAR(0);
2250         pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2251             &rid, RF_ACTIVE);
2252
2253         if (!(pf->pci_mem)) {
2254                 device_printf(dev,"Unable to allocate bus resource: memory\n");
2255                 return (ENXIO);
2256         }
2257
2258         pf->osdep.mem_bus_space_tag =
2259                 rman_get_bustag(pf->pci_mem);
2260         pf->osdep.mem_bus_space_handle =
2261                 rman_get_bushandle(pf->pci_mem);
2262         pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2263         pf->osdep.flush_reg = I40E_GLGEN_STAT;
2264         pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2265
2266         pf->hw.back = &pf->osdep;
2267
2268         /*
2269         ** Now setup MSI or MSI/X, should
2270         ** return us the number of supported
2271         ** vectors. (Will be 1 for MSI)
2272         */
2273         pf->msix = ixl_init_msix(pf);
2274         return (0);
2275 }
2276
/*
 * ixl_free_pci_resources: tear down interrupt handlers and release
 * all bus resources — per-queue MSI-X vectors first, then the adminq
 * (or legacy/MSI) vector, the MSI allocation, the MSI-X table BAR,
 * and finally the register BAR.  Safe to call before the stations
 * are set up (skips the per-queue loop in that case).
 */
static void
ixl_free_pci_resources(struct ixl_pf * pf)
{
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = pf->dev;
	int			rid, memrid;

	memrid = PCIR_BAR(IXL_BAR);

	/* We may get here before stations are setup */
	if ((!ixl_enable_msix) || (que == NULL))
		goto early;

	/*
	**  Release all msix VSI resources:
	*/
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}

early:
	/* Clean the AdminQ interrupt last */
	if (pf->admvec) /* we are doing MSIX */
		rid = pf->admvec + 1;
	else
		(pf->msix != 0) ? (rid = 1):(rid = 0);

	if (pf->tag != NULL) {
		bus_teardown_intr(dev, pf->res, pf->tag);
		pf->tag = NULL;
	}
	if (pf->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);

	/* Give back the MSI/MSI-X vectors themselves */
	if (pf->msix)
		pci_release_msi(dev);

	if (pf->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, pf->msix_mem);

	if (pf->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), pf->pci_mem);

	return;
}
2331
2332 static void
2333 ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2334 {
2335         /* Display supported media types */
2336         if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2337                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2338
2339         if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2340                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2341
2342         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2343             phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2344                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2345         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2346                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2347         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2348                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2349         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2350                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2351
2352         if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2353             phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4))
2354                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2355         if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2356                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2357         if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2358                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2359 }
2360
2361 /*********************************************************************
2362  *
2363  *  Setup networking device structure and register an interface.
2364  *
2365  **********************************************************************/
2366 static int
2367 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2368 {
2369         struct ifnet            *ifp;
2370         struct i40e_hw          *hw = vsi->hw;
2371         struct ixl_queue        *que = vsi->queues;
2372         struct i40e_aq_get_phy_abilities_resp abilities_resp;
2373         enum i40e_status_code aq_error = 0;
2374
2375         INIT_DEBUGOUT("ixl_setup_interface: begin");
2376
2377         ifp = vsi->ifp = if_alloc(IFT_ETHER);
2378         if (ifp == NULL) {
2379                 device_printf(dev, "can not allocate ifnet structure\n");
2380                 return (-1);
2381         }
2382         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2383         ifp->if_mtu = ETHERMTU;
2384         if_initbaudrate(ifp, IF_Gbps(40));
2385         ifp->if_init = ixl_init;
2386         ifp->if_softc = vsi;
2387         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2388         ifp->if_ioctl = ixl_ioctl;
2389
2390 #if __FreeBSD_version >= 1100036
2391         if_setgetcounterfn(ifp, ixl_get_counter);
2392 #endif
2393
2394         ifp->if_transmit = ixl_mq_start;
2395
2396         ifp->if_qflush = ixl_qflush;
2397
2398         ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2399
2400         vsi->max_frame_size =
2401             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2402             + ETHER_VLAN_ENCAP_LEN;
2403
2404         /*
2405          * Tell the upper layer(s) we support long frames.
2406          */
2407         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2408
2409         ifp->if_capabilities |= IFCAP_HWCSUM;
2410         ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2411         ifp->if_capabilities |= IFCAP_TSO;
2412         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2413         ifp->if_capabilities |= IFCAP_LRO;
2414
2415         /* VLAN capabilties */
2416         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2417                              |  IFCAP_VLAN_HWTSO
2418                              |  IFCAP_VLAN_MTU
2419                              |  IFCAP_VLAN_HWCSUM;
2420         ifp->if_capenable = ifp->if_capabilities;
2421
2422         /*
2423         ** Don't turn this on by default, if vlans are
2424         ** created on another pseudo device (eg. lagg)
2425         ** then vlan events are not passed thru, breaking
2426         ** operation, but with HW FILTER off it works. If
2427         ** using vlans directly on the ixl driver you can
2428         ** enable this and get full hardware tag filtering.
2429         */
2430         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2431
2432         /*
2433          * Specify the media types supported by this adapter and register
2434          * callbacks to update media and link information
2435          */
2436         ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2437                      ixl_media_status);
2438
2439         aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
2440         if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2441                 /* Need delay to detect fiber correctly */
2442                 i40e_msec_delay(200);
2443                 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2444                     TRUE, &abilities_resp, NULL);
2445                 if (aq_error == I40E_ERR_UNKNOWN_PHY)
2446                         device_printf(dev, "Unknown PHY type detected!\n");
2447                 else
2448                         ixl_add_ifmedia(vsi, abilities_resp.phy_type);
2449         } else if (aq_error) {
2450                 device_printf(dev, "Error getting supported media types, err %d,"
2451                     " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2452         } else
2453                 ixl_add_ifmedia(vsi, abilities_resp.phy_type);
2454
2455         /* Use autoselect media by default */
2456         ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2457         ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2458
2459         ether_ifattach(ifp, hw->mac.addr);
2460
2461         return (0);
2462 }
2463
2464 static bool
2465 ixl_config_link(struct i40e_hw *hw)
2466 {
2467         bool check;
2468
2469         i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
2470         check = i40e_get_link_status(hw);
2471 #ifdef IXL_DEBUG
2472         printf("Link is %s\n", check ? "up":"down");
2473 #endif
2474         return (check);
2475 }
2476
2477 /*********************************************************************
2478  *
2479  *  Initialize this VSI 
2480  *
2481  **********************************************************************/
static int
ixl_setup_vsi(struct ixl_vsi *vsi)
{
	struct i40e_hw	*hw = vsi->hw;
	device_t	dev = vsi->dev;
	struct i40e_aqc_get_switch_config_resp *sw_config;
	struct i40e_vsi_context ctxt;
	u8	aq_buf[I40E_AQ_LARGE_BUF];
	int	ret = I40E_SUCCESS;
	u16	next = 0;

	/*
	** Fetch the firmware switch configuration; element 0 is the
	** switch element this (single) VSI is attached to.
	*/
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	ret = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (ret) {
		device_printf(dev,"aq_get_switch_config failed!!\n");
		return (ret);
	}
#ifdef IXL_DEBUG
	printf("Switch config: header reported: %d in structure, %d total\n",
	    sw_config->header.num_reported, sw_config->header.num_total);
	printf("type=%d seid=%d uplink=%d downlink=%d\n",
	    sw_config->element[0].element_type,
	    sw_config->element[0].seid,
	    sw_config->element[0].uplink_seid,
	    sw_config->element[0].downlink_seid);
#endif
	/* Save off this important value */
	vsi->seid = sw_config->element[0].seid;

	/* Read back the current VSI parameters before modifying them */
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = vsi->seid;
	ctxt.pf_num = hw->pf_id;
	ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		device_printf(dev,"get vsi params failed %x!!\n", ret);
		return (ret);
	}
#ifdef IXL_DEBUG
	printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
	    ctxt.uplink_seid, ctxt.vsi_number,
	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
#endif
	/*
	** Set the queue and traffic class bits
	**  - when multiple traffic classes are supported
	**    this will need to be more robust.
	*/
	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
	ctxt.info.queue_mapping[0] = 0; 
	/* TC0: queue offset 0, all queues in one contiguous range */
	ctxt.info.tc_mapping[0] = 0x0800; 

	/*
	** Set VLAN receive stripping mode: strip the tag into the RX
	** descriptor only if the stack enabled hardware VLAN tagging,
	** otherwise leave frames untouched.
	*/
	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
	else
	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	/* Keep copy of VSI info in VSI for statistic counters */
	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));

	/* Reset VSI statistics */
	ixl_vsi_reset_stats(vsi);
	vsi->hw_filters_add = 0;
	vsi->hw_filters_del = 0;

	/* Push the modified context back to the firmware */
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret)
		device_printf(dev,"update vsi params failed %x!!\n",
		   hw->aq.asq_last_status);
	return (ret);
}
2561
2562
2563 /*********************************************************************
2564  *
2565  *  Initialize the VSI:  this handles contexts, which means things
2566  *                       like the number of descriptors, buffer size,
2567  *                       plus we init the rings thru this function.
2568  *
2569  **********************************************************************/
static int
ixl_initialize_vsi(struct ixl_vsi *vsi)
{
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = vsi->dev;
	struct i40e_hw		*hw = vsi->hw;
	int			err = 0;


	/* Program a TX and RX HMC context for every queue pair */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring		*txr = &que->txr;
		struct rx_ring		*rxr = &que->rxr;
		struct i40e_hmc_obj_txq tctx;
		struct i40e_hmc_obj_rxq rctx;
		u32			txctl;
		u16			size;


		/* Setup the HMC TX Context  */
		size = que->num_desc * sizeof(struct i40e_tx_desc);
		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
		tctx.new_context = 1;
		/* HMC base addresses are in 128-byte units */
		tctx.base = (txr->dma.pa/128);
		tctx.qlen = que->num_desc;
		tctx.fc_ena = 0;
		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
		/*
		** Enable HEAD writeback: the HW reports TX completion by
		** DMAing the head index into the word just past the
		** descriptor ring, instead of writing back descriptors.
		*/
		tctx.head_wb_ena = 1;
		tctx.head_wb_addr = txr->dma.pa +
		    (que->num_desc * sizeof(struct i40e_tx_desc));
		tctx.rdylist_act = 0;
		err = i40e_clear_lan_tx_queue_context(hw, i);
		if (err) {
			device_printf(dev, "Unable to clear TX context\n");
			break;
		}
		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
		if (err) {
			device_printf(dev, "Unable to set TX context\n");
			break;
		}
		/* Associate the ring with this PF */
		txctl = I40E_QTX_CTL_PF_QUEUE;
		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
		wr32(hw, I40E_QTX_CTL(i), txctl);
		ixl_flush(hw);

		/* Do ring (re)init */
		ixl_init_tx_ring(que);

		/* Next setup the HMC RX Context  */
		if (vsi->max_frame_size <= 2048)
			rxr->mbuf_sz = MCLBYTES;
		else
			rxr->mbuf_sz = MJUMPAGESIZE;

		/* Largest frame the HW can chain across RX buffers */
		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;

		/* Set up an RX context for the HMC */
		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
		/* ignore header split for now */
		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
		    vsi->max_frame_size : max_rxmax;
		rctx.dtype = 0;
		rctx.dsize = 1;	/* do 32byte descriptors */
		rctx.hsplit_0 = 0;  /* no HDR split initially */
		/* HMC base addresses are in 128-byte units */
		rctx.base = (rxr->dma.pa/128);
		rctx.qlen = que->num_desc;
		rctx.tphrdesc_ena = 1;
		rctx.tphwdesc_ena = 1;
		rctx.tphdata_ena = 0;
		rctx.tphhead_ena = 0;
		rctx.lrxqthresh = 2;
		rctx.crcstrip = 1;
		rctx.l2tsel = 1;
		rctx.showiv = 1;
		rctx.fc_ena = 0;
		rctx.prefena = 1;

		err = i40e_clear_lan_rx_queue_context(hw, i);
		if (err) {
			device_printf(dev,
			    "Unable to clear RX context %d\n", i);
			break;
		}
		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
		if (err) {
			device_printf(dev, "Unable to set RX context %d\n", i);
			break;
		}
		err = ixl_init_rx_ring(que);
		if (err) {
			device_printf(dev, "Fail in init_rx_ring %d\n", i);
			break;
		}
		/* Reset tail, then hand all but one descriptor to HW */
		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
	}
	return (err);
}
2673
2674
2675 /*********************************************************************
2676  *
2677  *  Free all VSI structs.
2678  *
2679  **********************************************************************/
/*
** Release all queue resources (descriptor DMA memory, soft state,
** locks, buf rings) and the VSI's MAC filter list.  Safe to call on a
** partially-initialized VSI: queues whose locks were never set up are
** skipped.
*/
void
ixl_free_vsi(struct ixl_vsi *vsi)
{
	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
	struct ixl_queue	*que = vsi->queues;
	struct ixl_mac_filter *f;

	/* Free station queues */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		struct rx_ring *rxr = &que->rxr;
	
		/*
		** TX is initialized before RX in ixl_setup_stations(), so
		** an uninitialized TX lock implies the RX side of this
		** queue was never set up either — skip the whole queue.
		*/
		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
			continue;
		IXL_TX_LOCK(txr);
		ixl_free_que_tx(que);
		if (txr->base)
			i40e_free_dma_mem(&pf->hw, &txr->dma);
		IXL_TX_UNLOCK(txr);
		IXL_TX_LOCK_DESTROY(txr);

		/* RX lock may be uninitialized if setup failed mid-queue */
		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
			continue;
		IXL_RX_LOCK(rxr);
		ixl_free_que_rx(que);
		if (rxr->base)
			i40e_free_dma_mem(&pf->hw, &rxr->dma);
		IXL_RX_UNLOCK(rxr);
		IXL_RX_LOCK_DESTROY(rxr);
		
	}
	free(vsi->queues, M_DEVBUF);

	/* Free VSI filter list */
	while (!SLIST_EMPTY(&vsi->ftl)) {
		f = SLIST_FIRST(&vsi->ftl);
		SLIST_REMOVE_HEAD(&vsi->ftl, next);
		free(f, M_DEVBUF);
	}
}
2720
2721
2722 /*********************************************************************
2723  *
2724  *  Allocate memory for the VSI (virtual station interface) and their
2725  *  associated queues, rings and the descriptors associated with each,
2726  *  called only once at attach.
2727  *
2728  **********************************************************************/
static int
ixl_setup_stations(struct ixl_pf *pf)
{
	device_t		dev = pf->dev;
	struct ixl_vsi		*vsi;
	struct ixl_queue	*que;
	struct tx_ring		*txr;
	struct rx_ring		*rxr;
	int			rsize, tsize;
	int			error = I40E_SUCCESS;

	vsi = &pf->vsi;
	vsi->back = (void *)pf;
	vsi->hw = &pf->hw;
	vsi->id = 0;
	vsi->num_vlans = 0;

	/* Get memory for the station queues */
	if (!(vsi->queues =
	    (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		error = ENOMEM;
		goto early;
	}

	/* Per queue: TX lock/ring/soft-state/buf ring, then the RX side */
	for (int i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		que->num_desc = ixl_ringsz;
		que->me = i;
		que->vsi = vsi;
		/* mark the queue as active */
		vsi->active_queues |= (u64)1 << que->me;
		txr = &que->txr;
		txr->que = que;
		txr->tail = I40E_QTX_TAIL(que->me);

		/* Initialize the TX lock */
		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(dev), que->me);
		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
		/*
		** Create the TX descriptor ring; the extra u32 holds the
		** head-writeback word placed past the last descriptor
		** (see ixl_initialize_vsi()).
		*/
		tsize = roundup2((que->num_desc *
		    sizeof(struct i40e_tx_desc)) +
		    sizeof(u32), DBA_ALIGN);
		if (i40e_allocate_dma_mem(&pf->hw,
		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
			device_printf(dev,
			    "Unable to allocate TX Descriptor memory\n");
			error = ENOMEM;
			goto fail;
		}
		txr->base = (struct i40e_tx_desc *)txr->dma.va;
		bzero((void *)txr->base, tsize);
		/* Now allocate transmit soft structs for the ring */
		if (ixl_allocate_tx_data(que)) {
			device_printf(dev,
			    "Critical Failure setting up TX structures\n");
			error = ENOMEM;
			goto fail;
		}
		/* Allocate a buf ring */
		txr->br = buf_ring_alloc(4096, M_DEVBUF,
		    M_WAITOK, &txr->mtx);
		if (txr->br == NULL) {
			device_printf(dev,
			    "Critical Failure setting up TX buf ring\n");
			error = ENOMEM;
			goto fail;
		}

		/*
		 * Next the RX queues...
		 */ 
		rsize = roundup2(que->num_desc *
		    sizeof(union i40e_rx_desc), DBA_ALIGN);
		rxr = &que->rxr;
		rxr->que = que;
		rxr->tail = I40E_QRX_TAIL(que->me);

		/* Initialize the RX side lock */
		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(dev), que->me);
		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);

		if (i40e_allocate_dma_mem(&pf->hw,
		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
			device_printf(dev,
			    "Unable to allocate RX Descriptor memory\n");
			error = ENOMEM;
			goto fail;
		}
		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
		bzero((void *)rxr->base, rsize);

		/* Allocate receive soft structs for the ring*/
		if (ixl_allocate_rx_data(que)) {
			device_printf(dev,
			    "Critical Failure setting up receive structs\n");
			error = ENOMEM;
			goto fail;
		}
	}

	return (0);

fail:
	/*
	** NOTE(review): this path frees only descriptor DMA memory; soft
	** structs, buf rings, mutexes and vsi->queues appear to be left
	** for later teardown (presumably ixl_free_vsi()) — confirm the
	** attach error path actually calls it.
	*/
	for (int i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		rxr = &que->rxr;
		txr = &que->txr;
		if (rxr->base)
			i40e_free_dma_mem(&pf->hw, &rxr->dma);
		if (txr->base)
			i40e_free_dma_mem(&pf->hw, &txr->dma);
	}

early:
	return (error);
}
2849
2850 /*
2851 ** Provide a update to the queue RX
2852 ** interrupt moderation value.
2853 */
2854 static void
2855 ixl_set_queue_rx_itr(struct ixl_queue *que)
2856 {
2857         struct ixl_vsi  *vsi = que->vsi;
2858         struct i40e_hw  *hw = vsi->hw;
2859         struct rx_ring  *rxr = &que->rxr;
2860         u16             rx_itr;
2861         u16             rx_latency = 0;
2862         int             rx_bytes;
2863
2864
2865         /* Idle, do nothing */
2866         if (rxr->bytes == 0)
2867                 return;
2868
2869         if (ixl_dynamic_rx_itr) {
2870                 rx_bytes = rxr->bytes/rxr->itr;
2871                 rx_itr = rxr->itr;
2872
2873                 /* Adjust latency range */
2874                 switch (rxr->latency) {
2875                 case IXL_LOW_LATENCY:
2876                         if (rx_bytes > 10) {
2877                                 rx_latency = IXL_AVE_LATENCY;
2878                                 rx_itr = IXL_ITR_20K;
2879                         }
2880                         break;
2881                 case IXL_AVE_LATENCY:
2882                         if (rx_bytes > 20) {
2883                                 rx_latency = IXL_BULK_LATENCY;
2884                                 rx_itr = IXL_ITR_8K;
2885                         } else if (rx_bytes <= 10) {
2886                                 rx_latency = IXL_LOW_LATENCY;
2887                                 rx_itr = IXL_ITR_100K;
2888                         }
2889                         break;
2890                 case IXL_BULK_LATENCY:
2891                         if (rx_bytes <= 20) {
2892                                 rx_latency = IXL_AVE_LATENCY;
2893                                 rx_itr = IXL_ITR_20K;
2894                         }
2895                         break;
2896                  }
2897
2898                 rxr->latency = rx_latency;
2899
2900                 if (rx_itr != rxr->itr) {
2901                         /* do an exponential smoothing */
2902                         rx_itr = (10 * rx_itr * rxr->itr) /
2903                             ((9 * rx_itr) + rxr->itr);
2904                         rxr->itr = rx_itr & IXL_MAX_ITR;
2905                         wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2906                             que->me), rxr->itr);
2907                 }
2908         } else { /* We may have have toggled to non-dynamic */
2909                 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2910                         vsi->rx_itr_setting = ixl_rx_itr;
2911                 /* Update the hardware if needed */
2912                 if (rxr->itr != vsi->rx_itr_setting) {
2913                         rxr->itr = vsi->rx_itr_setting;
2914                         wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2915                             que->me), rxr->itr);
2916                 }
2917         }
2918         rxr->bytes = 0;
2919         rxr->packets = 0;
2920         return;
2921 }
2922
2923
2924 /*
2925 ** Provide a update to the queue TX
2926 ** interrupt moderation value.
2927 */
2928 static void
2929 ixl_set_queue_tx_itr(struct ixl_queue *que)
2930 {
2931         struct ixl_vsi  *vsi = que->vsi;
2932         struct i40e_hw  *hw = vsi->hw;
2933         struct tx_ring  *txr = &que->txr;
2934         u16             tx_itr;
2935         u16             tx_latency = 0;
2936         int             tx_bytes;
2937
2938
2939         /* Idle, do nothing */
2940         if (txr->bytes == 0)
2941                 return;
2942
2943         if (ixl_dynamic_tx_itr) {
2944                 tx_bytes = txr->bytes/txr->itr;
2945                 tx_itr = txr->itr;
2946
2947                 switch (txr->latency) {
2948                 case IXL_LOW_LATENCY:
2949                         if (tx_bytes > 10) {
2950                                 tx_latency = IXL_AVE_LATENCY;
2951                                 tx_itr = IXL_ITR_20K;
2952                         }
2953                         break;
2954                 case IXL_AVE_LATENCY:
2955                         if (tx_bytes > 20) {
2956                                 tx_latency = IXL_BULK_LATENCY;
2957                                 tx_itr = IXL_ITR_8K;
2958                         } else if (tx_bytes <= 10) {
2959                                 tx_latency = IXL_LOW_LATENCY;
2960                                 tx_itr = IXL_ITR_100K;
2961                         }
2962                         break;
2963                 case IXL_BULK_LATENCY:
2964                         if (tx_bytes <= 20) {
2965                                 tx_latency = IXL_AVE_LATENCY;
2966                                 tx_itr = IXL_ITR_20K;
2967                         }
2968                         break;
2969                 }
2970
2971                 txr->latency = tx_latency;
2972
2973                 if (tx_itr != txr->itr) {
2974                  /* do an exponential smoothing */
2975                         tx_itr = (10 * tx_itr * txr->itr) /
2976                             ((9 * tx_itr) + txr->itr);
2977                         txr->itr = tx_itr & IXL_MAX_ITR;
2978                         wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2979                             que->me), txr->itr);
2980                 }
2981
2982         } else { /* We may have have toggled to non-dynamic */
2983                 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2984                         vsi->tx_itr_setting = ixl_tx_itr;
2985                 /* Update the hardware if needed */
2986                 if (txr->itr != vsi->tx_itr_setting) {
2987                         txr->itr = vsi->tx_itr_setting;
2988                         wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2989                             que->me), txr->itr);
2990                 }
2991         }
2992         txr->bytes = 0;
2993         txr->packets = 0;
2994         return;
2995 }
2996
2997
/*
** Register the driver's sysctl statistics tree: driver counters at the
** device node, per-VSI ethernet stats under "vsi", per-queue counters
** under "vsi.queN", and MAC/port stats under "mac".
*/
static void
ixl_add_hw_stats(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_queue *queues = vsi->queues;
	struct i40e_eth_stats *vsi_stats = &vsi->eth_stats;
	struct i40e_hw_port_stats *pf_stats = &pf->stats;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);

	struct sysctl_oid *vsi_node, *queue_node;
	struct sysctl_oid_list *vsi_list, *queue_list;

	struct tx_ring *txr;
	struct rx_ring *rxr;

	/* Driver statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
			CTLFLAG_RD, &pf->watchdog_events,
			"Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
			CTLFLAG_RD, &pf->admin_irq,
			"Admin Queue IRQ Handled");

	/* VSI statistics */
#define QUEUE_NAME_LEN 32
	char queue_namebuf[QUEUE_NAME_LEN];
	
	// ERJ: Only one vsi now, re-do when >1 VSI enabled
	// snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", vsi->info.stat_counter_idx);
	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
				   CTLFLAG_RD, NULL, "VSI-specific stats");
	vsi_list = SYSCTL_CHILDREN(vsi_node);

	ixl_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats);

	/* Queue statistics: one "queN" subtree per queue pair */
	for (int q = 0; q < vsi->num_queues; q++) {
		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
					     CTLFLAG_RD, NULL, "Queue #");
		queue_list = SYSCTL_CHILDREN(queue_node);

		txr = &(queues[q].txr);
		rxr = &(queues[q].rxr);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
				"m_defrag() failed");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
				CTLFLAG_RD, &(queues[q].dropped_pkts),
				"Driver dropped packets");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
				CTLFLAG_RD, &(queues[q].irqs),
				"irqs on this queue");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
				CTLFLAG_RD, &(queues[q].tso),
				"TSO");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
				CTLFLAG_RD, &(queues[q].tx_dma_setup),
				"Driver tx dma failure in xmit");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
				CTLFLAG_RD, &(txr->no_desc),
				"Queue No Descriptor Available");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
				CTLFLAG_RD, &(txr->total_packets),
				"Queue Packets Transmitted");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
				CTLFLAG_RD, &(txr->tx_bytes),
				"Queue Bytes Transmitted");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
				CTLFLAG_RD, &(rxr->rx_packets),
				"Queue Packets Received");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
				CTLFLAG_RD, &(rxr->rx_bytes),
				"Queue Bytes Received");
	}

	/* MAC stats */
	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
}
3082
3083 static void
3084 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3085         struct sysctl_oid_list *child,
3086         struct i40e_eth_stats *eth_stats)
3087 {
3088         struct ixl_sysctl_info ctls[] =
3089         {
3090                 {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3091                 {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3092                         "Unicast Packets Received"},
3093                 {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3094                         "Multicast Packets Received"},
3095                 {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3096                         "Broadcast Packets Received"},
3097                 {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3098                 {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3099                 {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3100                 {&eth_stats->tx_multicast, "mcast_pkts_txd",
3101                         "Multicast Packets Transmitted"},
3102                 {&eth_stats->tx_broadcast, "bcast_pkts_txd",
3103                         "Broadcast Packets Transmitted"},
3104                 // end
3105                 {0,0,0}
3106         };
3107
3108         struct ixl_sysctl_info *entry = ctls;
3109         while (entry->stat != 0)
3110         {
3111                 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3112                                 CTLFLAG_RD, entry->stat,
3113                                 entry->description);
3114                 entry++;
3115         }
3116 }
3117
3118 static void
3119 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3120         struct sysctl_oid_list *child,
3121         struct i40e_hw_port_stats *stats)
3122 {
3123         struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3124                                     CTLFLAG_RD, NULL, "Mac Statistics");
3125         struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3126
3127         struct i40e_eth_stats *eth_stats = &stats->eth;
3128         ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3129
3130         struct ixl_sysctl_info ctls[] = 
3131         {
3132                 {&stats->crc_errors, "crc_errors", "CRC Errors"},
3133                 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3134                 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3135                 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3136                 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3137                 /* Packet Reception Stats */
3138                 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3139                 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3140                 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3141                 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3142                 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3143                 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3144                 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3145                 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3146                 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3147                 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3148                 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3149                 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3150                 /* Packet Transmission Stats */
3151                 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3152                 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3153                 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3154                 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3155                 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3156                 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3157                 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3158                 /* Flow control */
3159                 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3160                 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3161                 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3162                 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3163                 /* End */
3164                 {0,0,0}
3165         };
3166
3167         struct ixl_sysctl_info *entry = ctls;
3168         while (entry->stat != 0)
3169         {
3170                 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3171                                 CTLFLAG_RD, entry->stat,
3172                                 entry->description);
3173                 entry++;
3174         }
3175 }
3176
/*
** ixl_config_rss - setup RSS 
**  - note this is done for the single vsi
*/
static void ixl_config_rss(struct ixl_vsi *vsi)
{
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = vsi->hw;
	u32		lut = 0;
	u64		set_hena = 0, hena;
	int		i, j, que_id;
#ifdef RSS
	u32		rss_hash_config;
	u32		rss_seed[IXL_KEYSZ];
#else
	/* Fixed default hash key used when the kernel RSS option is absent */
	u32		rss_seed[IXL_KEYSZ] = {0x41b01687,
			    0x183cfd8c, 0xce880440, 0x580cbc3c,
			    0x35897377, 0x328b25e1, 0x4fa98922,
			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
#endif

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_seed);
#endif

	/* Fill out hash function seed (one HKEY register per key word) */
	for (i = 0; i < IXL_KEYSZ; i++)
		wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);

	/* Enable PCTYPES for RSS: */
#ifdef RSS
	/* Translate the kernel RSS hash configuration into HENA bits */
	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	/* Without kernel RSS, enable hashing for all supported packet types */
	set_hena =
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
#endif
	/* HENA is a 64-bit mask split across two 32-bit registers; OR in
	** the new bits without clearing whatever is already enabled. */
	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= set_hena;
	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
		if (j == vsi->num_queues)
			j = 0;
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues.)
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % vsi->num_queues;
#else
		que_id = j;
#endif
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (que_id &
		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3)
			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}
	ixl_flush(hw);
}
3268
3269
3270 /*
3271 ** This routine is run via an vlan config EVENT,
3272 ** it enables us to use the HW Filter table since
3273 ** we can get the vlan id. This just creates the
3274 ** entry in the soft version of the VFTA, init will
3275 ** repopulate the real table.
3276 */
3277 static void
3278 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3279 {
3280         struct ixl_vsi  *vsi = ifp->if_softc;
3281         struct i40e_hw  *hw = vsi->hw;
3282         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
3283
3284         if (ifp->if_softc !=  arg)   /* Not our event */
3285                 return;
3286
3287         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3288                 return;
3289
3290         IXL_PF_LOCK(pf);
3291         ++vsi->num_vlans;
3292         ixl_add_filter(vsi, hw->mac.addr, vtag);
3293         IXL_PF_UNLOCK(pf);
3294 }
3295
3296 /*
3297 ** This routine is run via an vlan
3298 ** unconfig EVENT, remove our entry
3299 ** in the soft vfta.
3300 */
3301 static void
3302 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3303 {
3304         struct ixl_vsi  *vsi = ifp->if_softc;
3305         struct i40e_hw  *hw = vsi->hw;
3306         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
3307
3308         if (ifp->if_softc !=  arg)
3309                 return;
3310
3311         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3312                 return;
3313
3314         IXL_PF_LOCK(pf);
3315         --vsi->num_vlans;
3316         ixl_del_filter(vsi, hw->mac.addr, vtag);
3317         IXL_PF_UNLOCK(pf);
3318 }
3319
3320 /*
3321 ** This routine updates vlan filters, called by init
3322 ** it scans the filter table and then updates the hw
3323 ** after a soft reset.
3324 */
3325 static void
3326 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3327 {
3328         struct ixl_mac_filter   *f;
3329         int                     cnt = 0, flags;
3330
3331         if (vsi->num_vlans == 0)
3332                 return;
3333         /*
3334         ** Scan the filter list for vlan entries,
3335         ** mark them for addition and then call
3336         ** for the AQ update.
3337         */
3338         SLIST_FOREACH(f, &vsi->ftl, next) {
3339                 if (f->flags & IXL_FILTER_VLAN) {
3340                         f->flags |=
3341                             (IXL_FILTER_ADD |
3342                             IXL_FILTER_USED);
3343                         cnt++;
3344                 }
3345         }
3346         if (cnt == 0) {
3347                 printf("setup vlan: no filters found!\n");
3348                 return;
3349         }
3350         flags = IXL_FILTER_VLAN;
3351         flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3352         ixl_add_hw_filters(vsi, flags, cnt);
3353         return;
3354 }
3355
3356 /*
3357 ** Initialize filter list and add filters that the hardware
3358 ** needs to know about.
3359 */
3360 static void
3361 ixl_init_filters(struct ixl_vsi *vsi)
3362 {
3363         /* Add broadcast address */
3364         u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
3365         ixl_add_filter(vsi, bc, IXL_VLAN_ANY);
3366 }
3367
3368 /*
3369 ** This routine adds mulicast filters
3370 */
3371 static void
3372 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3373 {
3374         struct ixl_mac_filter *f;
3375
3376         /* Does one already exist */
3377         f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3378         if (f != NULL)
3379                 return;
3380
3381         f = ixl_get_filter(vsi);
3382         if (f == NULL) {
3383                 printf("WARNING: no filter available!!\n");
3384                 return;
3385         }
3386         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3387         f->vlan = IXL_VLAN_ANY;
3388         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3389             | IXL_FILTER_MC);
3390
3391         return;
3392 }
3393
/*
** This routine adds macvlan filters
**
** Adds a soft-list entry for (macaddr, vlan) and immediately programs
** it into the hardware via a single-entry AQ update.  Silently returns
** if an identical filter already exists.
*/
static void
ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter	*f, *tmp;
	device_t		dev = vsi->dev;

	DEBUGOUT("ixl_add_filter: begin");

	/* Does one already exist */
	f = ixl_find_filter(vsi, macaddr, vlan);
	if (f != NULL)
		return;
	/*
	** Is this the first vlan being registered, if so we
	** need to remove the ANY filter that indicates we are
	** not in a vlan, and replace that with a 0 filter.
	*/
	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
		if (tmp != NULL) {
			/* Order matters: delete the ANY filter first, then
			** recurse to install the vlan-0 replacement. */
			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
			ixl_add_filter(vsi, macaddr, 0);
		}
	}

	f = ixl_get_filter(vsi);
	if (f == NULL) {
		device_printf(dev, "WARNING: no filter available!!\n");
		return;
	}
	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
	f->vlan = vlan;
	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
	/* Tag true vlan filters so init can find and reprogram them */
	if (f->vlan != IXL_VLAN_ANY)
		f->flags |= IXL_FILTER_VLAN;

	/* Program this one new filter into the hardware now */
	ixl_add_hw_filters(vsi, f->flags, 1);
	return;
}
3436
/*
** Remove the (macaddr, vlan) filter from the soft list and the
** hardware.  When the last vlan has been removed, swap the vlan-0
** filter back to a VLAN_ANY filter (mirror of the replacement done
** in ixl_add_filter).
*/
static void
ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f;

	f = ixl_find_filter(vsi, macaddr, vlan);
	if (f == NULL)
		return;

	/* Mark it and push the single deletion to the hardware */
	f->flags |= IXL_FILTER_DEL;
	ixl_del_hw_filters(vsi, 1);

	/* Check if this is the last vlan removal */
	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
		/* Switch back to a non-vlan filter (recursive call) */
		ixl_del_filter(vsi, macaddr, 0);
		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
	}
	return;
}
3457
3458 /*
3459 ** Find the filter with both matching mac addr and vlan id
3460 */
3461 static struct ixl_mac_filter *
3462 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3463 {
3464         struct ixl_mac_filter   *f;
3465         bool                    match = FALSE;
3466
3467         SLIST_FOREACH(f, &vsi->ftl, next) {
3468                 if (!cmp_etheraddr(f->macaddr, macaddr))
3469                         continue;
3470                 if (f->vlan == vlan) {
3471                         match = TRUE;
3472                         break;
3473                 }
3474         }       
3475
3476         if (!match)
3477                 f = NULL;
3478         return (f);
3479 }
3480
/*
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
**
** 'flags' must match a filter's flag word exactly (==, not &), and
** 'cnt' bounds both the allocation and the number of entries sent.
*/
static void
ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
{
	struct i40e_aqc_add_macvlan_element_data *a, *b;
	struct ixl_mac_filter	*f;
	struct i40e_hw	*hw = vsi->hw;
	device_t	dev = vsi->dev;
	int		err, j = 0;

	/* AQ element array for up to cnt filters; M_NOWAIT: may be
	** called from contexts where sleeping is not allowed. */
	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "add_hw_filters failed to get memory\n");
		return;
	}

	/*
	** Scan the filter list, each time we find one
	** we add it to the admin queue array and turn off
	** the add bit.
	*/
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if (f->flags == flags) {
			b = &a[j]; // a pox on fvl long names :)
			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
			/* VLAN_ANY is a soft-list sentinel; hw wants 0 */
			b->vlan_tag =
			    (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
			b->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			f->flags &= ~IXL_FILTER_ADD;
			j++;
		}
		if (j == cnt)
			break;
	}
	if (j > 0) {
		/* Issue one AQ command covering all j collected filters */
		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
		if (err) 
			device_printf(dev, "aq_add_macvlan err %d, aq_error %d\n",
			    err, hw->aq.asq_last_status);
		else
			vsi->hw_filters_add += j;
	}
	free(a, M_DEVBUF);
	return;
}
3531
3532 /*
3533 ** This routine takes removals in the vsi filter
3534 ** table and creates an Admin Queue call to delete
3535 ** the filters in the hardware.
3536 */
3537 static void
3538 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3539 {
3540         struct i40e_aqc_remove_macvlan_element_data *d, *e;
3541         struct i40e_hw          *hw = vsi->hw;
3542         device_t                dev = vsi->dev;
3543         struct ixl_mac_filter   *f, *f_temp;
3544         int                     err, j = 0;
3545
3546         DEBUGOUT("ixl_del_hw_filters: begin\n");
3547
3548         d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3549             M_DEVBUF, M_NOWAIT | M_ZERO);
3550         if (d == NULL) {
3551                 printf("del hw filter failed to get memory\n");
3552                 return;
3553         }
3554
3555         SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3556                 if (f->flags & IXL_FILTER_DEL) {
3557                         e = &d[j]; // a pox on fvl long names :)
3558                         bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3559                         e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3560                         e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3561                         /* delete entry from vsi list */
3562                         SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3563                         free(f, M_DEVBUF);
3564                         j++;
3565                 }
3566                 if (j == cnt)
3567                         break;
3568         }
3569         if (j > 0) {
3570                 err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3571                 /* NOTE: returns ENOENT every time but seems to work fine,
3572                    so we'll ignore that specific error. */
3573                 // TODO: Does this still occur on current firmwares?
3574                 if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3575                         int sc = 0;
3576                         for (int i = 0; i < j; i++)
3577                                 sc += (!d[i].error_code);
3578                         vsi->hw_filters_del += sc;
3579                         device_printf(dev,
3580                             "Failed to remove %d/%d filters, aq error %d\n",
3581                             j - sc, j, hw->aq.asq_last_status);
3582                 } else
3583                         vsi->hw_filters_del += j;
3584         }
3585         free(d, M_DEVBUF);
3586
3587         DEBUGOUT("ixl_del_hw_filters: end\n");
3588         return;
3589 }
3590
3591
3592 static void
3593 ixl_enable_rings(struct ixl_vsi *vsi)
3594 {
3595         struct i40e_hw  *hw = vsi->hw;
3596         u32             reg;
3597
3598         for (int i = 0; i < vsi->num_queues; i++) {
3599                 i40e_pre_tx_queue_cfg(hw, i, TRUE);
3600
3601                 reg = rd32(hw, I40E_QTX_ENA(i));
3602                 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3603                     I40E_QTX_ENA_QENA_STAT_MASK;
3604                 wr32(hw, I40E_QTX_ENA(i), reg);
3605                 /* Verify the enable took */
3606                 for (int j = 0; j < 10; j++) {
3607                         reg = rd32(hw, I40E_QTX_ENA(i));
3608                         if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3609                                 break;
3610                         i40e_msec_delay(10);
3611                 }
3612                 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
3613                         printf("TX queue %d disabled!\n", i);
3614
3615                 reg = rd32(hw, I40E_QRX_ENA(i));
3616                 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3617                     I40E_QRX_ENA_QENA_STAT_MASK;
3618                 wr32(hw, I40E_QRX_ENA(i), reg);
3619                 /* Verify the enable took */
3620                 for (int j = 0; j < 10; j++) {
3621                         reg = rd32(hw, I40E_QRX_ENA(i));
3622                         if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3623                                 break;
3624                         i40e_msec_delay(10);
3625                 }
3626                 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3627                         printf("RX queue %d disabled!\n", i);
3628         }
3629 }
3630
3631 static void
3632 ixl_disable_rings(struct ixl_vsi *vsi)
3633 {
3634         struct i40e_hw  *hw = vsi->hw;
3635         u32             reg;
3636
3637         for (int i = 0; i < vsi->num_queues; i++) {
3638                 i40e_pre_tx_queue_cfg(hw, i, FALSE);
3639                 i40e_usec_delay(500);
3640
3641                 reg = rd32(hw, I40E_QTX_ENA(i));
3642                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3643                 wr32(hw, I40E_QTX_ENA(i), reg);
3644                 /* Verify the disable took */
3645                 for (int j = 0; j < 10; j++) {
3646                         reg = rd32(hw, I40E_QTX_ENA(i));
3647                         if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3648                                 break;
3649                         i40e_msec_delay(10);
3650                 }
3651                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3652                         printf("TX queue %d still enabled!\n", i);
3653
3654                 reg = rd32(hw, I40E_QRX_ENA(i));
3655                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3656                 wr32(hw, I40E_QRX_ENA(i), reg);
3657                 /* Verify the disable took */
3658                 for (int j = 0; j < 10; j++) {
3659                         reg = rd32(hw, I40E_QRX_ENA(i));
3660                         if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3661                                 break;
3662                         i40e_msec_delay(10);
3663                 }
3664                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3665                         printf("RX queue %d still enabled!\n", i);
3666         }
3667 }
3668
3669 /**
3670  * ixl_handle_mdd_event
3671  *
3672  * Called from interrupt handler to identify possibly malicious vfs
3673  * (But also detects events from the PF, as well)
3674  **/
3675 static void ixl_handle_mdd_event(struct ixl_pf *pf)
3676 {
3677         struct i40e_hw *hw = &pf->hw;
3678         device_t dev = pf->dev;
3679         bool mdd_detected = false;
3680         bool pf_mdd_detected = false;
3681         u32 reg;
3682
3683         /* find what triggered the MDD event */
3684         reg = rd32(hw, I40E_GL_MDET_TX);
3685         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3686                 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3687                                 I40E_GL_MDET_TX_PF_NUM_SHIFT;
3688                 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3689                                 I40E_GL_MDET_TX_EVENT_SHIFT;
3690                 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3691                                 I40E_GL_MDET_TX_QUEUE_SHIFT;
3692                 device_printf(dev,
3693                          "Malicious Driver Detection event 0x%02x"
3694                          " on TX queue %d pf number 0x%02x\n",
3695                          event, queue, pf_num);
3696                 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3697                 mdd_detected = true;
3698         }
3699         reg = rd32(hw, I40E_GL_MDET_RX);
3700         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3701                 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3702                                 I40E_GL_MDET_RX_FUNCTION_SHIFT;
3703                 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3704                                 I40E_GL_MDET_RX_EVENT_SHIFT;
3705                 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3706                                 I40E_GL_MDET_RX_QUEUE_SHIFT;
3707                 device_printf(dev,
3708                          "Malicious Driver Detection event 0x%02x"
3709                          " on RX queue %d of function 0x%02x\n",
3710                          event, queue, func);
3711                 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3712                 mdd_detected = true;
3713         }
3714
3715         if (mdd_detected) {
3716                 reg = rd32(hw, I40E_PF_MDET_TX);
3717                 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3718                         wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3719                         device_printf(dev,
3720                                  "MDD TX event is for this function 0x%08x",
3721                                  reg);
3722                         pf_mdd_detected = true;
3723                 }
3724                 reg = rd32(hw, I40E_PF_MDET_RX);
3725                 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3726                         wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3727                         device_printf(dev,
3728                                  "MDD RX event is for this function 0x%08x",
3729                                  reg);
3730                         pf_mdd_detected = true;
3731                 }
3732         }
3733
3734         /* re-enable mdd interrupt cause */
3735         reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3736         reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3737         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3738         ixl_flush(hw);
3739 }
3740
3741 static void
3742 ixl_enable_intr(struct ixl_vsi *vsi)
3743 {
3744         struct i40e_hw          *hw = vsi->hw;
3745         struct ixl_queue        *que = vsi->queues;
3746
3747         if (ixl_enable_msix) {
3748                 ixl_enable_adminq(hw);
3749                 for (int i = 0; i < vsi->num_queues; i++, que++)
3750                         ixl_enable_queue(hw, que->me);
3751         } else
3752                 ixl_enable_legacy(hw);
3753 }
3754
3755 static void
3756 ixl_disable_intr(struct ixl_vsi *vsi)
3757 {
3758         struct i40e_hw          *hw = vsi->hw;
3759         struct ixl_queue        *que = vsi->queues;
3760
3761         if (ixl_enable_msix) {
3762                 ixl_disable_adminq(hw);
3763                 for (int i = 0; i < vsi->num_queues; i++, que++)
3764                         ixl_disable_queue(hw, que->me);
3765         } else
3766                 ixl_disable_legacy(hw);
3767 }
3768
3769 static void
3770 ixl_enable_adminq(struct i40e_hw *hw)
3771 {
3772         u32             reg;
3773
3774         reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3775             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3776             (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3777         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3778         ixl_flush(hw);
3779         return;
3780 }
3781
3782 static void
3783 ixl_disable_adminq(struct i40e_hw *hw)
3784 {
3785         u32             reg;
3786
3787         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3788         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3789
3790         return;
3791 }
3792
3793 static void
3794 ixl_enable_queue(struct i40e_hw *hw, int id)
3795 {
3796         u32             reg;
3797
3798         reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3799             I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3800             (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3801         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3802 }
3803
3804 static void
3805 ixl_disable_queue(struct i40e_hw *hw, int id)
3806 {
3807         u32             reg;
3808
3809         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3810         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3811
3812         return;
3813 }
3814
3815 static void
3816 ixl_enable_legacy(struct i40e_hw *hw)
3817 {
3818         u32             reg;
3819         reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3820             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3821             (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3822         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3823 }
3824
3825 static void
3826 ixl_disable_legacy(struct i40e_hw *hw)
3827 {
3828         u32             reg;
3829
3830         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3831         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3832
3833         return;
3834 }
3835
3836 static void
3837 ixl_update_stats_counters(struct ixl_pf *pf)
3838 {
3839         struct i40e_hw  *hw = &pf->hw;
3840         struct ixl_vsi *vsi = &pf->vsi;
3841
3842         struct i40e_hw_port_stats *nsd = &pf->stats;
3843         struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3844
3845         /* Update hw stats */
3846         ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3847                            pf->stat_offsets_loaded,
3848                            &osd->crc_errors, &nsd->crc_errors);
3849         ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3850                            pf->stat_offsets_loaded,
3851                            &osd->illegal_bytes, &nsd->illegal_bytes);
3852         ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3853                            I40E_GLPRT_GORCL(hw->port),
3854                            pf->stat_offsets_loaded,
3855                            &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3856         ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3857                            I40E_GLPRT_GOTCL(hw->port),
3858                            pf->stat_offsets_loaded,
3859                            &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3860         ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3861                            pf->stat_offsets_loaded,
3862                            &osd->eth.rx_discards,
3863                            &nsd->eth.rx_discards);
3864         ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3865                            I40E_GLPRT_UPRCL(hw->port),
3866                            pf->stat_offsets_loaded,
3867                            &osd->eth.rx_unicast,
3868                            &nsd->eth.rx_unicast);
3869         ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3870                            I40E_GLPRT_UPTCL(hw->port),
3871                            pf->stat_offsets_loaded,
3872                            &osd->eth.tx_unicast,
3873                            &nsd->eth.tx_unicast);
3874         ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3875                            I40E_GLPRT_MPRCL(hw->port),
3876                            pf->stat_offsets_loaded,
3877                            &osd->eth.rx_multicast,
3878                            &nsd->eth.rx_multicast);
3879         ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3880                            I40E_GLPRT_MPTCL(hw->port),
3881                            pf->stat_offsets_loaded,
3882                            &osd->eth.tx_multicast,
3883                            &nsd->eth.tx_multicast);
3884         ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3885                            I40E_GLPRT_BPRCL(hw->port),
3886                            pf->stat_offsets_loaded,
3887                            &osd->eth.rx_broadcast,
3888                            &nsd->eth.rx_broadcast);
3889         ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3890                            I40E_GLPRT_BPTCL(hw->port),
3891                            pf->stat_offsets_loaded,
3892                            &osd->eth.tx_broadcast,
3893                            &nsd->eth.tx_broadcast);
3894
3895         ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3896                            pf->stat_offsets_loaded,
3897                            &osd->tx_dropped_link_down,
3898                            &nsd->tx_dropped_link_down);
3899         ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3900                            pf->stat_offsets_loaded,
3901                            &osd->mac_local_faults,
3902                            &nsd->mac_local_faults);
3903         ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3904                            pf->stat_offsets_loaded,
3905                            &osd->mac_remote_faults,
3906                            &nsd->mac_remote_faults);
3907         ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3908                            pf->stat_offsets_loaded,
3909                            &osd->rx_length_errors,
3910                            &nsd->rx_length_errors);
3911
3912         /* Flow control (LFC) stats */
3913         ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3914                            pf->stat_offsets_loaded,
3915                            &osd->link_xon_rx, &nsd->link_xon_rx);
3916         ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3917                            pf->stat_offsets_loaded,
3918                            &osd->link_xon_tx, &nsd->link_xon_tx);
3919         ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3920                            pf->stat_offsets_loaded,
3921                            &osd->link_xoff_rx, &nsd->link_xoff_rx);
3922         ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3923                            pf->stat_offsets_loaded,
3924                            &osd->link_xoff_tx, &nsd->link_xoff_tx);
3925
3926         /* Packet size stats rx */
3927         ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3928                            I40E_GLPRT_PRC64L(hw->port),
3929                            pf->stat_offsets_loaded,
3930                            &osd->rx_size_64, &nsd->rx_size_64);
3931         ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3932                            I40E_GLPRT_PRC127L(hw->port),
3933                            pf->stat_offsets_loaded,
3934                            &osd->rx_size_127, &nsd->rx_size_127);
3935         ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3936                            I40E_GLPRT_PRC255L(hw->port),
3937                            pf->stat_offsets_loaded,
3938                            &osd->rx_size_255, &nsd->rx_size_255);
3939         ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3940                            I40E_GLPRT_PRC511L(hw->port),
3941                            pf->stat_offsets_loaded,
3942                            &osd->rx_size_511, &nsd->rx_size_511);
3943         ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3944                            I40E_GLPRT_PRC1023L(hw->port),
3945                            pf->stat_offsets_loaded,
3946                            &osd->rx_size_1023, &nsd->rx_size_1023);
3947         ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3948                            I40E_GLPRT_PRC1522L(hw->port),
3949                            pf->stat_offsets_loaded,
3950                            &osd->rx_size_1522, &nsd->rx_size_1522);
3951         ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3952                            I40E_GLPRT_PRC9522L(hw->port),
3953                            pf->stat_offsets_loaded,
3954                            &osd->rx_size_big, &nsd->rx_size_big);
3955
3956         /* Packet size stats tx */
3957         ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3958                            I40E_GLPRT_PTC64L(hw->port),
3959                            pf->stat_offsets_loaded,
3960                            &osd->tx_size_64, &nsd->tx_size_64);
3961         ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3962                            I40E_GLPRT_PTC127L(hw->port),
3963                            pf->stat_offsets_loaded,
3964                            &osd->tx_size_127, &nsd->tx_size_127);
3965         ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3966                            I40E_GLPRT_PTC255L(hw->port),
3967                            pf->stat_offsets_loaded,
3968                            &osd->tx_size_255, &nsd->tx_size_255);
3969         ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3970                            I40E_GLPRT_PTC511L(hw->port),
3971                            pf->stat_offsets_loaded,
3972                            &osd->tx_size_511, &nsd->tx_size_511);
3973         ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3974                            I40E_GLPRT_PTC1023L(hw->port),
3975                            pf->stat_offsets_loaded,
3976                            &osd->tx_size_1023, &nsd->tx_size_1023);
3977         ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3978                            I40E_GLPRT_PTC1522L(hw->port),
3979                            pf->stat_offsets_loaded,
3980                            &osd->tx_size_1522, &nsd->tx_size_1522);
3981         ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3982                            I40E_GLPRT_PTC9522L(hw->port),
3983                            pf->stat_offsets_loaded,
3984                            &osd->tx_size_big, &nsd->tx_size_big);
3985
3986         ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
3987                            pf->stat_offsets_loaded,
3988                            &osd->rx_undersize, &nsd->rx_undersize);
3989         ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
3990                            pf->stat_offsets_loaded,
3991                            &osd->rx_fragments, &nsd->rx_fragments);
3992         ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
3993                            pf->stat_offsets_loaded,
3994                            &osd->rx_oversize, &nsd->rx_oversize);
3995         ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
3996                            pf->stat_offsets_loaded,
3997                            &osd->rx_jabber, &nsd->rx_jabber);
3998         pf->stat_offsets_loaded = true;
3999         /* End hw stats */
4000
4001         /* Update vsi stats */
4002         ixl_update_eth_stats(vsi);
4003
4004         /* OS statistics */
4005         // ERJ - these are per-port, update all vsis?
4006         IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes);
4007 }
4008
4009 /*
4010 ** Tasklet handler for MSIX Adminq interrupts
4011 **  - do outside interrupt since it might sleep
4012 */
4013 static void
4014 ixl_do_adminq(void *context, int pending)
4015 {
4016         struct ixl_pf                   *pf = context;
4017         struct i40e_hw                  *hw = &pf->hw;
4018         struct ixl_vsi                  *vsi = &pf->vsi;
4019         struct i40e_arq_event_info      event;
4020         i40e_status                     ret;
4021         u32                             reg, loop = 0;
4022         u16                             opcode, result;
4023
4024         event.buf_len = IXL_AQ_BUF_SZ;
4025         event.msg_buf = malloc(event.buf_len,
4026             M_DEVBUF, M_NOWAIT | M_ZERO);
4027         if (!event.msg_buf) {
4028                 printf("Unable to allocate adminq memory\n");
4029                 return;
4030         }
4031
4032         /* clean and process any events */
4033         do {
4034                 ret = i40e_clean_arq_element(hw, &event, &result);
4035                 if (ret)
4036                         break;
4037                 opcode = LE16_TO_CPU(event.desc.opcode);
4038                 switch (opcode) {
4039                 case i40e_aqc_opc_get_link_status:
4040                         vsi->link_up = ixl_config_link(hw);
4041                         ixl_update_link_status(pf);
4042                         break;
4043                 case i40e_aqc_opc_send_msg_to_pf:
4044                         /* process pf/vf communication here */
4045                         break;
4046                 case i40e_aqc_opc_event_lan_overflow:
4047                         break;
4048                 default:
4049 #ifdef IXL_DEBUG
4050                         printf("AdminQ unknown event %x\n", opcode);
4051 #endif
4052                         break;
4053                 }
4054
4055         } while (result && (loop++ < IXL_ADM_LIMIT));
4056
4057         reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4058         reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4059         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4060         free(event.msg_buf, M_DEVBUF);
4061
4062         if (pf->msix > 1)
4063                 ixl_enable_adminq(&pf->hw);
4064         else
4065                 ixl_enable_intr(vsi);
4066 }
4067
4068 static int
4069 ixl_debug_info(SYSCTL_HANDLER_ARGS)
4070 {
4071         struct ixl_pf   *pf;
4072         int             error, input = 0;
4073
4074         error = sysctl_handle_int(oidp, &input, 0, req);
4075
4076         if (error || !req->newptr)
4077                 return (error);
4078
4079         if (input == 1) {
4080                 pf = (struct ixl_pf *)arg1;
4081                 ixl_print_debug_info(pf);
4082         }
4083
4084         return (error);
4085 }
4086
4087 static void
4088 ixl_print_debug_info(struct ixl_pf *pf)
4089 {
4090         struct i40e_hw          *hw = &pf->hw;
4091         struct ixl_vsi          *vsi = &pf->vsi;
4092         struct ixl_queue        *que = vsi->queues;
4093         struct rx_ring          *rxr = &que->rxr;
4094         struct tx_ring          *txr = &que->txr;
4095         u32                     reg;    
4096
4097
4098         printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4099         printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4100         printf("RX next check = %x\n", rxr->next_check);
4101         printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4102         printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4103         printf("TX desc avail = %x\n", txr->avail);
4104
4105         reg = rd32(hw, I40E_GLV_GORCL(0xc));
4106          printf("RX Bytes = %x\n", reg);
4107         reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4108          printf("Port RX Bytes = %x\n", reg);
4109         reg = rd32(hw, I40E_GLV_RDPC(0xc));
4110          printf("RX discard = %x\n", reg);
4111         reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4112          printf("Port RX discard = %x\n", reg);
4113
4114         reg = rd32(hw, I40E_GLV_TEPC(0xc));
4115          printf("TX errors = %x\n", reg);
4116         reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4117          printf("TX Bytes = %x\n", reg);
4118
4119         reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4120          printf("RX undersize = %x\n", reg);
4121         reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4122          printf("RX fragments = %x\n", reg);
4123         reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4124          printf("RX oversize = %x\n", reg);
4125         reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4126          printf("RX length error = %x\n", reg);
4127         reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4128          printf("mac remote fault = %x\n", reg);
4129         reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4130          printf("mac local fault = %x\n", reg);
4131 }
4132
4133 /**
4134  * Update VSI-specific ethernet statistics counters.
4135  **/
4136 void ixl_update_eth_stats(struct ixl_vsi *vsi)
4137 {
4138         struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4139         struct i40e_hw *hw = &pf->hw;
4140         struct i40e_eth_stats *es;
4141         struct i40e_eth_stats *oes;
4142         int i;
4143         uint64_t tx_discards;
4144         struct i40e_hw_port_stats *nsd;
4145         u16 stat_idx = vsi->info.stat_counter_idx;
4146
4147         es = &vsi->eth_stats;
4148         oes = &vsi->eth_stats_offsets;
4149         nsd = &pf->stats;
4150
4151         /* Gather up the stats that the hw collects */
4152         ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4153                            vsi->stat_offsets_loaded,
4154                            &oes->tx_errors, &es->tx_errors);
4155         ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4156                            vsi->stat_offsets_loaded,
4157                            &oes->rx_discards, &es->rx_discards);
4158
4159         ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4160                            I40E_GLV_GORCL(stat_idx),
4161                            vsi->stat_offsets_loaded,
4162                            &oes->rx_bytes, &es->rx_bytes);
4163         ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4164                            I40E_GLV_UPRCL(stat_idx),
4165                            vsi->stat_offsets_loaded,
4166                            &oes->rx_unicast, &es->rx_unicast);
4167         ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4168                            I40E_GLV_MPRCL(stat_idx),
4169                            vsi->stat_offsets_loaded,
4170                            &oes->rx_multicast, &es->rx_multicast);
4171         ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4172                            I40E_GLV_BPRCL(stat_idx),
4173                            vsi->stat_offsets_loaded,
4174                            &oes->rx_broadcast, &es->rx_broadcast);
4175
4176         ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4177                            I40E_GLV_GOTCL(stat_idx),
4178                            vsi->stat_offsets_loaded,
4179                            &oes->tx_bytes, &es->tx_bytes);
4180         ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4181                            I40E_GLV_UPTCL(stat_idx),
4182                            vsi->stat_offsets_loaded,
4183                            &oes->tx_unicast, &es->tx_unicast);
4184         ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4185                            I40E_GLV_MPTCL(stat_idx),
4186                            vsi->stat_offsets_loaded,
4187                            &oes->tx_multicast, &es->tx_multicast);
4188         ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4189                            I40E_GLV_BPTCL(stat_idx),
4190                            vsi->stat_offsets_loaded,
4191                            &oes->tx_broadcast, &es->tx_broadcast);
4192         vsi->stat_offsets_loaded = true;
4193
4194         tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4195         for (i = 0; i < vsi->num_queues; i++)
4196                 tx_discards += vsi->queues[i].txr.br->br_drops;
4197
4198         /* Update ifnet stats */
4199         IXL_SET_IPACKETS(vsi, es->rx_unicast +
4200                            es->rx_multicast +
4201                            es->rx_broadcast);
4202         IXL_SET_OPACKETS(vsi, es->tx_unicast +
4203                            es->tx_multicast +
4204                            es->tx_broadcast);
4205         IXL_SET_IBYTES(vsi, es->rx_bytes);
4206         IXL_SET_OBYTES(vsi, es->tx_bytes);
4207         IXL_SET_IMCASTS(vsi, es->rx_multicast);
4208         IXL_SET_OMCASTS(vsi, es->tx_multicast);
4209
4210         IXL_SET_OERRORS(vsi, es->tx_errors);
4211         IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4212         IXL_SET_OQDROPS(vsi, tx_discards);
4213         IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4214         IXL_SET_COLLISIONS(vsi, 0);
4215 }
4216
4217 /**
4218  * Reset all of the stats for the given pf
4219  **/
4220 void ixl_pf_reset_stats(struct ixl_pf *pf)
4221 {
4222         bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4223         bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4224         pf->stat_offsets_loaded = false;
4225 }
4226
4227 /**
4228  * Resets all stats of the given vsi
4229  **/
4230 void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4231 {
4232         bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4233         bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4234         vsi->stat_offsets_loaded = false;
4235 }
4236
4237 /**
4238  * Read and update a 48 bit stat from the hw
4239  *
4240  * Since the device stats are not reset at PFReset, they likely will not
4241  * be zeroed when the driver starts.  We'll save the first values read
4242  * and use them as offsets to be subtracted from the raw values in order
4243  * to report stats that count from zero.
4244  **/
4245 static void
4246 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4247         bool offset_loaded, u64 *offset, u64 *stat)
4248 {
4249         u64 new_data;
4250
4251 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4252         new_data = rd64(hw, loreg);
4253 #else
4254         /*
4255          * Use two rd32's instead of one rd64; FreeBSD versions before
4256          * 10 don't support 8 byte bus reads/writes.
4257          */
4258         new_data = rd32(hw, loreg);
4259         new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4260 #endif
4261
4262         if (!offset_loaded)
4263                 *offset = new_data;
4264         if (new_data >= *offset)
4265                 *stat = new_data - *offset;
4266         else
4267                 *stat = (new_data + ((u64)1 << 48)) - *offset;
4268         *stat &= 0xFFFFFFFFFFFFULL;
4269 }
4270
4271 /**
4272  * Read and update a 32 bit stat from the hw
4273  **/
4274 static void
4275 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4276         bool offset_loaded, u64 *offset, u64 *stat)
4277 {
4278         u32 new_data;
4279
4280         new_data = rd32(hw, reg);
4281         if (!offset_loaded)
4282                 *offset = new_data;
4283         if (new_data >= *offset)
4284                 *stat = (u32)(new_data - *offset);
4285         else
4286                 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4287 }
4288
4289 /*
4290 ** Set flow control using sysctl:
4291 **      0 - off
4292 **      1 - rx pause
4293 **      2 - tx pause
4294 **      3 - full
4295 */
4296 static int
4297 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4298 {
4299         /*
4300          * TODO: ensure flow control is disabled if
4301          * priority flow control is enabled
4302          *
4303          * TODO: ensure tx CRC by hardware should be enabled
4304          * if tx flow control is enabled.
4305          */
4306         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4307         struct i40e_hw *hw = &pf->hw;
4308         device_t dev = pf->dev;
4309         int requested_fc = 0, error = 0;
4310         enum i40e_status_code aq_error = 0;
4311         u8 fc_aq_err = 0;
4312
4313         aq_error = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
4314         if (aq_error) {
4315                 device_printf(dev,
4316                     "%s: Error retrieving link info from aq, %d\n",
4317                     __func__, aq_error);
4318                 return (EAGAIN);
4319         }
4320
4321         /* Read in new mode */
4322         requested_fc = hw->fc.current_mode;
4323         error = sysctl_handle_int(oidp, &requested_fc, 0, req);
4324         if ((error) || (req->newptr == NULL))
4325                 return (error);
4326         if (requested_fc < 0 || requested_fc > 3) {
4327                 device_printf(dev,
4328                     "Invalid fc mode; valid modes are 0 through 3\n");
4329                 return (EINVAL);
4330         }
4331
4332         /*
4333         ** Changing flow control mode currently does not work on
4334         ** 40GBASE-CR4 PHYs
4335         */
4336         if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4337             || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4338                 device_printf(dev, "Changing flow control mode unsupported"
4339                     " on 40GBase-CR4 media.\n");
4340                 return (ENODEV);
4341         }
4342
4343         /* Set fc ability for port */
4344         hw->fc.requested_mode = requested_fc;
4345         aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4346         if (aq_error) {
4347                 device_printf(dev,
4348                     "%s: Error setting new fc mode %d; fc_err %#x\n",
4349                     __func__, aq_error, fc_aq_err);
4350                 return (EAGAIN);
4351         }
4352
4353         if (hw->fc.current_mode != hw->fc.requested_mode) {
4354                 device_printf(dev, "%s: FC set failure:\n", __func__);
4355                 device_printf(dev, "%s: Current: %s / Requested: %s\n",
4356                     __func__,
4357                     ixl_fc_string[hw->fc.current_mode],
4358                     ixl_fc_string[hw->fc.requested_mode]);
4359         }
4360
4361         return (0);
4362 }
4363
4364 static int
4365 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4366 {
4367         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4368         struct i40e_hw *hw = &pf->hw;
4369         int error = 0, index = 0;
4370
4371         char *speeds[] = {
4372                 "Unknown",
4373                 "100M",
4374                 "1G",
4375                 "10G",
4376                 "40G",
4377                 "20G"
4378         };
4379
4380         ixl_update_link_status(pf);
4381
4382         switch (hw->phy.link_info.link_speed) {
4383         case I40E_LINK_SPEED_100MB:
4384                 index = 1;
4385                 break;
4386         case I40E_LINK_SPEED_1GB:
4387                 index = 2;
4388                 break;
4389         case I40E_LINK_SPEED_10GB:
4390                 index = 3;
4391                 break;
4392         case I40E_LINK_SPEED_40GB:
4393                 index = 4;
4394                 break;
4395         case I40E_LINK_SPEED_20GB:
4396                 index = 5;
4397                 break;
4398         case I40E_LINK_SPEED_UNKNOWN:
4399         default:
4400                 index = 0;
4401                 break;
4402         }
4403
4404         error = sysctl_handle_string(oidp, speeds[index],
4405             strlen(speeds[index]), req);
4406         return (error);
4407 }
4408
/*
** Program the PHY's advertised link speeds from a bitmask
** (0x1 = 100Mb, 0x2 = 1G, 0x4 = 10G), then restart the
** interface to apply the new configuration.
** Returns 0 on success or EAGAIN on AdminQ failure.
*/
static int
ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
{
        struct i40e_hw *hw = &pf->hw;
        device_t dev = pf->dev;
        struct i40e_aq_get_phy_abilities_resp abilities;
        struct i40e_aq_set_phy_config config;
        enum i40e_status_code aq_error = 0;

        /* Get current capability information */
        aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL);
        if (aq_error) {
                device_printf(dev, "%s: Error getting phy capabilities %d,"
                    " aq error: %d\n", __func__, aq_error,
                    hw->aq.asq_last_status);
                return (EAGAIN);
        }

        /* Prepare new config, carrying over current abilities */
        bzero(&config, sizeof(config));
        config.phy_type = abilities.phy_type;
        config.abilities = abilities.abilities
            | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
        config.eee_capability = abilities.eee_capability;
        config.eeer = abilities.eeer_val;
        config.low_power_ctrl = abilities.d3_lpan;
        /* Translate into aq cmd link_speed */
        if (speeds & 0x4)
                config.link_speed |= I40E_LINK_SPEED_10GB;
        if (speeds & 0x2)
                config.link_speed |= I40E_LINK_SPEED_1GB;
        if (speeds & 0x1)
                config.link_speed |= I40E_LINK_SPEED_100MB;

        /* Do aq command & restart link */
        aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
        if (aq_error) {
                device_printf(dev, "%s: Error setting new phy config %d,"
                    " aq error: %d\n", __func__, aq_error,
                    hw->aq.asq_last_status);
                return (EAGAIN);
        }

        /*
        ** This seems a bit heavy handed, but we
        ** need to get a reinit on some devices
        */
        IXL_PF_LOCK(pf);
        ixl_stop(pf);
        ixl_init_locked(pf);
        IXL_PF_UNLOCK(pf);

        return (0);
}
4463
4464 /*
4465 ** Control link advertise speed:
4466 **      Flags:
4467 **      0x1 - advertise 100 Mb
4468 **      0x2 - advertise 1G
4469 **      0x4 - advertise 10G
4470 **
4471 ** Does not work on 40G devices.
4472 */
4473 static int
4474 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4475 {
4476         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4477         struct i40e_hw *hw = &pf->hw;
4478         device_t dev = pf->dev;
4479         int requested_ls = 0;
4480         int error = 0;
4481
4482         /*
4483         ** FW doesn't support changing advertised speed
4484         ** for 40G devices; speed is always 40G.
4485         */
4486         if (i40e_is_40G_device(hw->device_id))
4487                 return (ENODEV);
4488
4489         /* Read in new mode */
4490         requested_ls = pf->advertised_speed;
4491         error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4492         if ((error) || (req->newptr == NULL))
4493                 return (error);
4494         if (requested_ls < 1 || requested_ls > 7) {
4495                 device_printf(dev,
4496                     "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4497                 return (EINVAL);
4498         }
4499
4500         /* Exit if no change */
4501         if (pf->advertised_speed == requested_ls)
4502                 return (0);
4503
4504         error = ixl_set_advertised_speeds(pf, requested_ls);
4505         if (error)
4506                 return (error);
4507
4508         pf->advertised_speed = requested_ls;
4509         ixl_update_link_status(pf);
4510         return (0);
4511 }
4512
4513 /*
4514 ** Get the width and transaction speed of
4515 ** the bus this adapter is plugged into.
4516 */
4517 static u16
4518 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4519 {
4520         u16                     link;
4521         u32                     offset;
4522                 
4523                 
4524         /* Get the PCI Express Capabilities offset */
4525         pci_find_cap(dev, PCIY_EXPRESS, &offset);
4526
4527         /* ...and read the Link Status Register */
4528         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4529
4530         switch (link & I40E_PCI_LINK_WIDTH) {
4531         case I40E_PCI_LINK_WIDTH_1:
4532                 hw->bus.width = i40e_bus_width_pcie_x1;
4533                 break;
4534         case I40E_PCI_LINK_WIDTH_2:
4535                 hw->bus.width = i40e_bus_width_pcie_x2;
4536                 break;
4537         case I40E_PCI_LINK_WIDTH_4:
4538                 hw->bus.width = i40e_bus_width_pcie_x4;
4539                 break;
4540         case I40E_PCI_LINK_WIDTH_8:
4541                 hw->bus.width = i40e_bus_width_pcie_x8;
4542                 break;
4543         default:
4544                 hw->bus.width = i40e_bus_width_unknown;
4545                 break;
4546         }
4547
4548         switch (link & I40E_PCI_LINK_SPEED) {
4549         case I40E_PCI_LINK_SPEED_2500:
4550                 hw->bus.speed = i40e_bus_speed_2500;
4551                 break;
4552         case I40E_PCI_LINK_SPEED_5000:
4553                 hw->bus.speed = i40e_bus_speed_5000;
4554                 break;
4555         case I40E_PCI_LINK_SPEED_8000:
4556                 hw->bus.speed = i40e_bus_speed_8000;
4557                 break;
4558         default:
4559                 hw->bus.speed = i40e_bus_speed_unknown;
4560                 break;
4561         }
4562
4563
4564         device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4565             ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4566             (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4567             (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4568             (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4569             (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4570             (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4571             ("Unknown"));
4572
4573         if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4574             (hw->bus.speed < i40e_bus_speed_8000)) {
4575                 device_printf(dev, "PCI-Express bandwidth available"
4576                     " for this device\n     is not sufficient for"
4577                     " normal operation.\n");
4578                 device_printf(dev, "For expected performance a x8 "
4579                     "PCIE Gen3 slot is required.\n");
4580         }
4581
4582         return (link);
4583 }
4584
4585 static int
4586 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4587 {
4588         struct ixl_pf   *pf = (struct ixl_pf *)arg1;
4589         struct i40e_hw  *hw = &pf->hw;
4590         char            buf[32];
4591
4592         snprintf(buf, sizeof(buf),
4593             "f%d.%d a%d.%d n%02x.%02x e%08x",
4594             hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
4595             hw->aq.api_maj_ver, hw->aq.api_min_ver,
4596             (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
4597             IXL_NVM_VERSION_HI_SHIFT,
4598             (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
4599             IXL_NVM_VERSION_LO_SHIFT,
4600             hw->nvm.eetrack);
4601         return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4602 }
4603
4604
4605 #ifdef IXL_DEBUG_SYSCTL
4606 static int
4607 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4608 {
4609         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4610         struct i40e_hw *hw = &pf->hw;
4611         struct i40e_link_status link_status;
4612         char buf[512];
4613
4614         enum i40e_status_code aq_error = 0;
4615
4616         aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4617         if (aq_error) {
4618                 printf("i40e_aq_get_link_info() error %d\n", aq_error);
4619                 return (EPERM);
4620         }
4621
4622         sprintf(buf, "\n"
4623             "PHY Type : %#04x\n"
4624             "Speed    : %#04x\n" 
4625             "Link info: %#04x\n" 
4626             "AN info  : %#04x\n" 
4627             "Ext info : %#04x", 
4628             link_status.phy_type, link_status.link_speed, 
4629             link_status.link_info, link_status.an_info,
4630             link_status.ext_info);
4631
4632         return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4633 }
4634
4635 static int
4636 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4637 {
4638         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4639         struct i40e_hw *hw = &pf->hw;
4640         struct i40e_aq_get_phy_abilities_resp abilities_resp;
4641         char buf[512];
4642
4643         enum i40e_status_code aq_error = 0;
4644
4645         // TODO: Print out list of qualified modules as well?
4646         aq_error = i40e_aq_get_phy_capabilities(hw, TRUE, FALSE, &abilities_resp, NULL);
4647         if (aq_error) {
4648                 printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4649                 return (EPERM);
4650         }
4651
4652         sprintf(buf, "\n"
4653             "PHY Type : %#010x\n"
4654             "Speed    : %#04x\n" 
4655             "Abilities: %#04x\n" 
4656             "EEE cap  : %#06x\n" 
4657             "EEER reg : %#010x\n" 
4658             "D3 Lpan  : %#04x",
4659             abilities_resp.phy_type, abilities_resp.link_speed, 
4660             abilities_resp.abilities, abilities_resp.eee_capability,
4661             abilities_resp.eeer_val, abilities_resp.d3_lpan);
4662
4663         return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4664 }
4665
4666 static int
4667 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4668 {
4669         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4670         struct ixl_vsi *vsi = &pf->vsi;
4671         struct ixl_mac_filter *f;
4672         char *buf, *buf_i;
4673
4674         int error = 0;
4675         int ftl_len = 0;
4676         int ftl_counter = 0;
4677         int buf_len = 0;
4678         int entry_len = 42;
4679
4680         SLIST_FOREACH(f, &vsi->ftl, next) {
4681                 ftl_len++;
4682         }
4683
4684         if (ftl_len < 1) {
4685                 sysctl_handle_string(oidp, "(none)", 6, req);
4686                 return (0);
4687         }
4688
4689         buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
4690         buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
4691
4692         sprintf(buf_i++, "\n");
4693         SLIST_FOREACH(f, &vsi->ftl, next) {
4694                 sprintf(buf_i,
4695                     MAC_FORMAT ", vlan %4d, flags %#06x",
4696                     MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4697                 buf_i += entry_len;
4698                 /* don't print '\n' for last entry */
4699                 if (++ftl_counter != ftl_len) {
4700                         sprintf(buf_i, "\n");
4701                         buf_i++;
4702                 }
4703         }
4704
4705         error = sysctl_handle_string(oidp, buf, strlen(buf), req);
4706         if (error)
4707                 printf("sysctl error: %d\n", error);
4708         free(buf, M_DEVBUF);
4709         return error;
4710 }
4711
4712 #define IXL_SW_RES_SIZE 0x14
4713 static int
4714 ixl_res_alloc_cmp(const void *a, const void *b)
4715 {
4716         const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4717         one = (struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4718         two = (struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4719
4720         return ((int)one->resource_type - (int)two->resource_type);
4721 }
4722
4723 static int
4724 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4725 {
4726         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4727         struct i40e_hw *hw = &pf->hw;
4728         device_t dev = pf->dev;
4729         struct sbuf *buf;
4730         int error = 0;
4731
4732         u8 num_entries;
4733         struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4734
4735         buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4736         if (!buf) {
4737                 device_printf(dev, "Could not allocate sbuf for output.\n");
4738                 return (ENOMEM);
4739         }
4740
4741         bzero(resp, sizeof(resp));
4742         error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4743                                 resp,
4744                                 IXL_SW_RES_SIZE,
4745                                 NULL);
4746         if (error) {
4747                 device_printf(dev, "%s: get_switch_resource_alloc() error %d, aq error %d\n",
4748                     __func__, error, hw->aq.asq_last_status);
4749                 sbuf_delete(buf);
4750                 return error;
4751         }
4752
4753         /* Sort entries by type for display */
4754         qsort(resp, num_entries,
4755             sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
4756             &ixl_res_alloc_cmp);
4757
4758         sbuf_cat(buf, "\n");
4759         sbuf_printf(buf, "# of entries: %d\n", num_entries);
4760         sbuf_printf(buf,
4761             "Type | Guaranteed | Total | Used   | Un-allocated\n"
4762             "     | (this)     | (all) | (this) | (all)       \n");
4763         for (int i = 0; i < num_entries; i++) {
4764                 sbuf_printf(buf,
4765                     "%#4x | %10d   %5d   %6d   %12d",
4766                     resp[i].resource_type,
4767                     resp[i].guaranteed,
4768                     resp[i].total,
4769                     resp[i].used,
4770                     resp[i].total_unalloced);
4771                 if (i < num_entries - 1)
4772                         sbuf_cat(buf, "\n");
4773         }
4774
4775         error = sbuf_finish(buf);
4776         sbuf_delete(buf);
4777
4778         return (error);
4779 }
4780
4781 /*
4782 ** Caller must init and delete sbuf; this function will clear and
4783 ** finish it for caller.
4784 */
4785 static char *
4786 ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
4787 {
4788         sbuf_clear(s);
4789
4790         if (seid == 0 && uplink)
4791                 sbuf_cat(s, "Network");
4792         else if (seid == 0)
4793                 sbuf_cat(s, "Host");
4794         else if (seid == 1)
4795                 sbuf_cat(s, "EMP");
4796         else if (seid <= 5)
4797                 sbuf_printf(s, "MAC %d", seid - 2);
4798         else if (seid <= 15)
4799                 sbuf_cat(s, "Reserved");
4800         else if (seid <= 31)
4801                 sbuf_printf(s, "PF %d", seid - 16);
4802         else if (seid <= 159)
4803                 sbuf_printf(s, "VF %d", seid - 32);
4804         else if (seid <= 287)
4805                 sbuf_cat(s, "Reserved");
4806         else if (seid <= 511)
4807                 sbuf_cat(s, "Other"); // for other structures
4808         else if (seid <= 895)
4809                 sbuf_printf(s, "VSI %d", seid - 512);
4810         else if (seid <= 1023)
4811                 sbuf_printf(s, "Reserved");
4812         else
4813                 sbuf_cat(s, "Invalid");
4814
4815         sbuf_finish(s);
4816         return sbuf_data(s);
4817 }
4818
4819 static int
4820 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
4821 {
4822         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4823         struct i40e_hw *hw = &pf->hw;
4824         device_t dev = pf->dev;
4825         struct sbuf *buf;
4826         struct sbuf *nmbuf;
4827         int error = 0;
4828         u8 aq_buf[I40E_AQ_LARGE_BUF];
4829
4830         u16 next = 0;
4831         struct i40e_aqc_get_switch_config_resp *sw_config;
4832         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
4833
4834         buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4835         if (!buf) {
4836                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
4837                 return (ENOMEM);
4838         }
4839
4840         error = i40e_aq_get_switch_config(hw, sw_config,
4841             sizeof(aq_buf), &next, NULL);
4842         if (error) {
4843                 device_printf(dev, "%s: aq_get_switch_config() error %d, aq error %d\n",
4844                     __func__, error, hw->aq.asq_last_status);
4845                 sbuf_delete(buf);
4846                 return error;
4847         }
4848
4849         nmbuf = sbuf_new_auto();
4850         if (!nmbuf) {
4851                 device_printf(dev, "Could not allocate sbuf for name output.\n");
4852                 return (ENOMEM);
4853         }
4854
4855         sbuf_cat(buf, "\n");
4856         // Assuming <= 255 elements in switch
4857         sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
4858         /* Exclude:
4859         ** Revision -- all elements are revision 1 for now
4860         */
4861         sbuf_printf(buf,
4862             "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
4863             "                |          |          | (uplink)\n");
4864         for (int i = 0; i < sw_config->header.num_reported; i++) {
4865                 // "%4d (%8s) | %8s   %8s   %#8x",
4866                 sbuf_printf(buf, "%4d", sw_config->element[i].seid);
4867                 sbuf_cat(buf, " ");
4868                 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, sw_config->element[i].seid, false));
4869                 sbuf_cat(buf, " | ");
4870                 sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, sw_config->element[i].uplink_seid, true));
4871                 sbuf_cat(buf, "   ");
4872                 sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, sw_config->element[i].downlink_seid, false));
4873                 sbuf_cat(buf, "   ");
4874                 sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
4875                 if (i < sw_config->header.num_reported - 1)
4876                         sbuf_cat(buf, "\n");
4877         }
4878         sbuf_delete(nmbuf);
4879
4880         error = sbuf_finish(buf);
4881         sbuf_delete(buf);
4882
4883         return (error);
4884 }
4885
4886 /*
4887 ** Dump TX desc given index.
4888 ** Doesn't work; don't use.
4889 ** TODO: Also needs a queue index input!
4890 **/
4891 static int
4892 ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS)
4893 {
4894         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4895         device_t dev = pf->dev;
4896         struct sbuf *buf;
4897         int error = 0;
4898
4899         u16 desc_idx = 0;
4900
4901         buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4902         if (!buf) {
4903                 device_printf(dev, "Could not allocate sbuf for output.\n");
4904                 return (ENOMEM);
4905         }
4906
4907         /* Read in index */
4908         error = sysctl_handle_int(oidp, &desc_idx, 0, req);
4909         if (error)
4910                 return (error);
4911         if (req->newptr == NULL)
4912                 return (EIO); // fix
4913         if (desc_idx > 1024) { // fix
4914                 device_printf(dev,
4915                     "Invalid descriptor index, needs to be < 1024\n"); // fix
4916                 return (EINVAL);
4917         }
4918
4919         // Don't use this sysctl yet
4920         if (TRUE)
4921                 return (ENODEV);
4922
4923         sbuf_cat(buf, "\n");
4924
4925         // set to queue 1?
4926         struct ixl_queue *que = pf->vsi.queues;
4927         struct tx_ring *txr = &(que[1].txr);
4928         struct i40e_tx_desc *txd = &txr->base[desc_idx];
4929
4930         sbuf_printf(buf, "Que: %d, Desc: %d\n", que->me, desc_idx);
4931         sbuf_printf(buf, "Addr: %#18lx\n", txd->buffer_addr);
4932         sbuf_printf(buf, "Opts: %#18lx\n", txd->cmd_type_offset_bsz);
4933
4934         error = sbuf_finish(buf);
4935         if (error) {
4936                 device_printf(dev, "Error finishing sbuf: %d\n", error);
4937                 sbuf_delete(buf);
4938                 return error;
4939         }
4940
4941         error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4942         if (error)
4943                 device_printf(dev, "sysctl error: %d\n", error);
4944         sbuf_delete(buf);
4945         return error;
4946 }
4947 #endif /* IXL_DEBUG_SYSCTL */
4948