]> CyberLeo.Net >> Repos - FreeBSD/stable/10.git/blob - sys/dev/ixl/if_ixl.c
MFC r279232: Add native netmap support to ixl
[FreeBSD/stable/10.git] / sys / dev / ixl / if_ixl.c
1 /******************************************************************************
2
3   Copyright (c) 2013-2015, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 #ifndef IXL_STANDALONE_BUILD
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #endif
39
40 #include "ixl.h"
41 #include "ixl_pf.h"
42
43 #ifdef RSS
44 #include <net/rss_config.h>
45 #endif
46
47 /*********************************************************************
48  *  Driver version
49  *********************************************************************/
/* Driver version; appended to the device description string in ixl_probe() */
char ixl_driver_version[] = "1.3.6";
51
52 /*********************************************************************
53  *  PCI Device ID Table
54  *
55  *  Used by probe to select devices to load on
56  *  Last field stores an index into ixl_strings
57  *  Last entry must be all 0s
58  *
59  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
60  *********************************************************************/
61
static ixl_vendor_info_t ixl_vendor_info_array[] =
{
	/* Subvendor/subdevice IDs of 0 act as wildcards in ixl_probe() */
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
75
76 /*********************************************************************
77  *  Table of branding strings
78  *********************************************************************/
79
/* Branding strings, selected by the index field of ixl_vendor_info_array */
static char    *ixl_strings[] = {
	"Intel(R) Ethernet Connection XL710 Driver"
};
83
84
85 /*********************************************************************
86  *  Function prototypes
87  *********************************************************************/
88 static int      ixl_probe(device_t);
89 static int      ixl_attach(device_t);
90 static int      ixl_detach(device_t);
91 static int      ixl_shutdown(device_t);
92 static int      ixl_get_hw_capabilities(struct ixl_pf *);
93 static void     ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
94 static int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
95 static void     ixl_init(void *);
96 static void     ixl_init_locked(struct ixl_pf *);
97 static void     ixl_stop(struct ixl_pf *);
98 static void     ixl_media_status(struct ifnet *, struct ifmediareq *);
99 static int      ixl_media_change(struct ifnet *);
100 static void     ixl_update_link_status(struct ixl_pf *);
101 static int      ixl_allocate_pci_resources(struct ixl_pf *);
102 static u16      ixl_get_bus_info(struct i40e_hw *, device_t);
103 static int      ixl_setup_stations(struct ixl_pf *);
104 static int      ixl_switch_config(struct ixl_pf *);
105 static int      ixl_initialize_vsi(struct ixl_vsi *);
106 static int      ixl_assign_vsi_msix(struct ixl_pf *);
107 static int      ixl_assign_vsi_legacy(struct ixl_pf *);
108 static int      ixl_init_msix(struct ixl_pf *);
109 static void     ixl_configure_msix(struct ixl_pf *);
110 static void     ixl_configure_itr(struct ixl_pf *);
111 static void     ixl_configure_legacy(struct ixl_pf *);
112 static void     ixl_free_pci_resources(struct ixl_pf *);
113 static void     ixl_local_timer(void *);
114 static int      ixl_setup_interface(device_t, struct ixl_vsi *);
115 static bool     ixl_config_link(struct i40e_hw *);
116 static void     ixl_config_rss(struct ixl_vsi *);
117 static void     ixl_set_queue_rx_itr(struct ixl_queue *);
118 static void     ixl_set_queue_tx_itr(struct ixl_queue *);
119 static int      ixl_set_advertised_speeds(struct ixl_pf *, int);
120
121 static void     ixl_enable_rings(struct ixl_vsi *);
122 static void     ixl_disable_rings(struct ixl_vsi *);
123 static void     ixl_enable_intr(struct ixl_vsi *);
124 static void     ixl_disable_intr(struct ixl_vsi *);
125
126 static void     ixl_enable_adminq(struct i40e_hw *);
127 static void     ixl_disable_adminq(struct i40e_hw *);
128 static void     ixl_enable_queue(struct i40e_hw *, int);
129 static void     ixl_disable_queue(struct i40e_hw *, int);
130 static void     ixl_enable_legacy(struct i40e_hw *);
131 static void     ixl_disable_legacy(struct i40e_hw *);
132
133 static void     ixl_set_promisc(struct ixl_vsi *);
134 static void     ixl_add_multi(struct ixl_vsi *);
135 static void     ixl_del_multi(struct ixl_vsi *);
136 static void     ixl_register_vlan(void *, struct ifnet *, u16);
137 static void     ixl_unregister_vlan(void *, struct ifnet *, u16);
138 static void     ixl_setup_vlan_filters(struct ixl_vsi *);
139
140 static void     ixl_init_filters(struct ixl_vsi *);
141 static void     ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
142 static void     ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
143 static void     ixl_add_hw_filters(struct ixl_vsi *, int, int);
144 static void     ixl_del_hw_filters(struct ixl_vsi *, int);
145 static struct ixl_mac_filter *
146                 ixl_find_filter(struct ixl_vsi *, u8 *, s16);
147 static void     ixl_add_mc_filter(struct ixl_vsi *, u8 *);
148
149 /* Sysctl debug interface */
150 static int      ixl_debug_info(SYSCTL_HANDLER_ARGS);
151 static void     ixl_print_debug_info(struct ixl_pf *);
152
153 /* The MSI/X Interrupt handlers */
154 static void     ixl_intr(void *);
155 static void     ixl_msix_que(void *);
156 static void     ixl_msix_adminq(void *);
157 static void     ixl_handle_mdd_event(struct ixl_pf *);
158
159 /* Deferred interrupt tasklets */
160 static void     ixl_do_adminq(void *, int);
161
162 /* Sysctl handlers */
163 static int      ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
164 static int      ixl_set_advertise(SYSCTL_HANDLER_ARGS);
165 static int      ixl_current_speed(SYSCTL_HANDLER_ARGS);
166 static int      ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
167
168 /* Statistics */
169 static void     ixl_add_hw_stats(struct ixl_pf *);
170 static void     ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
171                     struct sysctl_oid_list *, struct i40e_hw_port_stats *);
172 static void     ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
173                     struct sysctl_oid_list *,
174                     struct i40e_eth_stats *);
175 static void     ixl_update_stats_counters(struct ixl_pf *);
176 static void     ixl_update_eth_stats(struct ixl_vsi *);
177 static void     ixl_pf_reset_stats(struct ixl_pf *);
178 static void     ixl_vsi_reset_stats(struct ixl_vsi *);
179 static void     ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
180                     u64 *, u64 *);
181 static void     ixl_stat_update32(struct i40e_hw *, u32, bool,
182                     u64 *, u64 *);
183
184 #ifdef IXL_DEBUG_SYSCTL
185 static int      ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
186 static int      ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
187 static int      ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
188 static int      ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
189 static int      ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
190 static int      ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS);
191 #endif
192
193 /*********************************************************************
194  *  FreeBSD Device Interface Entry Points
195  *********************************************************************/
196
/* Newbus device methods implemented by this driver */
static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixl_probe),
	DEVMETHOD(device_attach, ixl_attach),
	DEVMETHOD(device_detach, ixl_detach),
	DEVMETHOD(device_shutdown, ixl_shutdown),
	{0, 0}
};

/* Softc size is the whole PF structure (embeds the single VSI) */
static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);

/* Module load-order dependencies */
MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ixl, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
218
/*
** Global reset mutex
** (initialized once, lazily, from ixl_probe())
*/
static struct mtx ixl_reset_mtx;

/*
** TUNEABLE PARAMETERS:
*/

/* Parent sysctl node: hw.ixl */
static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
                   "IXL driver parameters");

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixl_enable_msix = 1;
TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixl_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixl_ringsz, 0, "Descriptor Ring Size");

/* 
** This can be set manually, if left as 0 the
** number of queues will be calculated based
** on cpus and msix vectors available.
*/
int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixl_max_queues, 0, "Number of Queues");

/*
** Controls for Interrupt Throttling 
**      - true/false for dynamic adjustment
**      - default values for static ITR
** NOTE: these are globals shared by every ixl instance; they are also
** re-exported per-device as writable sysctls in ixl_attach().
*/
int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

#ifdef IXL_FDIR
static int ixl_enable_fdir = 1;
TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
/* Rate at which we sample */
int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif

#ifdef DEV_NETMAP
#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */

/* Human-readable flow-control mode names (used by the "fc" sysctl path) */
static char *ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};
305
306
307 /*********************************************************************
308  *  Device identification routine
309  *
310  *  ixl_probe determines if the driver should be loaded on
311  *  the hardware based on PCI vendor/device id of the device.
312  *
313  *  return BUS_PROBE_DEFAULT on success, positive on failure
314  *********************************************************************/
315
316 static int
317 ixl_probe(device_t dev)
318 {
319         ixl_vendor_info_t *ent;
320
321         u16     pci_vendor_id, pci_device_id;
322         u16     pci_subvendor_id, pci_subdevice_id;
323         char    device_name[256];
324         static bool lock_init = FALSE;
325
326         INIT_DEBUGOUT("ixl_probe: begin");
327
328         pci_vendor_id = pci_get_vendor(dev);
329         if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
330                 return (ENXIO);
331
332         pci_device_id = pci_get_device(dev);
333         pci_subvendor_id = pci_get_subvendor(dev);
334         pci_subdevice_id = pci_get_subdevice(dev);
335
336         ent = ixl_vendor_info_array;
337         while (ent->vendor_id != 0) {
338                 if ((pci_vendor_id == ent->vendor_id) &&
339                     (pci_device_id == ent->device_id) &&
340
341                     ((pci_subvendor_id == ent->subvendor_id) ||
342                      (ent->subvendor_id == 0)) &&
343
344                     ((pci_subdevice_id == ent->subdevice_id) ||
345                      (ent->subdevice_id == 0))) {
346                         sprintf(device_name, "%s, Version - %s",
347                                 ixl_strings[ent->index],
348                                 ixl_driver_version);
349                         device_set_desc_copy(dev, device_name);
350                         /* One shot mutex init */
351                         if (lock_init == FALSE) {
352                                 lock_init = TRUE;
353                                 mtx_init(&ixl_reset_mtx,
354                                     "ixl_reset",
355                                     "IXL RESET Lock", MTX_DEF);
356                         }
357                         return (BUS_PROBE_DEFAULT);
358                 }
359                 ent++;
360         }
361         return (ENXIO);
362 }
363
364 /*********************************************************************
365  *  Device initialization routine
366  *
367  *  The attach entry point is called when the driver is being loaded.
368  *  This routine identifies the type of hardware, allocates all resources
369  *  and initializes the hardware.
370  *
371  *  return 0 on success, positive on failure
372  *********************************************************************/
373
/*
 * Attach: bring the adapter from reset to an operational state.
 *
 * Sequence: map PCI resources, reset the PF, bring up the admin queue,
 * discover capabilities, initialize the LAN HMC, read the MAC address,
 * create the single embedded VSI with its queues and interrupts, then
 * register the ifnet with the network stack.  The order of hardware
 * steps matters; failures unwind through the err_* labels at the end.
 *
 * Returns 0 on success, an errno on failure.
 */
static int
ixl_attach(device_t dev)
{
	struct ixl_pf	*pf;
	struct i40e_hw	*hw;
	struct ixl_vsi *vsi;
	u16		bus;
	int		error = 0;

	INIT_DEBUGOUT("ixl_attach: begin");

	/* Allocate, clear, and link in our primary soft structure */
	pf = device_get_softc(dev);
	pf->dev = pf->osdep.dev = dev;
	hw = &pf->hw;

	/*
	** Note this assumes we have a single embedded VSI,
	** this could be enhanced later to allocate multiple
	*/
	vsi = &pf->vsi;
	vsi->dev = pf->dev;

	/* Core Lock Init*/
	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));

	/* Set up the timer callout (runs ixl_local_timer under the PF lock) */
	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);

	/* Set up per-device sysctls (handlers get the PF softc as arg1) */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_advertise, "I", "Advertised Speed");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_current_speed, "A", "Current Port Speed");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");

	/* NOTE: the four INT sysctls below point at the module-wide
	 * tunables, so writing them affects every ixl instance. */
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "rx_itr", CTLFLAG_RW,
	    &ixl_rx_itr, IXL_ITR_8K, "RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
	    &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "tx_itr", CTLFLAG_RW,
	    &ixl_tx_itr, IXL_ITR_4K, "TX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
	    &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");

#ifdef IXL_DEBUG_SYSCTL
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dump_desc", CTLTYPE_INT | CTLFLAG_WR,
	    pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump");
#endif

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Create for initial debugging use */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
	    ixl_debug_info, "I", "Debug Information");


	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev,"PF reset failure %x\n", error);
		error = EIO;
		goto err_out;
	}

	/* Set admin queue parameters (must precede i40e_init_adminq) */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

	/* Initialize the shared code */
	error = i40e_init_shared_code(hw);
	if (error) {
		device_printf(dev,"Unable to initialize the shared code\n");
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n"
		    "You must install the most recent version of "
		    " the network driver.\n");
		goto err_out;
	}
	device_printf(dev, "%s\n", ixl_fw_version_str(hw));

	/* Warn (but continue) when the firmware API version is newer or
	 * more than one minor revision older than this driver expects */
	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n"
		    "Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n"
		    "Please update the NVM image.\n");

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "HW capabilities failure!\n");
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = i40e_init_lan_hmc(hw, vsi->num_queues, vsi->num_queues, 0, 0);
	if (error) {
		device_printf(dev, "init_lan_hmc failed: %d\n", error);
		goto err_get_cap;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
		goto err_mac_hmc;
	}

	/* Disable LLDP from the firmware */
	i40e_aq_stop_lldp(hw, TRUE, NULL);

	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up VSI and queues */
	if (ixl_setup_stations(pf) != 0) { 
		device_printf(dev, "setup stations failed!\n");
		error = ENOMEM;
		goto err_mac_hmc;
	}

	/* Initialize mac filter list for VSI */
	SLIST_INIT(&vsi->ftl);

	/* Set up interrupt routing here */
	if (pf->msix > 1)
		error = ixl_assign_vsi_msix(pf);
	else
		error = ixl_assign_vsi_legacy(pf);
	if (error) 
		goto err_late;

	/* Older firmware (pre-4.33) needs an explicit autoneg restart
	 * after a small settle delay */
	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error)
			device_printf(dev, "link restart failed, aq_err=%d\n",
			    pf->hw.aq.asq_last_status);
	}

	/* Determine link state */
	vsi->link_up = ixl_config_link(hw);

	/* Report if Unqualified modules are found */
	if ((vsi->link_up == FALSE) &&
	    (pf->hw.phy.link_info.link_info &
	    I40E_AQ_MEDIA_AVAILABLE) &&
	    (!(pf->hw.phy.link_info.an_info &
	    I40E_AQ_QUALIFIED_MODULE)))
		device_printf(dev, "Link failed because "
		    "an unqualified module was detected\n");

	/* Setup OS specific network interface */
	if (ixl_setup_interface(dev, vsi) != 0) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err_late;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial switch config failed: %d\n", error);
		/* NOTE(review): this jumps past err_late, so the ifnet
		 * created by ixl_setup_interface() above is not if_free()d
		 * on this path — looks like a leak; confirm upstream. */
		goto err_mac_hmc;
	}

	/* Limit phy interrupts to link and modules failure */
	error = i40e_aq_set_phy_int_mask(hw,
	    I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
	if (error)
		device_printf(dev, "set phy mask failed: %d\n", error);

	/* Get the bus configuration and set the shared code */
	bus = ixl_get_bus_info(hw, dev);
	i40e_set_pci_config_data(hw, bus);

	/* Initialize statistics */
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);


#ifdef DEV_NETMAP
	ixl_netmap_attach(vsi);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixl_attach: end");
	return (0);

	/* Error unwind: each label releases what was acquired before the
	 * corresponding failure point, falling through to earlier stages */
err_late:
	if (vsi->ifp != NULL)
		if_free(vsi->ifp);
err_mac_hmc:
	i40e_shutdown_lan_hmc(hw);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (error);
}
675
676 /*********************************************************************
677  *  Device removal routine
678  *
679  *  The detach entry point is called when the driver is being removed.
680  *  This routine stops the adapter and deallocates all the resources
681  *  that were allocated for driver operation.
682  *
683  *  return 0 on success, positive on failure
684  *********************************************************************/
685
686 static int
687 ixl_detach(device_t dev)
688 {
689         struct ixl_pf           *pf = device_get_softc(dev);
690         struct i40e_hw          *hw = &pf->hw;
691         struct ixl_vsi          *vsi = &pf->vsi;
692         struct ixl_queue        *que = vsi->queues;
693         i40e_status             status;
694
695         INIT_DEBUGOUT("ixl_detach: begin");
696
697         /* Make sure VLANS are not using driver */
698         if (vsi->ifp->if_vlantrunk != NULL) {
699                 device_printf(dev,"Vlan in use, detach first\n");
700                 return (EBUSY);
701         }
702
703         ether_ifdetach(vsi->ifp);
704         if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
705                 IXL_PF_LOCK(pf);
706                 ixl_stop(pf);
707                 IXL_PF_UNLOCK(pf);
708         }
709
710         for (int i = 0; i < vsi->num_queues; i++, que++) {
711                 if (que->tq) {
712                         taskqueue_drain(que->tq, &que->task);
713                         taskqueue_drain(que->tq, &que->tx_task);
714                         taskqueue_free(que->tq);
715                 }
716         }
717
718         /* Shutdown LAN HMC */
719         status = i40e_shutdown_lan_hmc(hw);
720         if (status)
721                 device_printf(dev,
722                     "Shutdown LAN HMC failed with code %d\n", status);
723
724         /* Shutdown admin queue */
725         status = i40e_shutdown_adminq(hw);
726         if (status)
727                 device_printf(dev,
728                     "Shutdown Admin queue failed with code %d\n", status);
729
730         /* Unregister VLAN events */
731         if (vsi->vlan_attach != NULL)
732                 EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
733         if (vsi->vlan_detach != NULL)
734                 EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
735
736         callout_drain(&pf->timer);
737 #ifdef DEV_NETMAP
738         netmap_detach(vsi->ifp);
739 #endif /* DEV_NETMAP */
740
741
742         ixl_free_pci_resources(pf);
743         bus_generic_detach(dev);
744         if_free(vsi->ifp);
745         ixl_free_vsi(vsi);
746         IXL_PF_LOCK_DESTROY(pf);
747         return (0);
748 }
749
750 /*********************************************************************
751  *
752  *  Shutdown entry point
753  *
754  **********************************************************************/
755
756 static int
757 ixl_shutdown(device_t dev)
758 {
759         struct ixl_pf *pf = device_get_softc(dev);
760         IXL_PF_LOCK(pf);
761         ixl_stop(pf);
762         IXL_PF_UNLOCK(pf);
763         return (0);
764 }
765
766
767 /*********************************************************************
768  *
769  *  Get the hardware capabilities
770  *
771  **********************************************************************/
772
773 static int
774 ixl_get_hw_capabilities(struct ixl_pf *pf)
775 {
776         struct i40e_aqc_list_capabilities_element_resp *buf;
777         struct i40e_hw  *hw = &pf->hw;
778         device_t        dev = pf->dev;
779         int             error, len;
780         u16             needed;
781         bool            again = TRUE;
782
783         len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
784 retry:
785         if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
786             malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
787                 device_printf(dev, "Unable to allocate cap memory\n");
788                 return (ENOMEM);
789         }
790
791         /* This populates the hw struct */
792         error = i40e_aq_discover_capabilities(hw, buf, len,
793             &needed, i40e_aqc_opc_list_func_capabilities, NULL);
794         free(buf, M_DEVBUF);
795         if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
796             (again == TRUE)) {
797                 /* retry once with a larger buffer */
798                 again = FALSE;
799                 len = needed;
800                 goto retry;
801         } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
802                 device_printf(dev, "capability discovery failed: %d\n",
803                     pf->hw.aq.asq_last_status);
804                 return (ENODEV);
805         }
806
807         /* Capture this PF's starting queue pair */
808         pf->qbase = hw->func_caps.base_queue;
809
810 #ifdef IXL_DEBUG
811         device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
812             "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
813             hw->pf_id, hw->func_caps.num_vfs,
814             hw->func_caps.num_msix_vectors,
815             hw->func_caps.num_msix_vectors_vf,
816             hw->func_caps.fd_filters_guaranteed,
817             hw->func_caps.fd_filters_best_effort,
818             hw->func_caps.num_tx_qp,
819             hw->func_caps.num_rx_qp,
820             hw->func_caps.base_queue);
821 #endif
822         return (error);
823 }
824
/*
 * Keep IFCAP_TXCSUM/IFCAP_TSO4 (and their IPv6 equivalents) mutually
 * consistent when the user toggles capabilities: TSO requires TX
 * checksum offload, so flipping one bit may force the other.
 *
 * 'mask' is the set of capability bits a SIOCSIFCAP request wants to
 * flip.  The IXL_FLAGS_KEEP_TSO4/6 vsi flags remember that TSO was
 * disabled only as a side effect of disabling txcsum, so it can be
 * restored automatically when txcsum is re-enabled.
 */
static void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
        device_t        dev = vsi->dev;

        /* Enable/disable TXCSUM/TSO4 */
        /* Case 1: both TXCSUM and TSO4 currently off */
        if (!(ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable |= IFCAP_TXCSUM;
                        /* enable TXCSUM, restore TSO if previously enabled */
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                                ifp->if_capenable |= IFCAP_TSO4;
                        }
                }
                else if (mask & IFCAP_TSO4) {
                        /* TSO4 implies TXCSUM: turn both on together */
                        ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                        device_printf(dev,
                            "TSO4 requires txcsum, enabling both...\n");
                }
        /* Case 2: TXCSUM on, TSO4 off — either bit may flip freely */
        } else if((ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM)
                        ifp->if_capenable &= ~IFCAP_TXCSUM;
                else if (mask & IFCAP_TSO4)
                        ifp->if_capenable |= IFCAP_TSO4;
        /* Case 3: both on — dropping txcsum must also drop TSO4 */
        } else if((ifp->if_capenable & IFCAP_TXCSUM)
            && (ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        /* remember TSO4 was on, so it comes back with txcsum */
                        vsi->flags |= IXL_FLAGS_KEEP_TSO4;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
                        device_printf(dev, 
                            "TSO4 requires txcsum, disabling both...\n");
                } else if (mask & IFCAP_TSO4)
                        ifp->if_capenable &= ~IFCAP_TSO4;
        }

        /* Enable/disable TXCSUM_IPV6/TSO6 — mirror of the IPv4 logic above */
        if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                                ifp->if_capenable |= IFCAP_TSO6;
                        }
                } else if (mask & IFCAP_TSO6) {
                        ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                        device_printf(dev,
                            "TSO6 requires txcsum6, enabling both...\n");
                }
        } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6)
                        ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
                else if (mask & IFCAP_TSO6)
                        ifp->if_capenable |= IFCAP_TSO6;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && (ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO6;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        device_printf(dev,
                            "TSO6 requires txcsum6, disabling both...\n");
                } else if (mask & IFCAP_TSO6)
                        ifp->if_capenable &= ~IFCAP_TSO6;
        }
}
896
897 /*********************************************************************
898  *  Ioctl entry point
899  *
900  *  ixl_ioctl is called when the user wants to configure the
901  *  interface.
902  *
903  *  return 0 on success, positive on failure
904  **********************************************************************/
905
906 static int
907 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
908 {
909         struct ixl_vsi  *vsi = ifp->if_softc;
910         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
911         struct ifreq    *ifr = (struct ifreq *) data;
912 #if defined(INET) || defined(INET6)
913         struct ifaddr *ifa = (struct ifaddr *)data;
914         bool            avoid_reset = FALSE;
915 #endif
916         int             error = 0;
917
918         switch (command) {
919
920         case SIOCSIFADDR:
921 #ifdef INET
922                 if (ifa->ifa_addr->sa_family == AF_INET)
923                         avoid_reset = TRUE;
924 #endif
925 #ifdef INET6
926                 if (ifa->ifa_addr->sa_family == AF_INET6)
927                         avoid_reset = TRUE;
928 #endif
929 #if defined(INET) || defined(INET6)
930                 /*
931                 ** Calling init results in link renegotiation,
932                 ** so we avoid doing it when possible.
933                 */
934                 if (avoid_reset) {
935                         ifp->if_flags |= IFF_UP;
936                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
937                                 ixl_init(pf);
938 #ifdef INET
939                         if (!(ifp->if_flags & IFF_NOARP))
940                                 arp_ifinit(ifp, ifa);
941 #endif
942                 } else
943                         error = ether_ioctl(ifp, command, data);
944                 break;
945 #endif
946         case SIOCSIFMTU:
947                 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
948                 if (ifr->ifr_mtu > IXL_MAX_FRAME -
949                    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
950                         error = EINVAL;
951                 } else {
952                         IXL_PF_LOCK(pf);
953                         ifp->if_mtu = ifr->ifr_mtu;
954                         vsi->max_frame_size =
955                                 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
956                             + ETHER_VLAN_ENCAP_LEN;
957                         ixl_init_locked(pf);
958                         IXL_PF_UNLOCK(pf);
959                 }
960                 break;
961         case SIOCSIFFLAGS:
962                 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
963                 IXL_PF_LOCK(pf);
964                 if (ifp->if_flags & IFF_UP) {
965                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
966                                 if ((ifp->if_flags ^ pf->if_flags) &
967                                     (IFF_PROMISC | IFF_ALLMULTI)) {
968                                         ixl_set_promisc(vsi);
969                                 }
970                         } else
971                                 ixl_init_locked(pf);
972                 } else
973                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
974                                 ixl_stop(pf);
975                 pf->if_flags = ifp->if_flags;
976                 IXL_PF_UNLOCK(pf);
977                 break;
978         case SIOCADDMULTI:
979                 IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
980                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
981                         IXL_PF_LOCK(pf);
982                         ixl_disable_intr(vsi);
983                         ixl_add_multi(vsi);
984                         ixl_enable_intr(vsi);
985                         IXL_PF_UNLOCK(pf);
986                 }
987                 break;
988         case SIOCDELMULTI:
989                 IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
990                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
991                         IXL_PF_LOCK(pf);
992                         ixl_disable_intr(vsi);
993                         ixl_del_multi(vsi);
994                         ixl_enable_intr(vsi);
995                         IXL_PF_UNLOCK(pf);
996                 }
997                 break;
998         case SIOCSIFMEDIA:
999         case SIOCGIFMEDIA:
1000                 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
1001                 error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
1002                 break;
1003         case SIOCSIFCAP:
1004         {
1005                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1006                 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
1007
1008                 ixl_cap_txcsum_tso(vsi, ifp, mask);
1009
1010                 if (mask & IFCAP_RXCSUM)
1011                         ifp->if_capenable ^= IFCAP_RXCSUM;
1012                 if (mask & IFCAP_RXCSUM_IPV6)
1013                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1014                 if (mask & IFCAP_LRO)
1015                         ifp->if_capenable ^= IFCAP_LRO;
1016                 if (mask & IFCAP_VLAN_HWTAGGING)
1017                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1018                 if (mask & IFCAP_VLAN_HWFILTER)
1019                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1020                 if (mask & IFCAP_VLAN_HWTSO)
1021                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1022                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1023                         IXL_PF_LOCK(pf);
1024                         ixl_init_locked(pf);
1025                         IXL_PF_UNLOCK(pf);
1026                 }
1027                 VLAN_CAPABILITIES(ifp);
1028
1029                 break;
1030         }
1031
1032         default:
1033                 IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
1034                 error = ether_ioctl(ifp, command, data);
1035                 break;
1036         }
1037
1038         return (error);
1039 }
1040
1041
1042 /*********************************************************************
1043  *  Init entry point
1044  *
1045  *  This routine is used in two ways. It is used by the stack as
1046  *  init entry point in network interface structure. It is also used
1047  *  by the driver as a hw/sw initialization routine to get to a
1048  *  consistent state.
1049  *
1050  *  return 0 on success, positive on failure
1051  **********************************************************************/
1052
1053 static void
1054 ixl_init_locked(struct ixl_pf *pf)
1055 {
1056         struct i40e_hw  *hw = &pf->hw;
1057         struct ixl_vsi  *vsi = &pf->vsi;
1058         struct ifnet    *ifp = vsi->ifp;
1059         device_t        dev = pf->dev;
1060         struct i40e_filter_control_settings     filter;
1061         u8              tmpaddr[ETHER_ADDR_LEN];
1062         int             ret;
1063
1064         mtx_assert(&pf->pf_mtx, MA_OWNED);
1065         INIT_DEBUGOUT("ixl_init: begin");
1066         ixl_stop(pf);
1067
1068         /* Get the latest mac address... User might use a LAA */
1069         bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
1070               I40E_ETH_LENGTH_OF_ADDRESS);
1071         if (!cmp_etheraddr(hw->mac.addr, tmpaddr) && 
1072             i40e_validate_mac_addr(tmpaddr)) {
1073                 bcopy(tmpaddr, hw->mac.addr,
1074                     I40E_ETH_LENGTH_OF_ADDRESS);
1075                 ret = i40e_aq_mac_address_write(hw,
1076                     I40E_AQC_WRITE_TYPE_LAA_ONLY,
1077                     hw->mac.addr, NULL);
1078                 if (ret) {
1079                         device_printf(dev, "LLA address"
1080                          "change failed!!\n");
1081                         return;
1082                 }
1083         }
1084
1085         /* Set the various hardware offload abilities */
1086         ifp->if_hwassist = 0;
1087         if (ifp->if_capenable & IFCAP_TSO)
1088                 ifp->if_hwassist |= CSUM_TSO;
1089         if (ifp->if_capenable & IFCAP_TXCSUM)
1090                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1091         if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1092                 ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
1093
1094         /* Set up the device filtering */
1095         bzero(&filter, sizeof(filter));
1096         filter.enable_ethtype = TRUE;
1097         filter.enable_macvlan = TRUE;
1098 #ifdef IXL_FDIR
1099         filter.enable_fdir = TRUE;
1100 #endif
1101         if (i40e_set_filter_control(hw, &filter))
1102                 device_printf(dev, "set_filter_control() failed\n");
1103
1104         /* Set up RSS */
1105         ixl_config_rss(vsi);
1106
1107         /*
1108         ** Prepare the VSI: rings, hmc contexts, etc...
1109         */
1110         if (ixl_initialize_vsi(vsi)) {
1111                 device_printf(dev, "initialize vsi failed!!\n");
1112                 return;
1113         }
1114
1115         /* Add protocol filters to list */
1116         ixl_init_filters(vsi);
1117
1118         /* Setup vlan's if needed */
1119         ixl_setup_vlan_filters(vsi);
1120
1121         /* Start the local timer */
1122         callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1123
1124         /* Set up MSI/X routing and the ITR settings */
1125         if (ixl_enable_msix) {
1126                 ixl_configure_msix(pf);
1127                 ixl_configure_itr(pf);
1128         } else
1129                 ixl_configure_legacy(pf);
1130
1131         ixl_enable_rings(vsi);
1132
1133         i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
1134
1135         /* Set MTU in hardware*/
1136         int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
1137             TRUE, 0, NULL);
1138         if (aq_error)
1139                 device_printf(vsi->dev,
1140                         "aq_set_mac_config in init error, code %d\n",
1141                     aq_error);
1142
1143         /* And now turn on interrupts */
1144         ixl_enable_intr(vsi);
1145
1146         /* Now inform the stack we're ready */
1147         ifp->if_drv_flags |= IFF_DRV_RUNNING;
1148         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1149
1150         return;
1151 }
1152
/*
 * Stack-facing init entry point: take the PF lock and delegate all
 * of the work to ixl_init_locked().
 */
static void
ixl_init(void *arg)
{
        struct ixl_pf *pf = (struct ixl_pf *)arg;

        IXL_PF_LOCK(pf);
        ixl_init_locked(pf);
        IXL_PF_UNLOCK(pf);
}
1163
1164 /*
1165 **
1166 ** MSIX Interrupt Handlers and Tasklets
1167 **
1168 */
/*
 * Deferred (taskqueue) handler for one queue pair: drain RX, clean
 * the TX ring, restart any queued transmits, and either re-queue
 * itself while RX work remains or re-enable the queue interrupt.
 */
static void
ixl_handle_que(void *context, int pending)
{
        struct ixl_queue *que = context;
        struct ixl_vsi *vsi = que->vsi;
        struct i40e_hw  *hw = vsi->hw;
        struct tx_ring  *txr = &que->txr;
        struct ifnet    *ifp = vsi->ifp;
        bool            more;

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                more = ixl_rxeof(que, IXL_RX_LIMIT);
                /* TX ring is shared with the start path: take the TX lock */
                IXL_TX_LOCK(txr);
                ixl_txeof(que);
                if (!drbr_empty(ifp, txr->br))
                        ixl_mq_start_locked(ifp, txr);
                IXL_TX_UNLOCK(txr);
                /* RX not fully drained: run again before unmasking */
                if (more) {
                        taskqueue_enqueue(que->tq, &que->task);
                        return;
                }
        }

        /* Reenable this interrupt - hmmm */
        ixl_enable_queue(hw, que->me);
        return;
}
1196
1197
1198 /*********************************************************************
1199  *
1200  *  Legacy Interrupt Service routine
1201  *
1202  **********************************************************************/
void
ixl_intr(void *arg)
{
        struct ixl_pf           *pf = arg;
        struct i40e_hw          *hw =  &pf->hw;
        struct ixl_vsi          *vsi = &pf->vsi;
        struct ixl_queue        *que = vsi->queues;
        struct ifnet            *ifp = vsi->ifp;
        struct tx_ring          *txr = &que->txr;
        u32                     reg, icr0, mask;
        bool                    more_tx, more_rx;

        ++que->irqs;

        /* Protect against spurious interrupts */
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                return;

        /* Fetch the pending interrupt causes */
        icr0 = rd32(hw, I40E_PFINT_ICR0);

        reg = rd32(hw, I40E_PFINT_DYN_CTL0);
        reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
        wr32(hw, I40E_PFINT_DYN_CTL0, reg);

        mask = rd32(hw, I40E_PFINT_ICR0_ENA);

        /*
         * Admin queue events are deferred to the adminq task.
         * NOTE(review): this path returns without servicing the rings
         * or restoring I40E_PFINT_ICR0_ENA / the legacy interrupt —
         * presumably the adminq task re-arms things; verify.
         */
        if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
                taskqueue_enqueue(pf->tq, &pf->adminq);
                return;
        }

        more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

        /* TX ring is shared with the start path: take the TX lock */
        IXL_TX_LOCK(txr);
        more_tx = ixl_txeof(que);
        if (!drbr_empty(vsi->ifp, txr->br))
                more_tx = 1;
        IXL_TX_UNLOCK(txr);

        /* re-enable other interrupt causes */
        wr32(hw, I40E_PFINT_ICR0_ENA, mask);

        /* And now the queues */
        reg = rd32(hw, I40E_QINT_RQCTL(0));
        reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
        wr32(hw, I40E_QINT_RQCTL(0), reg);

        reg = rd32(hw, I40E_QINT_TQCTL(0));
        reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
        reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
        wr32(hw, I40E_QINT_TQCTL(0), reg);

        ixl_enable_legacy(hw);

        return;
}
1259
1260
1261 /*********************************************************************
1262  *
1263  *  MSIX VSI Interrupt Service routine
1264  *
1265  **********************************************************************/
/*
 * Per-queue MSIX interrupt handler: clean RX and TX, retune the
 * adaptive ITR settings, then either defer remaining work to the
 * queue task or re-enable (unmask) the queue interrupt.
 */
void
ixl_msix_que(void *arg)
{
        struct ixl_queue        *que = arg;
        struct ixl_vsi  *vsi = que->vsi;
        struct i40e_hw  *hw = vsi->hw;
        struct tx_ring  *txr = &que->txr;
        bool            more_tx, more_rx;

        /* Protect against spurious interrupts */
        if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
                return;

        ++que->irqs;

        more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

        IXL_TX_LOCK(txr);
        more_tx = ixl_txeof(que);
        /*
        ** Make certain that if the stack 
        ** has anything queued the task gets
        ** scheduled to handle it.
        */
        if (!drbr_empty(vsi->ifp, txr->br))
                more_tx = 1;
        IXL_TX_UNLOCK(txr);

        /* Adjust interrupt throttling from the observed traffic mix */
        ixl_set_queue_rx_itr(que);
        ixl_set_queue_tx_itr(que);

        /* Leftover work goes to the task; otherwise unmask the queue */
        if (more_tx || more_rx)
                taskqueue_enqueue(que->tq, &que->task);
        else
                ixl_enable_queue(hw, que->me);

        return;
}
1304
1305
1306 /*********************************************************************
1307  *
1308  *  MSIX Admin Queue Interrupt Service routine
1309  *
1310  **********************************************************************/
/*
 * MSIX "other causes" / admin-queue interrupt: note the cause bits,
 * handle MDD events inline, and defer the rest to the adminq task.
 */
static void
ixl_msix_adminq(void *arg)
{
        struct ixl_pf   *pf = arg;
        struct i40e_hw  *hw = &pf->hw;
        u32             reg, mask;

        ++pf->admin_irq;

        reg = rd32(hw, I40E_PFINT_ICR0);
        mask = rd32(hw, I40E_PFINT_ICR0_ENA);

        /* Check on the cause */
        if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
                mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

        /* Malicious driver detection is handled immediately */
        if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
                ixl_handle_mdd_event(pf);
                mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
        }

        if (reg & I40E_PFINT_ICR0_VFLR_MASK)
                mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;

        /*
         * NOTE(review): 'mask' is updated above but never written back
         * to I40E_PFINT_ICR0_ENA here — confirm whether the adminq task
         * re-enables these causes.
         */
        reg = rd32(hw, I40E_PFINT_DYN_CTL0);
        reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
        wr32(hw, I40E_PFINT_DYN_CTL0, reg);

        taskqueue_enqueue(pf->tq, &pf->adminq);
        return;
}
1342
1343 /*********************************************************************
1344  *
1345  *  Media Ioctl callback
1346  *
1347  *  This routine is called whenever the user queries the status of
1348  *  the interface using ifconfig.
1349  *
1350  **********************************************************************/
/*
 * ifmedia status callback: refresh link state under the PF lock and
 * translate the firmware-reported PHY type into an ifmedia word.
 */
static void
ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
        struct ixl_vsi  *vsi = ifp->if_softc;
        struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
        struct i40e_hw  *hw = &pf->hw;

        INIT_DEBUGOUT("ixl_media_status: begin");
        IXL_PF_LOCK(pf);

        ixl_update_link_status(pf);

        ifmr->ifm_status = IFM_AVALID;
        ifmr->ifm_active = IFM_ETHER;

        /* No link: report valid-but-inactive and stop */
        if (!vsi->link_up) {
                IXL_PF_UNLOCK(pf);
                return;
        }

        ifmr->ifm_status |= IFM_ACTIVE;
        /* Hardware is always full-duplex */
        ifmr->ifm_active |= IFM_FDX;

        /* Map the PHY type reported by firmware to an ifmedia subtype */
        switch (hw->phy.link_info.phy_type) {
                /* 100 M */
                case I40E_PHY_TYPE_100BASE_TX:
                        ifmr->ifm_active |= IFM_100_TX;
                        break;
                /* 1 G */
                case I40E_PHY_TYPE_1000BASE_T:
                        ifmr->ifm_active |= IFM_1000_T;
                        break;
                case I40E_PHY_TYPE_1000BASE_SX:
                        ifmr->ifm_active |= IFM_1000_SX;
                        break;
                case I40E_PHY_TYPE_1000BASE_LX:
                        ifmr->ifm_active |= IFM_1000_LX;
                        break;
                /* 10 G */
                case I40E_PHY_TYPE_10GBASE_CR1:
                case I40E_PHY_TYPE_10GBASE_CR1_CU:
                case I40E_PHY_TYPE_10GBASE_SFPP_CU:
                /* Using this until a real KR media type */
                case I40E_PHY_TYPE_10GBASE_KR:
                case I40E_PHY_TYPE_10GBASE_KX4:
                        ifmr->ifm_active |= IFM_10G_TWINAX;
                        break;
                case I40E_PHY_TYPE_10GBASE_SR:
                        ifmr->ifm_active |= IFM_10G_SR;
                        break;
                case I40E_PHY_TYPE_10GBASE_LR:
                        ifmr->ifm_active |= IFM_10G_LR;
                        break;
                case I40E_PHY_TYPE_10GBASE_T:
                        ifmr->ifm_active |= IFM_10G_T;
                        break;
                /* 40 G */
                case I40E_PHY_TYPE_40GBASE_CR4:
                case I40E_PHY_TYPE_40GBASE_CR4_CU:
                        ifmr->ifm_active |= IFM_40G_CR4;
                        break;
                case I40E_PHY_TYPE_40GBASE_SR4:
                        ifmr->ifm_active |= IFM_40G_SR4;
                        break;
                case I40E_PHY_TYPE_40GBASE_LR4:
                        ifmr->ifm_active |= IFM_40G_LR4;
                        break;
                /*
                ** Set these to CR4 because OS does not
                ** have types available yet.
                */
                case I40E_PHY_TYPE_40GBASE_KR4:
                case I40E_PHY_TYPE_XLAUI:
                case I40E_PHY_TYPE_XLPPI:
                case I40E_PHY_TYPE_40GBASE_AOC:
                        ifmr->ifm_active |= IFM_40G_CR4;
                        break;
                default:
                        ifmr->ifm_active |= IFM_UNKNOWN;
                        break;
        }
        /* Report flow control status as well */
        if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
                ifmr->ifm_active |= IFM_ETH_TXPAUSE;
        if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
                ifmr->ifm_active |= IFM_ETH_RXPAUSE;

        IXL_PF_UNLOCK(pf);

        return;
}
1443
1444 /*********************************************************************
1445  *
1446  *  Media Ioctl callback
1447  *
1448  *  This routine is called when the user changes speed/duplex using
1449  *  media/mediopt option with ifconfig.
1450  *
1451  **********************************************************************/
1452 static int
1453 ixl_media_change(struct ifnet * ifp)
1454 {
1455         struct ixl_vsi *vsi = ifp->if_softc;
1456         struct ifmedia *ifm = &vsi->media;
1457
1458         INIT_DEBUGOUT("ixl_media_change: begin");
1459
1460         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1461                 return (EINVAL);
1462
1463         if_printf(ifp, "Media change is currently not supported.\n");
1464
1465         return (ENODEV);
1466 }
1467
1468
1469 #ifdef IXL_FDIR
1470 /*
1471 ** ATR: Application Targetted Receive - creates a filter
1472 **      based on TX flow info that will keep the receive
1473 **      portion of the flow on the same queue. Based on the
1474 **      implementation this is only available for TCP connections
1475 */
void
ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
{
        struct ixl_vsi                  *vsi = que->vsi;
        struct tx_ring                  *txr = &que->txr;
        struct i40e_filter_program_desc *FDIR;
        u32                             ptype, dtype;
        int                             idx;

        /* check if ATR is enabled and sample rate */
        if ((!ixl_enable_fdir) || (!txr->atr_rate))
                return;
        /*
        ** We sample all TCP SYN/FIN packets,
        ** or at the selected sample rate 
        */
        txr->atr_count++;
        if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
            (txr->atr_count < txr->atr_rate))
                return;
        txr->atr_count = 0;

        /* Get a descriptor to use */
        /* Filter-program descriptors share the TX ring with data descriptors */
        idx = txr->next_avail;
        FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
        if (++idx == que->num_desc)
                idx = 0;
        txr->avail--;
        txr->next_avail = idx;

        /* Steer matching receive traffic back to this queue */
        ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
            I40E_TXD_FLTR_QW0_QINDEX_MASK;

        /* Packet classifier type: IPv4/TCP or IPv6/TCP by ethertype */
        ptype |= (etype == ETHERTYPE_IP) ?
            (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
            I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
            (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
            I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

        ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

        dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;

        /*
        ** We use the TCP TH_FIN as a trigger to remove
        ** the filter, otherwise its an update.
        */
        dtype |= (th->th_flags & TH_FIN) ?
            (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
            I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
            (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
            I40E_TXD_FLTR_QW1_PCMD_SHIFT);

        dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
            I40E_TXD_FLTR_QW1_DEST_SHIFT;

        dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
            I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

        /* Commit the descriptor (little-endian, per hardware layout) */
        FDIR->qindex_flex_ptype_vsi = htole32(ptype);
        FDIR->dtype_cmd_cntindex = htole32(dtype);
        return;
}
1539 #endif
1540
1541
1542 static void
1543 ixl_set_promisc(struct ixl_vsi *vsi)
1544 {
1545         struct ifnet    *ifp = vsi->ifp;
1546         struct i40e_hw  *hw = vsi->hw;
1547         int             err, mcnt = 0;
1548         bool            uni = FALSE, multi = FALSE;
1549
1550         if (ifp->if_flags & IFF_ALLMULTI)
1551                 multi = TRUE;
1552         else { /* Need to count the multicast addresses */
1553                 struct  ifmultiaddr *ifma;
1554                 if_maddr_rlock(ifp);
1555                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1556                         if (ifma->ifma_addr->sa_family != AF_LINK)
1557                                 continue;
1558                         if (mcnt == MAX_MULTICAST_ADDR)
1559                                 break;
1560                         mcnt++;
1561                 }
1562                 if_maddr_runlock(ifp);
1563         }
1564
1565         if (mcnt >= MAX_MULTICAST_ADDR)
1566                 multi = TRUE;
1567         if (ifp->if_flags & IFF_PROMISC)
1568                 uni = TRUE;
1569
1570         err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1571             vsi->seid, uni, NULL);
1572         err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1573             vsi->seid, multi, NULL);
1574         return;
1575 }
1576
1577 /*********************************************************************
1578  *      Filter Routines
1579  *
1580  *      Routines for multicast and vlan filter management.
1581  *
1582  *********************************************************************/
/*
 * Sync the hardware multicast filter list with the ifnet's multicast
 * membership.  First pass counts the groups: if they exceed the HW
 * filter capacity, fall back to multicast promiscuous; otherwise a
 * second pass adds each address to the driver's filter list and then
 * pushes them to the hardware in one batch.
 */
static void
ixl_add_multi(struct ixl_vsi *vsi)
{
        struct  ifmultiaddr     *ifma;
        struct ifnet            *ifp = vsi->ifp;
        struct i40e_hw          *hw = vsi->hw;
        int                     mcnt = 0, flags;

        IOCTL_DEBUGOUT("ixl_add_multi: begin");

        if_maddr_rlock(ifp);
        /*
        ** First just get a count, to decide if we
        ** we simply use multicast promiscuous.
        */
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                mcnt++;
        }
        if_maddr_runlock(ifp);

        /* Too many groups for the filter table: go promiscuous instead */
        if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
                /* delete existing MC filters */
                ixl_del_hw_filters(vsi, mcnt);
                i40e_aq_set_vsi_multicast_promiscuous(hw,
                    vsi->seid, TRUE, NULL);
                return;
        }

        /* Second pass: queue each group address as a MC filter */
        mcnt = 0;
        if_maddr_rlock(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                ixl_add_mc_filter(vsi,
                    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
                mcnt++;
        }
        if_maddr_runlock(ifp);
        if (mcnt > 0) {
                /* Push all queued filters to hardware in one batch */
                flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
                ixl_add_hw_filters(vsi, flags, mcnt);
        }

        IOCTL_DEBUGOUT("ixl_add_multi: end");
        return;
}
1631
1632 static void
1633 ixl_del_multi(struct ixl_vsi *vsi)
1634 {
1635         struct ifnet            *ifp = vsi->ifp;
1636         struct ifmultiaddr      *ifma;
1637         struct ixl_mac_filter   *f;
1638         int                     mcnt = 0;
1639         bool            match = FALSE;
1640
1641         IOCTL_DEBUGOUT("ixl_del_multi: begin");
1642
1643         /* Search for removed multicast addresses */
1644         if_maddr_rlock(ifp);
1645         SLIST_FOREACH(f, &vsi->ftl, next) {
1646                 if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1647                         match = FALSE;
1648                         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1649                                 if (ifma->ifma_addr->sa_family != AF_LINK)
1650                                         continue;
1651                                 u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1652                                 if (cmp_etheraddr(f->macaddr, mc_addr)) {
1653                                         match = TRUE;
1654                                         break;
1655                                 }
1656                         }
1657                         if (match == FALSE) {
1658                                 f->flags |= IXL_FILTER_DEL;
1659                                 mcnt++;
1660                         }
1661                 }
1662         }
1663         if_maddr_runlock(ifp);
1664
1665         if (mcnt > 0)
1666                 ixl_del_hw_filters(vsi, mcnt);
1667 }
1668
1669
1670 /*********************************************************************
1671  *  Timer routine
1672  *
 *  This routine checks for link status, updates statistics,
1674  *  and runs the watchdog check.
1675  *
1676  **********************************************************************/
1677
static void
ixl_local_timer(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = pf->dev;
	int			hung = 0;	/* count of queues seen hung this pass */
	u32			mask;

	/* The callout and init paths both run with the PF lock held */
	mtx_assert(&pf->pf_mtx, MA_OWNED);

	/* Fire off the adminq task */
	taskqueue_enqueue(pf->tq, &pf->adminq);

	/* Update stats */
	ixl_update_stats_counters(pf);

	/*
	** Check status of the queues: the mask both enables the
	** interrupt and triggers a software interrupt on the vector.
	*/
	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);

	for (int i = 0; i < vsi->num_queues; i++,que++) {
		/* Any queues with outstanding work get a sw irq */
		if (que->busy)
			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to 5 we declare it hung.
		*/
		if (que->busy == IXL_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			vsi->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
				vsi->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXL_MAX_TX_BUSY) {
#ifdef IXL_DEBUG
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
#endif
			/* Saturate at HUNG so the next pass counts it above */
			que->busy = IXL_QUEUE_HUNG;
			++hung;
		}
	}
	/* Only reinit if all queues show hung */
	if (hung == vsi->num_queues)
		goto hung;

	/* Rearm ourselves to run again in one second */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
	return;

hung:
	/* Every queue is hung: reinitialize the whole interface */
	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
	ixl_init_locked(pf);
}
1742
1743 /*
1744 ** Note: this routine updates the OS on the link state
1745 **      the real check of the hardware only happens with
1746 **      a link interrupt.
1747 */
1748 static void
1749 ixl_update_link_status(struct ixl_pf *pf)
1750 {
1751         struct ixl_vsi          *vsi = &pf->vsi;
1752         struct i40e_hw          *hw = &pf->hw;
1753         struct ifnet            *ifp = vsi->ifp;
1754         device_t                dev = pf->dev;
1755
1756
1757         if (vsi->link_up){ 
1758                 if (vsi->link_active == FALSE) {
1759                         i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
1760                         pf->fc = hw->fc.current_mode;
1761                         if (bootverbose) {
1762                                 device_printf(dev,"Link is up %d Gbps %s,"
1763                                     " Flow Control: %s\n",
1764                                     ((vsi->link_speed == I40E_LINK_SPEED_40GB)? 40:10),
1765                                     "Full Duplex", ixl_fc_string[pf->fc]);
1766                         }
1767                         vsi->link_active = TRUE;
1768                         /*
1769                         ** Warn user if link speed on NPAR enabled
1770                         ** partition is not at least 10GB
1771                         */
1772                         if (hw->func_caps.npar_enable &&
1773                            (hw->phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
1774                            hw->phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
1775                                 device_printf(dev, "The partition detected link"
1776                                     "speed that is less than 10Gbps\n");
1777                         if_link_state_change(ifp, LINK_STATE_UP);
1778                 }
1779         } else { /* Link down */
1780                 if (vsi->link_active == TRUE) {
1781                         if (bootverbose)
1782                                 device_printf(dev,"Link is Down\n");
1783                         if_link_state_change(ifp, LINK_STATE_DOWN);
1784                         vsi->link_active = FALSE;
1785                 }
1786         }
1787
1788         return;
1789 }
1790
1791 /*********************************************************************
1792  *
1793  *  This routine disables all traffic on the adapter by issuing a
1794  *  global reset on the MAC and deallocates TX/RX buffers.
1795  *
1796  **********************************************************************/
1797
1798 static void
1799 ixl_stop(struct ixl_pf *pf)
1800 {
1801         struct ixl_vsi  *vsi = &pf->vsi;
1802         struct ifnet    *ifp = vsi->ifp;
1803
1804         mtx_assert(&pf->pf_mtx, MA_OWNED);
1805
1806         INIT_DEBUGOUT("ixl_stop: begin\n");
1807         ixl_disable_intr(vsi);
1808         ixl_disable_rings(vsi);
1809
1810         /* Tell the stack that the interface is no longer active */
1811         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1812
1813         /* Stop the local timer */
1814         callout_stop(&pf->timer);
1815
1816         return;
1817 }
1818
1819
1820 /*********************************************************************
1821  *
1822  *  Setup MSIX Interrupt resources and handlers for the VSI
1823  *
1824  **********************************************************************/
1825 static int
1826 ixl_assign_vsi_legacy(struct ixl_pf *pf)
1827 {
1828         device_t        dev = pf->dev;
1829         struct          ixl_vsi *vsi = &pf->vsi;
1830         struct          ixl_queue *que = vsi->queues;
1831         int             error, rid = 0;
1832
1833         if (pf->msix == 1)
1834                 rid = 1;
1835         pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1836             &rid, RF_SHAREABLE | RF_ACTIVE);
1837         if (pf->res == NULL) {
1838                 device_printf(dev,"Unable to allocate"
1839                     " bus resource: vsi legacy/msi interrupt\n");
1840                 return (ENXIO);
1841         }
1842
1843         /* Set the handler function */
1844         error = bus_setup_intr(dev, pf->res,
1845             INTR_TYPE_NET | INTR_MPSAFE, NULL,
1846             ixl_intr, pf, &pf->tag);
1847         if (error) {
1848                 pf->res = NULL;
1849                 device_printf(dev, "Failed to register legacy/msi handler");
1850                 return (error);
1851         }
1852         bus_describe_intr(dev, pf->res, pf->tag, "irq0");
1853         TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1854         TASK_INIT(&que->task, 0, ixl_handle_que, que);
1855         que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1856             taskqueue_thread_enqueue, &que->tq);
1857         taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1858             device_get_nameunit(dev));
1859         TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1860         pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1861             taskqueue_thread_enqueue, &pf->tq);
1862         taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1863             device_get_nameunit(dev));
1864
1865         return (0);
1866 }
1867
1868
1869 /*********************************************************************
1870  *
1871  *  Setup MSIX Interrupt resources and handlers for the VSI
1872  *
1873  **********************************************************************/
1874 static int
1875 ixl_assign_vsi_msix(struct ixl_pf *pf)
1876 {
1877         device_t        dev = pf->dev;
1878         struct          ixl_vsi *vsi = &pf->vsi;
1879         struct          ixl_queue *que = vsi->queues;
1880         struct          tx_ring  *txr;
1881         int             error, rid, vector = 0;
1882
1883         /* Admin Que is vector 0*/
1884         rid = vector + 1;
1885         pf->res = bus_alloc_resource_any(dev,
1886             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1887         if (!pf->res) {
1888                 device_printf(dev,"Unable to allocate"
1889             " bus resource: Adminq interrupt [%d]\n", rid);
1890                 return (ENXIO);
1891         }
1892         /* Set the adminq vector and handler */
1893         error = bus_setup_intr(dev, pf->res,
1894             INTR_TYPE_NET | INTR_MPSAFE, NULL,
1895             ixl_msix_adminq, pf, &pf->tag);
1896         if (error) {
1897                 pf->res = NULL;
1898                 device_printf(dev, "Failed to register Admin que handler");
1899                 return (error);
1900         }
1901         bus_describe_intr(dev, pf->res, pf->tag, "aq");
1902         pf->admvec = vector;
1903         /* Tasklet for Admin Queue */
1904         TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1905         pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1906             taskqueue_thread_enqueue, &pf->tq);
1907         taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1908             device_get_nameunit(pf->dev));
1909         ++vector;
1910
1911         /* Now set up the stations */
1912         for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1913                 int cpu_id = i;
1914                 rid = vector + 1;
1915                 txr = &que->txr;
1916                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1917                     RF_SHAREABLE | RF_ACTIVE);
1918                 if (que->res == NULL) {
1919                         device_printf(dev,"Unable to allocate"
1920                             " bus resource: que interrupt [%d]\n", vector);
1921                         return (ENXIO);
1922                 }
1923                 /* Set the handler function */
1924                 error = bus_setup_intr(dev, que->res,
1925                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
1926                     ixl_msix_que, que, &que->tag);
1927                 if (error) {
1928                         que->res = NULL;
1929                         device_printf(dev, "Failed to register que handler");
1930                         return (error);
1931                 }
1932                 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
1933                 /* Bind the vector to a CPU */
1934 #ifdef RSS
1935                 cpu_id = rss_getcpu(i % rss_getnumbuckets());
1936 #endif
1937                 bus_bind_intr(dev, que->res, cpu_id);
1938                 que->msix = vector;
1939                 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1940                 TASK_INIT(&que->task, 0, ixl_handle_que, que);
1941                 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1942                     taskqueue_thread_enqueue, &que->tq);
1943 #ifdef RSS
1944                 taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
1945                     cpu_id, "%s (bucket %d)",
1946                     device_get_nameunit(dev), cpu_id);
1947 #else
1948                 taskqueue_start_threads(&que->tq, 1, PI_NET,
1949                     "%s que", device_get_nameunit(dev));
1950 #endif
1951         }
1952
1953         return (0);
1954 }
1955
1956
1957 /*
1958  * Allocate MSI/X vectors
1959  */
static int
ixl_init_msix(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int rid, want, vectors, queues, available;

	/* Override by tuneable */
	if (ixl_enable_msix == 0)
		goto msi;

	/*
	** When used in a virtualized environment
	** PCI BUSMASTER capability may not be set
	** so explicitly set it here and rewrite
	** the ENABLE in the MSIX control register
	** at this point to cause the host to
	** successfully initialize us.
	*/
	{
		u16 pci_cmd_word;
		int msix_ctrl;
		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
		/* NOTE(review): pci_find_cap() return value is unchecked;
		** rid is only meaningful if the MSIX capability exists. */
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	/* First try MSI/X */
	rid = PCIR_BAR(IXL_BAR);
	pf->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!pf->msix_mem) {
		/* May not be enabled */
		device_printf(pf->dev,
		    "Unable to map MSIX table \n");
		goto msi;
	}

	available = pci_msix_count(dev);
	if (available == 0) { /* system has msix disabled */
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, pf->msix_mem);
		pf->msix_mem = NULL;
		goto msi;
	}

	/* Figure out a reasonable auto config value:
	** one queue per CPU, capped by available vectors minus
	** the one reserved for the admin queue. */
	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;

	/* Override with hardcoded value if sane */
	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
		queues = ixl_max_queues;

#ifdef	RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for the admin queue.
	*/
	want = queues + 1;
	if (want <= available)	/* Have enough */
		vectors = want;
	else {
		device_printf(pf->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors available but %d wanted!\n",
		    available, want);
		return (0); /* Will go to Legacy setup */
	}

	if (pci_alloc_msix(dev, &vectors) == 0) {
		device_printf(pf->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		pf->msix = vectors;
		pf->vsi.num_queues = queues;
#ifdef RSS
		/*
		 * If we're doing RSS, the number of queues needs to
		 * match the number of RSS buckets that are configured.
		 *
		 * + If there's more queues than RSS buckets, we'll end
		 *   up with queues that get no traffic.
		 *
		 * + If there's more RSS buckets than queues, we'll end
		 *   up having multiple RSS buckets map to the same queue,
		 *   so there'll be some contention.
		 */
		if (queues != rss_getnumbuckets()) {
			device_printf(dev,
			    "%s: queues (%d) != RSS buckets (%d)"
			    "; performance will be impacted.\n",
			    __func__, queues, rss_getnumbuckets());
		}
#endif
		return (vectors);
	}
msi:
	/* MSI/X unavailable or disabled: fall back to a single
	** vector.  Note this overwrites the global tunables so
	** later code sees the downgraded configuration. */
	vectors = pci_msi_count(dev);
	pf->vsi.num_queues = 1;
	pf->msix = 1;
	ixl_max_queues = 1;
	ixl_enable_msix = 0;
	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
		device_printf(pf->dev,"Using an MSI interrupt\n");
	else {
		pf->msix = 0;
		device_printf(pf->dev,"Using a Legacy interrupt\n");
	}
	return (vectors);
}
2078
2079
2080 /*
2081  * Plumb MSI/X vectors
2082  */
static void
ixl_configure_msix(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	u32		reg;
	u16		vector = 1;	/* queue vectors start after the adminq's 0 */

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);	   /* read to clear */

	/* Enable the "other" interrupt causes routed to vector 0 */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/* 0x7FF marks the vector-0 queue list as empty */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);

	/* Next configure the queues: for each queue pair, chain
	** RX queue i -> TX queue i on vector (i + 1).  The last TX
	** queue points at IXL_QUEUE_EOL to terminate the list. */
	for (int i = 0; i < vsi->num_queues; i++, vector++) {
		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
		wr32(hw, I40E_PFINT_LNKLSTN(i), i);

		/* RX cause: next in chain is TX queue i */
		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_RQCTL(i), reg);

		/* TX cause: next in chain is RX queue i+1 (or EOL) */
		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
		if (i == (vsi->num_queues - 1))
			reg |= (IXL_QUEUE_EOL
			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
		wr32(hw, I40E_QINT_TQCTL(i), reg);
	}
}
2136
2137 /*
2138  * Configure for MSI single vector operation 
2139  */
2140 static void
2141 ixl_configure_legacy(struct ixl_pf *pf)
2142 {
2143         struct i40e_hw  *hw = &pf->hw;
2144         u32             reg;
2145
2146
2147         wr32(hw, I40E_PFINT_ITR0(0), 0);
2148         wr32(hw, I40E_PFINT_ITR0(1), 0);
2149
2150
2151         /* Setup "other" causes */
2152         reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2153             | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2154             | I40E_PFINT_ICR0_ENA_GRST_MASK
2155             | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2156             | I40E_PFINT_ICR0_ENA_GPIO_MASK
2157             | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2158             | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2159             | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2160             | I40E_PFINT_ICR0_ENA_VFLR_MASK
2161             | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2162             ;
2163         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2164
2165         /* SW_ITR_IDX = 0, but don't change INTENA */
2166         wr32(hw, I40E_PFINT_DYN_CTL0,
2167             I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2168             I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2169         /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2170         wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2171
2172         /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2173         wr32(hw, I40E_PFINT_LNKLST0, 0);
2174
2175         /* Associate the queue pair to the vector and enable the q int */
2176         reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2177             | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2178             | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2179         wr32(hw, I40E_QINT_RQCTL(0), reg);
2180
2181         reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2182             | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2183             | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2184         wr32(hw, I40E_QINT_TQCTL(0), reg);
2185
2186         /* Next enable the queue pair */
2187         reg = rd32(hw, I40E_QTX_ENA(0));
2188         reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2189         wr32(hw, I40E_QTX_ENA(0), reg);
2190
2191         reg = rd32(hw, I40E_QRX_ENA(0));
2192         reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2193         wr32(hw, I40E_QRX_ENA(0), reg);
2194 }
2195
2196
2197 /*
2198  * Set the Initial ITR state
2199  */
2200 static void
2201 ixl_configure_itr(struct ixl_pf *pf)
2202 {
2203         struct i40e_hw          *hw = &pf->hw;
2204         struct ixl_vsi          *vsi = &pf->vsi;
2205         struct ixl_queue        *que = vsi->queues;
2206
2207         vsi->rx_itr_setting = ixl_rx_itr;
2208         if (ixl_dynamic_rx_itr)
2209                 vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2210         vsi->tx_itr_setting = ixl_tx_itr;
2211         if (ixl_dynamic_tx_itr)
2212                 vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2213         
2214         for (int i = 0; i < vsi->num_queues; i++, que++) {
2215                 struct tx_ring  *txr = &que->txr;
2216                 struct rx_ring  *rxr = &que->rxr;
2217
2218                 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2219                     vsi->rx_itr_setting);
2220                 rxr->itr = vsi->rx_itr_setting;
2221                 rxr->latency = IXL_AVE_LATENCY;
2222                 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2223                     vsi->tx_itr_setting);
2224                 txr->itr = vsi->tx_itr_setting;
2225                 txr->latency = IXL_AVE_LATENCY;
2226         }
2227 }
2228
2229
2230 static int
2231 ixl_allocate_pci_resources(struct ixl_pf *pf)
2232 {
2233         int             rid;
2234         device_t        dev = pf->dev;
2235
2236         rid = PCIR_BAR(0);
2237         pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2238             &rid, RF_ACTIVE);
2239
2240         if (!(pf->pci_mem)) {
2241                 device_printf(dev,"Unable to allocate bus resource: memory\n");
2242                 return (ENXIO);
2243         }
2244
2245         pf->osdep.mem_bus_space_tag =
2246                 rman_get_bustag(pf->pci_mem);
2247         pf->osdep.mem_bus_space_handle =
2248                 rman_get_bushandle(pf->pci_mem);
2249         pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2250         pf->osdep.flush_reg = I40E_GLGEN_STAT;
2251         pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2252
2253         pf->hw.back = &pf->osdep;
2254
2255         /*
2256         ** Now setup MSI or MSI/X, should
2257         ** return us the number of supported
2258         ** vectors. (Will be 1 for MSI)
2259         */
2260         pf->msix = ixl_init_msix(pf);
2261         return (0);
2262 }
2263
static void
ixl_free_pci_resources(struct ixl_pf * pf)
{
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = pf->dev;
	int			rid, memrid;

	memrid = PCIR_BAR(IXL_BAR);

	/* We may get here before stations are setup */
	if ((!ixl_enable_msix) || (que == NULL))
		goto early;

	/*
	**  Release all msix VSI resources:
	*/
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		/* rid was vector + 1 at allocation time */
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}

early:
	/* Clean the AdminQ interrupt last */
	if (pf->admvec) /* we are doing MSIX */
		rid = pf->admvec + 1;
	else
		/* MSI used rid 1, INTx used rid 0 */
		(pf->msix != 0) ? (rid = 1):(rid = 0);

	if (pf->tag != NULL) {
		bus_teardown_intr(dev, pf->res, pf->tag);
		pf->tag = NULL;
	}
	if (pf->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);

	/* Give back the MSI/MSIX vectors themselves */
	if (pf->msix)
		pci_release_msi(dev);

	/* Unmap the MSIX table BAR, if it was mapped */
	if (pf->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, pf->msix_mem);

	/* Unmap the register BAR */
	if (pf->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), pf->pci_mem);

	return;
}
2318
2319 static void
2320 ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2321 {
2322         /* Display supported media types */
2323         if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2324                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2325
2326         if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2327                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2328
2329         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2330             phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4) ||
2331             phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR) ||
2332             phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
2333             phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
2334             phy_type & (1 << I40E_PHY_TYPE_XFI) ||
2335             phy_type & (1 << I40E_PHY_TYPE_SFI) ||
2336             phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2337                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2338
2339         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2340                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2341         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2342                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2343         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2344                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2345
2346         if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
2347             phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2348             phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
2349             phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
2350             phy_type & (1 << I40E_PHY_TYPE_XLPPI) ||
2351             /* KR4 uses CR4 until the OS has the real media type */
2352             phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2353                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2354
2355         if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2356                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2357         if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2358                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2359 }
2360
2361 /*********************************************************************
2362  *
2363  *  Setup networking device structure and register an interface.
2364  *
2365  **********************************************************************/
static int
ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
{
	struct ifnet		*ifp;
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;
	struct i40e_aq_get_phy_abilities_resp abilities;
	enum i40e_status_code aq_error = 0;

	INIT_DEBUGOUT("ixl_setup_interface: begin");

	ifp = vsi->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		/* NOTE(review): returns -1 rather than ENOMEM; presumably
		 * callers only test for != 0 — confirm against attach path. */
		return (-1);
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	if_initbaudrate(ifp, IF_Gbps(40));
	ifp->if_init = ixl_init;
	ifp->if_softc = vsi;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixl_ioctl;

#if __FreeBSD_version >= 1100036
	if_setgetcounterfn(ifp, ixl_get_counter);
#endif

	ifp->if_transmit = ixl_mq_start;

	ifp->if_qflush = ixl_qflush;

	/* Legacy send queue depth: ring size minus two slots of slack */
	ifp->if_snd.ifq_maxlen = que->num_desc - 2;

	vsi->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_TSO;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_LRO;

	/* VLAN capabilities */
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU
			     |  IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	** Don't turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the ixl driver you can
	** enable this and get full hardware tag filtering.
	*/
	/* Advertised in if_capabilities only — deliberately added AFTER
	 * if_capenable was set above, so it defaults to off. */
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
		     ixl_media_status);

	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, TRUE, &abilities, NULL);
	/* May need delay to detect fiber correctly */
	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
		i40e_msec_delay(200);
		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
		    TRUE, &abilities, NULL);
	}
	if (aq_error) {
		if (aq_error == I40E_ERR_UNKNOWN_PHY)
			device_printf(dev, "Unknown PHY type detected!\n");
		else
			device_printf(dev,
			    "Error getting supported media types, err %d,"
			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
		/* Best-effort: report success so attach continues, but note
		 * that no media entries are added and ether_ifattach() is
		 * skipped on this path. */
		return (0);
	}

	ixl_add_ifmedia(vsi, abilities.phy_type);

	/* Use autoselect media by default */
	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);

	/* Hand the interface to the network stack; must come last */
	ether_ifattach(ifp, hw->mac.addr);

	return (0);
}
2467
2468 static bool
2469 ixl_config_link(struct i40e_hw *hw)
2470 {
2471         bool check;
2472
2473         i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
2474         check = i40e_get_link_status(hw);
2475 #ifdef IXL_DEBUG
2476         printf("Link is %s\n", check ? "up":"down");
2477 #endif
2478         return (check);
2479 }
2480
2481 /*********************************************************************
2482  *
2483  *  Get Firmware Switch configuration
2484  *      - this will need to be more robust when more complex
2485  *        switch configurations are enabled.
2486  *
2487  **********************************************************************/
2488 static int
2489 ixl_switch_config(struct ixl_pf *pf)
2490 {
2491         struct i40e_hw  *hw = &pf->hw; 
2492         struct ixl_vsi  *vsi = &pf->vsi;
2493         device_t        dev = vsi->dev;
2494         struct i40e_aqc_get_switch_config_resp *sw_config;
2495         u8      aq_buf[I40E_AQ_LARGE_BUF];
2496         int     ret = I40E_SUCCESS;
2497         u16     next = 0;
2498
2499         memset(&aq_buf, 0, sizeof(aq_buf));
2500         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2501         ret = i40e_aq_get_switch_config(hw, sw_config,
2502             sizeof(aq_buf), &next, NULL);
2503         if (ret) {
2504                 device_printf(dev,"aq_get_switch_config failed!!\n");
2505                 return (ret);
2506         }
2507 #ifdef IXL_DEBUG
2508         printf("Switch config: header reported: %d in structure, %d total\n",
2509             sw_config->header.num_reported, sw_config->header.num_total);
2510         printf("type=%d seid=%d uplink=%d downlink=%d\n",
2511             sw_config->element[0].element_type,
2512             sw_config->element[0].seid,
2513             sw_config->element[0].uplink_seid,
2514             sw_config->element[0].downlink_seid);
2515 #endif
2516         /* Simplified due to a single VSI at the moment */
2517         vsi->seid = sw_config->element[0].seid;
2518         return (ret);
2519 }
2520
2521 /*********************************************************************
2522  *
2523  *  Initialize the VSI:  this handles contexts, which means things
2524  *                       like the number of descriptors, buffer size,
2525  *                       plus we init the rings thru this function.
2526  *
2527  **********************************************************************/
static int
ixl_initialize_vsi(struct ixl_vsi *vsi)
{
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = vsi->dev;
	struct i40e_hw		*hw = vsi->hw;
	struct i40e_vsi_context ctxt;
	int			err = 0;

	/* Fetch the current VSI parameters from firmware as a baseline */
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = vsi->seid;
	ctxt.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev,"get vsi params failed %x!!\n", err);
		return (err);
	}
#ifdef IXL_DEBUG
	printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
	    ctxt.uplink_seid, ctxt.vsi_number,
	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
#endif
	/*
	** Set the queue and traffic class bits
	**  - when multiple traffic classes are supported
	**    this will need to be more robust.
	*/
	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
	ctxt.info.queue_mapping[0] = 0;
	/* NOTE(review): 0x0800 presumably encodes the TC0 queue-count field
	 * of the TC mapping word — confirm against the i40e datasheet. */
	ctxt.info.tc_mapping[0] = 0x0800;

	/* Set VLAN receive stripping mode */
	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
	else
	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	/* Keep copy of VSI info in VSI for statistic counters */
	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));

	/* Reset VSI statistics */
	ixl_vsi_reset_stats(vsi);
	vsi->hw_filters_add = 0;
	vsi->hw_filters_del = 0;

	/* Push the updated context back to firmware */
	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev,"update vsi params failed %x!!\n",
		   hw->aq.asq_last_status);
		return (err);
	}

	/* Program an HMC TX and RX context for every queue pair */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring		*txr = &que->txr;
		struct rx_ring		*rxr = &que->rxr;
		struct i40e_hmc_obj_txq tctx;
		struct i40e_hmc_obj_rxq rctx;
		u32			txctl;
		u16			size;


		/* Setup the HMC TX Context  */
		size = que->num_desc * sizeof(struct i40e_tx_desc);
		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
		tctx.new_context = 1;
		/* Ring base is programmed in 128-byte units (hence /128) */
		tctx.base = (txr->dma.pa/128);
		tctx.qlen = que->num_desc;
		tctx.fc_ena = 0;
		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
		/* Enable HEAD writeback */
		tctx.head_wb_ena = 1;
		/* Head writeback lands just past the last TX descriptor */
		tctx.head_wb_addr = txr->dma.pa +
		    (que->num_desc * sizeof(struct i40e_tx_desc));
		tctx.rdylist_act = 0;
		err = i40e_clear_lan_tx_queue_context(hw, i);
		if (err) {
			device_printf(dev, "Unable to clear TX context\n");
			break;
		}
		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
		if (err) {
			device_printf(dev, "Unable to set TX context\n");
			break;
		}
		/* Associate the ring with this PF */
		txctl = I40E_QTX_CTL_PF_QUEUE;
		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
		wr32(hw, I40E_QTX_CTL(i), txctl);
		ixl_flush(hw);

		/* Do ring (re)init */
		ixl_init_tx_ring(que);

		/* Next setup the HMC RX Context  */
		if (vsi->max_frame_size <= 2048)
			rxr->mbuf_sz = MCLBYTES;
		else
			rxr->mbuf_sz = MJUMPAGESIZE;

		/* Largest frame the hardware can chain across RX buffers */
		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;

		/* Set up an RX context for the HMC */
		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
		/* ignore header split for now */
		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
		/* Clamp max receive frame to what the buffer chain can hold */
		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
		    vsi->max_frame_size : max_rxmax;
		rctx.dtype = 0;
		rctx.dsize = 1; /* do 32byte descriptors */
		rctx.hsplit_0 = 0;  /* no HDR split initially */
		/* RX ring base, also in 128-byte units */
		rctx.base = (rxr->dma.pa/128);
		rctx.qlen = que->num_desc;
		rctx.tphrdesc_ena = 1;
		rctx.tphwdesc_ena = 1;
		rctx.tphdata_ena = 0;
		rctx.tphhead_ena = 0;
		rctx.lrxqthresh = 2;
		rctx.crcstrip = 1;
		rctx.l2tsel = 1;
		rctx.showiv = 1;
		rctx.fc_ena = 0;
		rctx.prefena = 1;

		err = i40e_clear_lan_rx_queue_context(hw, i);
		if (err) {
			device_printf(dev,
			    "Unable to clear RX context %d\n", i);
			break;
		}
		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
		if (err) {
			device_printf(dev, "Unable to set RX context %d\n", i);
			break;
		}
		err = ixl_init_rx_ring(que);
		if (err) {
			device_printf(dev, "Fail in init_rx_ring %d\n", i);
			break;
		}
		/* Reset the tail pointer before advertising descriptors */
		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
#ifdef DEV_NETMAP
		/* preserve queue */
		if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(vsi->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			/* Leave netmap's reserved slots out of the tail */
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
			wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
		} else
#endif /* DEV_NETMAP */
		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
	}
	return (err);
}
2690
2691
2692 /*********************************************************************
2693  *
2694  *  Free all VSI structs.
2695  *
2696  **********************************************************************/
void
ixl_free_vsi(struct ixl_vsi *vsi)
{
	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
	struct ixl_queue	*que = vsi->queues;
	struct ixl_mac_filter *f;

	/* Free station queues */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		struct rx_ring *rxr = &que->rxr;

		/* NOTE(review): an uninitialized TX mutex also skips the RX
		 * teardown below — presumably fine because setup initializes
		 * TX before RX for each queue, but confirm. */
		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
			continue;
		IXL_TX_LOCK(txr);
		ixl_free_que_tx(que);
		if (txr->base)
			i40e_free_dma_mem(&pf->hw, &txr->dma);
		IXL_TX_UNLOCK(txr);
		IXL_TX_LOCK_DESTROY(txr);

		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
			continue;
		IXL_RX_LOCK(rxr);
		ixl_free_que_rx(que);
		if (rxr->base)
			i40e_free_dma_mem(&pf->hw, &rxr->dma);
		IXL_RX_UNLOCK(rxr);
		IXL_RX_LOCK_DESTROY(rxr);
		
	}
	/* Release the queue array itself */
	free(vsi->queues, M_DEVBUF);

	/* Free VSI filter list */
	while (!SLIST_EMPTY(&vsi->ftl)) {
		f = SLIST_FIRST(&vsi->ftl);
		SLIST_REMOVE_HEAD(&vsi->ftl, next);
		free(f, M_DEVBUF);
	}
}
2737
2738
2739 /*********************************************************************
2740  *
2741  *  Allocate memory for the VSI (virtual station interface) and their
2742  *  associated queues, rings and the descriptors associated with each,
2743  *  called only once at attach.
2744  *
2745  **********************************************************************/
static int
ixl_setup_stations(struct ixl_pf *pf)
{
	device_t		dev = pf->dev;
	struct ixl_vsi		*vsi;
	struct ixl_queue	*que;
	struct tx_ring		*txr;
	struct rx_ring		*rxr;
	int			rsize, tsize;
	int			error = I40E_SUCCESS;

	vsi = &pf->vsi;
	vsi->back = (void *)pf;
	vsi->hw = &pf->hw;
	vsi->id = 0;
	vsi->num_vlans = 0;

	/* Get memory for the station queues */
	if (!(vsi->queues =
	    (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		error = ENOMEM;
		goto early;
	}

	/* Set up each queue pair: TX lock+ring, then RX lock+ring */
	for (int i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		que->num_desc = ixl_ringsz;
		que->me = i;
		que->vsi = vsi;
		/* mark the queue as active */
		vsi->active_queues |= (u64)1 << que->me;
		txr = &que->txr;
		txr->que = que;
		txr->tail = I40E_QTX_TAIL(que->me);

		/* Initialize the TX lock */
		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(dev), que->me);
		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
		/* Create the TX descriptor ring */
		/* Extra u32 covers the head-writeback word past the ring */
		tsize = roundup2((que->num_desc *
		    sizeof(struct i40e_tx_desc)) +
		    sizeof(u32), DBA_ALIGN);
		if (i40e_allocate_dma_mem(&pf->hw,
		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
			device_printf(dev,
			    "Unable to allocate TX Descriptor memory\n");
			error = ENOMEM;
			goto fail;
		}
		txr->base = (struct i40e_tx_desc *)txr->dma.va;
		bzero((void *)txr->base, tsize);
		/* Now allocate transmit soft structs for the ring */
		if (ixl_allocate_tx_data(que)) {
			device_printf(dev,
			    "Critical Failure setting up TX structures\n");
			error = ENOMEM;
			goto fail;
		}
		/* Allocate a buf ring */
		/* NOTE(review): with M_WAITOK this call presumably cannot
		 * return NULL, making the check below dead code — confirm. */
		txr->br = buf_ring_alloc(4096, M_DEVBUF,
		    M_WAITOK, &txr->mtx);
		if (txr->br == NULL) {
			device_printf(dev,
			    "Critical Failure setting up TX buf ring\n");
			error = ENOMEM;
			goto fail;
		}

		/*
		 * Next the RX queues...
		 */ 
		rsize = roundup2(que->num_desc *
		    sizeof(union i40e_rx_desc), DBA_ALIGN);
		rxr = &que->rxr;
		rxr->que = que;
		rxr->tail = I40E_QRX_TAIL(que->me);

		/* Initialize the RX side lock */
		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(dev), que->me);
		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);

		if (i40e_allocate_dma_mem(&pf->hw,
		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
			device_printf(dev,
			    "Unable to allocate RX Descriptor memory\n");
			error = ENOMEM;
			goto fail;
		}
		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
		bzero((void *)rxr->base, rsize);

		/* Allocate receive soft structs for the ring*/
		if (ixl_allocate_rx_data(que)) {
			device_printf(dev,
			    "Critical Failure setting up receive structs\n");
			error = ENOMEM;
			goto fail;
		}
	}

	return (0);

fail:
	/* NOTE(review): only descriptor DMA memory is released here; locks,
	 * buf rings and soft structs are presumably torn down later by
	 * ixl_free_vsi() on the attach-failure path — confirm. */
	for (int i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		rxr = &que->rxr;
		txr = &que->txr;
		if (rxr->base)
			i40e_free_dma_mem(&pf->hw, &rxr->dma);
		if (txr->base)
			i40e_free_dma_mem(&pf->hw, &txr->dma);
	}

early:
	return (error);
}
2866
2867 /*
2868 ** Provide a update to the queue RX
2869 ** interrupt moderation value.
2870 */
2871 static void
2872 ixl_set_queue_rx_itr(struct ixl_queue *que)
2873 {
2874         struct ixl_vsi  *vsi = que->vsi;
2875         struct i40e_hw  *hw = vsi->hw;
2876         struct rx_ring  *rxr = &que->rxr;
2877         u16             rx_itr;
2878         u16             rx_latency = 0;
2879         int             rx_bytes;
2880
2881
2882         /* Idle, do nothing */
2883         if (rxr->bytes == 0)
2884                 return;
2885
2886         if (ixl_dynamic_rx_itr) {
2887                 rx_bytes = rxr->bytes/rxr->itr;
2888                 rx_itr = rxr->itr;
2889
2890                 /* Adjust latency range */
2891                 switch (rxr->latency) {
2892                 case IXL_LOW_LATENCY:
2893                         if (rx_bytes > 10) {
2894                                 rx_latency = IXL_AVE_LATENCY;
2895                                 rx_itr = IXL_ITR_20K;
2896                         }
2897                         break;
2898                 case IXL_AVE_LATENCY:
2899                         if (rx_bytes > 20) {
2900                                 rx_latency = IXL_BULK_LATENCY;
2901                                 rx_itr = IXL_ITR_8K;
2902                         } else if (rx_bytes <= 10) {
2903                                 rx_latency = IXL_LOW_LATENCY;
2904                                 rx_itr = IXL_ITR_100K;
2905                         }
2906                         break;
2907                 case IXL_BULK_LATENCY:
2908                         if (rx_bytes <= 20) {
2909                                 rx_latency = IXL_AVE_LATENCY;
2910                                 rx_itr = IXL_ITR_20K;
2911                         }
2912                         break;
2913                  }
2914
2915                 rxr->latency = rx_latency;
2916
2917                 if (rx_itr != rxr->itr) {
2918                         /* do an exponential smoothing */
2919                         rx_itr = (10 * rx_itr * rxr->itr) /
2920                             ((9 * rx_itr) + rxr->itr);
2921                         rxr->itr = rx_itr & IXL_MAX_ITR;
2922                         wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2923                             que->me), rxr->itr);
2924                 }
2925         } else { /* We may have have toggled to non-dynamic */
2926                 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2927                         vsi->rx_itr_setting = ixl_rx_itr;
2928                 /* Update the hardware if needed */
2929                 if (rxr->itr != vsi->rx_itr_setting) {
2930                         rxr->itr = vsi->rx_itr_setting;
2931                         wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2932                             que->me), rxr->itr);
2933                 }
2934         }
2935         rxr->bytes = 0;
2936         rxr->packets = 0;
2937         return;
2938 }
2939
2940
2941 /*
2942 ** Provide a update to the queue TX
2943 ** interrupt moderation value.
2944 */
2945 static void
2946 ixl_set_queue_tx_itr(struct ixl_queue *que)
2947 {
2948         struct ixl_vsi  *vsi = que->vsi;
2949         struct i40e_hw  *hw = vsi->hw;
2950         struct tx_ring  *txr = &que->txr;
2951         u16             tx_itr;
2952         u16             tx_latency = 0;
2953         int             tx_bytes;
2954
2955
2956         /* Idle, do nothing */
2957         if (txr->bytes == 0)
2958                 return;
2959
2960         if (ixl_dynamic_tx_itr) {
2961                 tx_bytes = txr->bytes/txr->itr;
2962                 tx_itr = txr->itr;
2963
2964                 switch (txr->latency) {
2965                 case IXL_LOW_LATENCY:
2966                         if (tx_bytes > 10) {
2967                                 tx_latency = IXL_AVE_LATENCY;
2968                                 tx_itr = IXL_ITR_20K;
2969                         }
2970                         break;
2971                 case IXL_AVE_LATENCY:
2972                         if (tx_bytes > 20) {
2973                                 tx_latency = IXL_BULK_LATENCY;
2974                                 tx_itr = IXL_ITR_8K;
2975                         } else if (tx_bytes <= 10) {
2976                                 tx_latency = IXL_LOW_LATENCY;
2977                                 tx_itr = IXL_ITR_100K;
2978                         }
2979                         break;
2980                 case IXL_BULK_LATENCY:
2981                         if (tx_bytes <= 20) {
2982                                 tx_latency = IXL_AVE_LATENCY;
2983                                 tx_itr = IXL_ITR_20K;
2984                         }
2985                         break;
2986                 }
2987
2988                 txr->latency = tx_latency;
2989
2990                 if (tx_itr != txr->itr) {
2991                  /* do an exponential smoothing */
2992                         tx_itr = (10 * tx_itr * txr->itr) /
2993                             ((9 * tx_itr) + txr->itr);
2994                         txr->itr = tx_itr & IXL_MAX_ITR;
2995                         wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2996                             que->me), txr->itr);
2997                 }
2998
2999         } else { /* We may have have toggled to non-dynamic */
3000                 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
3001                         vsi->tx_itr_setting = ixl_tx_itr;
3002                 /* Update the hardware if needed */
3003                 if (txr->itr != vsi->tx_itr_setting) {
3004                         txr->itr = vsi->tx_itr_setting;
3005                         wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3006                             que->me), txr->itr);
3007                 }
3008         }
3009         txr->bytes = 0;
3010         txr->packets = 0;
3011         return;
3012 }
3013
3014
3015 static void
3016 ixl_add_hw_stats(struct ixl_pf *pf)
3017 {
3018         device_t dev = pf->dev;
3019         struct ixl_vsi *vsi = &pf->vsi;
3020         struct ixl_queue *queues = vsi->queues;
3021         struct i40e_eth_stats *vsi_stats = &vsi->eth_stats;
3022         struct i40e_hw_port_stats *pf_stats = &pf->stats;
3023
3024         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3025         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3026         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3027
3028         struct sysctl_oid *vsi_node, *queue_node;
3029         struct sysctl_oid_list *vsi_list, *queue_list;
3030
3031         struct tx_ring *txr;
3032         struct rx_ring *rxr;
3033
3034         /* Driver statistics */
3035         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3036                         CTLFLAG_RD, &pf->watchdog_events,
3037                         "Watchdog timeouts");
3038         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3039                         CTLFLAG_RD, &pf->admin_irq,
3040                         "Admin Queue IRQ Handled");
3041
3042         /* VSI statistics */
3043 #define QUEUE_NAME_LEN 32
3044         char queue_namebuf[QUEUE_NAME_LEN];
3045         
3046         // ERJ: Only one vsi now, re-do when >1 VSI enabled
3047         // snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", vsi->info.stat_counter_idx);
3048         vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
3049                                    CTLFLAG_RD, NULL, "VSI-specific stats");
3050         vsi_list = SYSCTL_CHILDREN(vsi_node);
3051
3052         ixl_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats);
3053
3054         /* Queue statistics */
3055         for (int q = 0; q < vsi->num_queues; q++) {
3056                 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3057                 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
3058                                              CTLFLAG_RD, NULL, "Queue #");
3059                 queue_list = SYSCTL_CHILDREN(queue_node);
3060
3061                 txr = &(queues[q].txr);
3062                 rxr = &(queues[q].rxr);
3063
3064                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3065                                 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3066                                 "m_defrag() failed");
3067                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3068                                 CTLFLAG_RD, &(queues[q].dropped_pkts),
3069                                 "Driver dropped packets");
3070                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3071                                 CTLFLAG_RD, &(queues[q].irqs),
3072                                 "irqs on this queue");
3073                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3074                                 CTLFLAG_RD, &(queues[q].tso),
3075                                 "TSO");
3076                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3077                                 CTLFLAG_RD, &(queues[q].tx_dma_setup),
3078                                 "Driver tx dma failure in xmit");
3079                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3080                                 CTLFLAG_RD, &(txr->no_desc),
3081                                 "Queue No Descriptor Available");
3082                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3083                                 CTLFLAG_RD, &(txr->total_packets),
3084                                 "Queue Packets Transmitted");
3085                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3086                                 CTLFLAG_RD, &(txr->tx_bytes),
3087                                 "Queue Bytes Transmitted");
3088                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3089                                 CTLFLAG_RD, &(rxr->rx_packets),
3090                                 "Queue Packets Received");
3091                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3092                                 CTLFLAG_RD, &(rxr->rx_bytes),
3093                                 "Queue Bytes Received");
3094         }
3095
3096         /* MAC stats */
3097         ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
3098 }
3099
3100 static void
3101 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3102         struct sysctl_oid_list *child,
3103         struct i40e_eth_stats *eth_stats)
3104 {
3105         struct ixl_sysctl_info ctls[] =
3106         {
3107                 {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3108                 {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3109                         "Unicast Packets Received"},
3110                 {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3111                         "Multicast Packets Received"},
3112                 {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3113                         "Broadcast Packets Received"},
3114                 {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3115                 {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3116                 {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3117                 {&eth_stats->tx_multicast, "mcast_pkts_txd",
3118                         "Multicast Packets Transmitted"},
3119                 {&eth_stats->tx_broadcast, "bcast_pkts_txd",
3120                         "Broadcast Packets Transmitted"},
3121                 // end
3122                 {0,0,0}
3123         };
3124
3125         struct ixl_sysctl_info *entry = ctls;
3126         while (entry->stat != 0)
3127         {
3128                 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3129                                 CTLFLAG_RD, entry->stat,
3130                                 entry->description);
3131                 entry++;
3132         }
3133 }
3134
3135 static void
3136 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3137         struct sysctl_oid_list *child,
3138         struct i40e_hw_port_stats *stats)
3139 {
3140         struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3141                                     CTLFLAG_RD, NULL, "Mac Statistics");
3142         struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3143
3144         struct i40e_eth_stats *eth_stats = &stats->eth;
3145         ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3146
3147         struct ixl_sysctl_info ctls[] = 
3148         {
3149                 {&stats->crc_errors, "crc_errors", "CRC Errors"},
3150                 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3151                 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3152                 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3153                 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3154                 /* Packet Reception Stats */
3155                 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3156                 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3157                 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3158                 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3159                 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3160                 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3161                 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3162                 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3163                 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3164                 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3165                 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3166                 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3167                 /* Packet Transmission Stats */
3168                 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3169                 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3170                 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3171                 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3172                 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3173                 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3174                 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3175                 /* Flow control */
3176                 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3177                 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3178                 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3179                 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3180                 /* End */
3181                 {0,0,0}
3182         };
3183
3184         struct ixl_sysctl_info *entry = ctls;
3185         while (entry->stat != 0)
3186         {
3187                 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3188                                 CTLFLAG_RD, entry->stat,
3189                                 entry->description);
3190                 entry++;
3191         }
3192 }
3193
/*
** ixl_config_rss - setup RSS 
**  - note this is done for the single vsi
**
** Programs the RSS hash key, enables the packet classifier types
** (PCTYPEs) to hash on, and fills the redirection LUT so that
** received flows are spread round-robin across the VSI's queues.
** With the RSS kernel option, key and hash config come from the
** stack; otherwise a fixed key and a broad PCTYPE set are used.
*/
static void ixl_config_rss(struct ixl_vsi *vsi)
{
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = vsi->hw;
	u32		lut = 0;
	u64		set_hena = 0, hena;
	int		i, j, que_id;
#ifdef RSS
	u32		rss_hash_config;
	u32		rss_seed[IXL_KEYSZ];
#else
	/* Fixed hash seed used when the stack does not supply one */
	u32		rss_seed[IXL_KEYSZ] = {0x41b01687,
			    0x183cfd8c, 0xce880440, 0x580cbc3c,
			    0x35897377, 0x328b25e1, 0x4fa98922,
			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
#endif

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_seed);
#endif

	/* Fill out hash function seed */
	for (i = 0; i < IXL_KEYSZ; i++)
		wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);

	/* Enable PCTYPES for RSS: */
#ifdef RSS
	/* Translate the stack's hash-type mask into hardware PCTYPE bits */
	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	/* No RSS option: enable hashing on all supported traffic types */
	set_hena =
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
#endif
	/*
	** HENA is a 64-bit enable mask split across two 32-bit
	** registers; OR in the new PCTYPEs without clearing any
	** that are already enabled.
	*/
	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= set_hena;
	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
		if (j == vsi->num_queues)
			j = 0;
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues.)
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % vsi->num_queues;
#else
		que_id = j;
#endif
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (que_id &
		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3)
			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}
	ixl_flush(hw);
}
3285
3286
3287 /*
3288 ** This routine is run via an vlan config EVENT,
3289 ** it enables us to use the HW Filter table since
3290 ** we can get the vlan id. This just creates the
3291 ** entry in the soft version of the VFTA, init will
3292 ** repopulate the real table.
3293 */
3294 static void
3295 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3296 {
3297         struct ixl_vsi  *vsi = ifp->if_softc;
3298         struct i40e_hw  *hw = vsi->hw;
3299         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
3300
3301         if (ifp->if_softc !=  arg)   /* Not our event */
3302                 return;
3303
3304         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3305                 return;
3306
3307         IXL_PF_LOCK(pf);
3308         ++vsi->num_vlans;
3309         ixl_add_filter(vsi, hw->mac.addr, vtag);
3310         IXL_PF_UNLOCK(pf);
3311 }
3312
3313 /*
3314 ** This routine is run via an vlan
3315 ** unconfig EVENT, remove our entry
3316 ** in the soft vfta.
3317 */
3318 static void
3319 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3320 {
3321         struct ixl_vsi  *vsi = ifp->if_softc;
3322         struct i40e_hw  *hw = vsi->hw;
3323         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
3324
3325         if (ifp->if_softc !=  arg)
3326                 return;
3327
3328         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3329                 return;
3330
3331         IXL_PF_LOCK(pf);
3332         --vsi->num_vlans;
3333         ixl_del_filter(vsi, hw->mac.addr, vtag);
3334         IXL_PF_UNLOCK(pf);
3335 }
3336
3337 /*
3338 ** This routine updates vlan filters, called by init
3339 ** it scans the filter table and then updates the hw
3340 ** after a soft reset.
3341 */
3342 static void
3343 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3344 {
3345         struct ixl_mac_filter   *f;
3346         int                     cnt = 0, flags;
3347
3348         if (vsi->num_vlans == 0)
3349                 return;
3350         /*
3351         ** Scan the filter list for vlan entries,
3352         ** mark them for addition and then call
3353         ** for the AQ update.
3354         */
3355         SLIST_FOREACH(f, &vsi->ftl, next) {
3356                 if (f->flags & IXL_FILTER_VLAN) {
3357                         f->flags |=
3358                             (IXL_FILTER_ADD |
3359                             IXL_FILTER_USED);
3360                         cnt++;
3361                 }
3362         }
3363         if (cnt == 0) {
3364                 printf("setup vlan: no filters found!\n");
3365                 return;
3366         }
3367         flags = IXL_FILTER_VLAN;
3368         flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3369         ixl_add_hw_filters(vsi, flags, cnt);
3370         return;
3371 }
3372
3373 /*
3374 ** Initialize filter list and add filters that the hardware
3375 ** needs to know about.
3376 */
3377 static void
3378 ixl_init_filters(struct ixl_vsi *vsi)
3379 {
3380         /* Add broadcast address */
3381         u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
3382         ixl_add_filter(vsi, bc, IXL_VLAN_ANY);
3383 }
3384
3385 /*
3386 ** This routine adds mulicast filters
3387 */
3388 static void
3389 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3390 {
3391         struct ixl_mac_filter *f;
3392
3393         /* Does one already exist */
3394         f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3395         if (f != NULL)
3396                 return;
3397
3398         f = ixl_get_filter(vsi);
3399         if (f == NULL) {
3400                 printf("WARNING: no filter available!!\n");
3401                 return;
3402         }
3403         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3404         f->vlan = IXL_VLAN_ANY;
3405         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3406             | IXL_FILTER_MC);
3407
3408         return;
3409 }
3410
3411 /*
3412 ** This routine adds macvlan filters
3413 */
3414 static void
3415 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3416 {
3417         struct ixl_mac_filter   *f, *tmp;
3418         device_t                dev = vsi->dev;
3419
3420         DEBUGOUT("ixl_add_filter: begin");
3421
3422         /* Does one already exist */
3423         f = ixl_find_filter(vsi, macaddr, vlan);
3424         if (f != NULL)
3425                 return;
3426         /*
3427         ** Is this the first vlan being registered, if so we
3428         ** need to remove the ANY filter that indicates we are
3429         ** not in a vlan, and replace that with a 0 filter.
3430         */
3431         if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3432                 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3433                 if (tmp != NULL) {
3434                         ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3435                         ixl_add_filter(vsi, macaddr, 0);
3436                 }
3437         }
3438
3439         f = ixl_get_filter(vsi);
3440         if (f == NULL) {
3441                 device_printf(dev, "WARNING: no filter available!!\n");
3442                 return;
3443         }
3444         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3445         f->vlan = vlan;
3446         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3447         if (f->vlan != IXL_VLAN_ANY)
3448                 f->flags |= IXL_FILTER_VLAN;
3449
3450         ixl_add_hw_filters(vsi, f->flags, 1);
3451         return;
3452 }
3453
3454 static void
3455 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3456 {
3457         struct ixl_mac_filter *f;
3458
3459         f = ixl_find_filter(vsi, macaddr, vlan);
3460         if (f == NULL)
3461                 return;
3462
3463         f->flags |= IXL_FILTER_DEL;
3464         ixl_del_hw_filters(vsi, 1);
3465
3466         /* Check if this is the last vlan removal */
3467         if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3468                 /* Switch back to a non-vlan filter */
3469                 ixl_del_filter(vsi, macaddr, 0);
3470                 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3471         }
3472         return;
3473 }
3474
3475 /*
3476 ** Find the filter with both matching mac addr and vlan id
3477 */
3478 static struct ixl_mac_filter *
3479 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3480 {
3481         struct ixl_mac_filter   *f;
3482         bool                    match = FALSE;
3483
3484         SLIST_FOREACH(f, &vsi->ftl, next) {
3485                 if (!cmp_etheraddr(f->macaddr, macaddr))
3486                         continue;
3487                 if (f->vlan == vlan) {
3488                         match = TRUE;
3489                         break;
3490                 }
3491         }       
3492
3493         if (!match)
3494                 f = NULL;
3495         return (f);
3496 }
3497
3498 /*
3499 ** This routine takes additions to the vsi filter
3500 ** table and creates an Admin Queue call to create
3501 ** the filters in the hardware.
3502 */
3503 static void
3504 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3505 {
3506         struct i40e_aqc_add_macvlan_element_data *a, *b;
3507         struct ixl_mac_filter   *f;
3508         struct i40e_hw  *hw = vsi->hw;
3509         device_t        dev = vsi->dev;
3510         int             err, j = 0;
3511
3512         a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3513             M_DEVBUF, M_NOWAIT | M_ZERO);
3514         if (a == NULL) {
3515                 device_printf(dev, "add_hw_filters failed to get memory\n");
3516                 return;
3517         }
3518
3519         /*
3520         ** Scan the filter list, each time we find one
3521         ** we add it to the admin queue array and turn off
3522         ** the add bit.
3523         */
3524         SLIST_FOREACH(f, &vsi->ftl, next) {
3525                 if (f->flags == flags) {
3526                         b = &a[j]; // a pox on fvl long names :)
3527                         bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3528                         b->vlan_tag =
3529                             (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3530                         b->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3531                         f->flags &= ~IXL_FILTER_ADD;
3532                         j++;
3533                 }
3534                 if (j == cnt)
3535                         break;
3536         }
3537         if (j > 0) {
3538                 err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3539                 if (err) 
3540                         device_printf(dev, "aq_add_macvlan err %d, "
3541                             "aq_error %d\n", err, hw->aq.asq_last_status);
3542                 else
3543                         vsi->hw_filters_add += j;
3544         }
3545         free(a, M_DEVBUF);
3546         return;
3547 }
3548
/*
** This routine takes removals in the vsi filter
** table and creates an Admin Queue call to delete
** the filters in the hardware.
**
** Up to 'cnt' entries flagged IXL_FILTER_DEL are collected into one
** remove-macvlan AQ command. Note the soft-list entries are unlinked
** and freed BEFORE the AQ call is issued; on partial failure only
** the per-element error codes remain to account for what succeeded.
*/
static void
ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
{
	struct i40e_aqc_remove_macvlan_element_data *d, *e;
	struct i40e_hw		*hw = vsi->hw;
	device_t		dev = vsi->dev;
	struct ixl_mac_filter	*f, *f_temp;
	int			err, j = 0;

	DEBUGOUT("ixl_del_hw_filters: begin\n");

	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		printf("del hw filter failed to get memory\n");
		return;
	}

	/* SAFE variant required: entries are removed while iterating */
	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
		if (f->flags & IXL_FILTER_DEL) {
			e = &d[j]; // a pox on fvl long names :)
			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
			/* IXL_VLAN_ANY entries carry no tag in hardware */
			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			/* delete entry from vsi list */
			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
			free(f, M_DEVBUF);
			j++;
		}
		if (j == cnt)
			break;
	}
	if (j > 0) {
		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
		/* NOTE: returns ENOENT every time but seems to work fine,
		   so we'll ignore that specific error. */
		// TODO: Does this still occur on current firmwares?
		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
			/* Count elements that did succeed (error_code == 0) */
			int sc = 0;
			for (int i = 0; i < j; i++)
				sc += (!d[i].error_code);
			vsi->hw_filters_del += sc;
			device_printf(dev,
			    "Failed to remove %d/%d filters, aq error %d\n",
			    j - sc, j, hw->aq.asq_last_status);
		} else
			vsi->hw_filters_del += j;
	}
	free(d, M_DEVBUF);

	DEBUGOUT("ixl_del_hw_filters: end\n");
	return;
}
3607
3608
3609 static void
3610 ixl_enable_rings(struct ixl_vsi *vsi)
3611 {
3612         struct i40e_hw  *hw = vsi->hw;
3613         u32             reg;
3614
3615         for (int i = 0; i < vsi->num_queues; i++) {
3616                 i40e_pre_tx_queue_cfg(hw, i, TRUE);
3617
3618                 reg = rd32(hw, I40E_QTX_ENA(i));
3619                 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3620                     I40E_QTX_ENA_QENA_STAT_MASK;
3621                 wr32(hw, I40E_QTX_ENA(i), reg);
3622                 /* Verify the enable took */
3623                 for (int j = 0; j < 10; j++) {
3624                         reg = rd32(hw, I40E_QTX_ENA(i));
3625                         if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3626                                 break;
3627                         i40e_msec_delay(10);
3628                 }
3629                 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
3630                         printf("TX queue %d disabled!\n", i);
3631
3632                 reg = rd32(hw, I40E_QRX_ENA(i));
3633                 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3634                     I40E_QRX_ENA_QENA_STAT_MASK;
3635                 wr32(hw, I40E_QRX_ENA(i), reg);
3636                 /* Verify the enable took */
3637                 for (int j = 0; j < 10; j++) {
3638                         reg = rd32(hw, I40E_QRX_ENA(i));
3639                         if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3640                                 break;
3641                         i40e_msec_delay(10);
3642                 }
3643                 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3644                         printf("RX queue %d disabled!\n", i);
3645         }
3646 }
3647
3648 static void
3649 ixl_disable_rings(struct ixl_vsi *vsi)
3650 {
3651         struct i40e_hw  *hw = vsi->hw;
3652         u32             reg;
3653
3654         for (int i = 0; i < vsi->num_queues; i++) {
3655                 i40e_pre_tx_queue_cfg(hw, i, FALSE);
3656                 i40e_usec_delay(500);
3657
3658                 reg = rd32(hw, I40E_QTX_ENA(i));
3659                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3660                 wr32(hw, I40E_QTX_ENA(i), reg);
3661                 /* Verify the disable took */
3662                 for (int j = 0; j < 10; j++) {
3663                         reg = rd32(hw, I40E_QTX_ENA(i));
3664                         if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3665                                 break;
3666                         i40e_msec_delay(10);
3667                 }
3668                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3669                         printf("TX queue %d still enabled!\n", i);
3670
3671                 reg = rd32(hw, I40E_QRX_ENA(i));
3672                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3673                 wr32(hw, I40E_QRX_ENA(i), reg);
3674                 /* Verify the disable took */
3675                 for (int j = 0; j < 10; j++) {
3676                         reg = rd32(hw, I40E_QRX_ENA(i));
3677                         if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3678                                 break;
3679                         i40e_msec_delay(10);
3680                 }
3681                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3682                         printf("RX queue %d still enabled!\n", i);
3683         }
3684 }
3685
3686 /**
3687  * ixl_handle_mdd_event
3688  *
3689  * Called from interrupt handler to identify possibly malicious vfs
3690  * (But also detects events from the PF, as well)
3691  **/
3692 static void ixl_handle_mdd_event(struct ixl_pf *pf)
3693 {
3694         struct i40e_hw *hw = &pf->hw;
3695         device_t dev = pf->dev;
3696         bool mdd_detected = false;
3697         bool pf_mdd_detected = false;
3698         u32 reg;
3699
3700         /* find what triggered the MDD event */
3701         reg = rd32(hw, I40E_GL_MDET_TX);
3702         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3703                 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3704                                 I40E_GL_MDET_TX_PF_NUM_SHIFT;
3705                 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3706                                 I40E_GL_MDET_TX_EVENT_SHIFT;
3707                 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3708                                 I40E_GL_MDET_TX_QUEUE_SHIFT;
3709                 device_printf(dev,
3710                          "Malicious Driver Detection event 0x%02x"
3711                          " on TX queue %d pf number 0x%02x\n",
3712                          event, queue, pf_num);
3713                 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3714                 mdd_detected = true;
3715         }
3716         reg = rd32(hw, I40E_GL_MDET_RX);
3717         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3718                 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3719                                 I40E_GL_MDET_RX_FUNCTION_SHIFT;
3720                 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3721                                 I40E_GL_MDET_RX_EVENT_SHIFT;
3722                 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3723                                 I40E_GL_MDET_RX_QUEUE_SHIFT;
3724                 device_printf(dev,
3725                          "Malicious Driver Detection event 0x%02x"
3726                          " on RX queue %d of function 0x%02x\n",
3727                          event, queue, func);
3728                 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3729                 mdd_detected = true;
3730         }
3731
3732         if (mdd_detected) {
3733                 reg = rd32(hw, I40E_PF_MDET_TX);
3734                 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3735                         wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3736                         device_printf(dev,
3737                                  "MDD TX event is for this function 0x%08x",
3738                                  reg);
3739                         pf_mdd_detected = true;
3740                 }
3741                 reg = rd32(hw, I40E_PF_MDET_RX);
3742                 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3743                         wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3744                         device_printf(dev,
3745                                  "MDD RX event is for this function 0x%08x",
3746                                  reg);
3747                         pf_mdd_detected = true;
3748                 }
3749         }
3750
3751         /* re-enable mdd interrupt cause */
3752         reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3753         reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3754         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3755         ixl_flush(hw);
3756 }
3757
3758 static void
3759 ixl_enable_intr(struct ixl_vsi *vsi)
3760 {
3761         struct i40e_hw          *hw = vsi->hw;
3762         struct ixl_queue        *que = vsi->queues;
3763
3764         if (ixl_enable_msix) {
3765                 ixl_enable_adminq(hw);
3766                 for (int i = 0; i < vsi->num_queues; i++, que++)
3767                         ixl_enable_queue(hw, que->me);
3768         } else
3769                 ixl_enable_legacy(hw);
3770 }
3771
3772 static void
3773 ixl_disable_intr(struct ixl_vsi *vsi)
3774 {
3775         struct i40e_hw          *hw = vsi->hw;
3776         struct ixl_queue        *que = vsi->queues;
3777
3778         if (ixl_enable_msix) {
3779                 ixl_disable_adminq(hw);
3780                 for (int i = 0; i < vsi->num_queues; i++, que++)
3781                         ixl_disable_queue(hw, que->me);
3782         } else
3783                 ixl_disable_legacy(hw);
3784 }
3785
3786 static void
3787 ixl_enable_adminq(struct i40e_hw *hw)
3788 {
3789         u32             reg;
3790
3791         reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3792             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3793             (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3794         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3795         ixl_flush(hw);
3796         return;
3797 }
3798
3799 static void
3800 ixl_disable_adminq(struct i40e_hw *hw)
3801 {
3802         u32             reg;
3803
3804         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3805         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3806
3807         return;
3808 }
3809
3810 static void
3811 ixl_enable_queue(struct i40e_hw *hw, int id)
3812 {
3813         u32             reg;
3814
3815         reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3816             I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3817             (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3818         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3819 }
3820
3821 static void
3822 ixl_disable_queue(struct i40e_hw *hw, int id)
3823 {
3824         u32             reg;
3825
3826         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3827         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3828
3829         return;
3830 }
3831
3832 static void
3833 ixl_enable_legacy(struct i40e_hw *hw)
3834 {
3835         u32             reg;
3836         reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3837             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3838             (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3839         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3840 }
3841
3842 static void
3843 ixl_disable_legacy(struct i40e_hw *hw)
3844 {
3845         u32             reg;
3846
3847         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3848         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3849
3850         return;
3851 }
3852
3853 static void
3854 ixl_update_stats_counters(struct ixl_pf *pf)
3855 {
3856         struct i40e_hw  *hw = &pf->hw;
3857         struct ixl_vsi *vsi = &pf->vsi;
3858
3859         struct i40e_hw_port_stats *nsd = &pf->stats;
3860         struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3861
3862         /* Update hw stats */
3863         ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3864                            pf->stat_offsets_loaded,
3865                            &osd->crc_errors, &nsd->crc_errors);
3866         ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3867                            pf->stat_offsets_loaded,
3868                            &osd->illegal_bytes, &nsd->illegal_bytes);
3869         ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3870                            I40E_GLPRT_GORCL(hw->port),
3871                            pf->stat_offsets_loaded,
3872                            &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3873         ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3874                            I40E_GLPRT_GOTCL(hw->port),
3875                            pf->stat_offsets_loaded,
3876                            &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3877         ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3878                            pf->stat_offsets_loaded,
3879                            &osd->eth.rx_discards,
3880                            &nsd->eth.rx_discards);
3881         ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3882                            I40E_GLPRT_UPRCL(hw->port),
3883                            pf->stat_offsets_loaded,
3884                            &osd->eth.rx_unicast,
3885                            &nsd->eth.rx_unicast);
3886         ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3887                            I40E_GLPRT_UPTCL(hw->port),
3888                            pf->stat_offsets_loaded,
3889                            &osd->eth.tx_unicast,
3890                            &nsd->eth.tx_unicast);
3891         ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3892                            I40E_GLPRT_MPRCL(hw->port),
3893                            pf->stat_offsets_loaded,
3894                            &osd->eth.rx_multicast,
3895                            &nsd->eth.rx_multicast);
3896         ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3897                            I40E_GLPRT_MPTCL(hw->port),
3898                            pf->stat_offsets_loaded,
3899                            &osd->eth.tx_multicast,
3900                            &nsd->eth.tx_multicast);
3901         ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3902                            I40E_GLPRT_BPRCL(hw->port),
3903                            pf->stat_offsets_loaded,
3904                            &osd->eth.rx_broadcast,
3905                            &nsd->eth.rx_broadcast);
3906         ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3907                            I40E_GLPRT_BPTCL(hw->port),
3908                            pf->stat_offsets_loaded,
3909                            &osd->eth.tx_broadcast,
3910                            &nsd->eth.tx_broadcast);
3911
3912         ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3913                            pf->stat_offsets_loaded,
3914                            &osd->tx_dropped_link_down,
3915                            &nsd->tx_dropped_link_down);
3916         ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3917                            pf->stat_offsets_loaded,
3918                            &osd->mac_local_faults,
3919                            &nsd->mac_local_faults);
3920         ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3921                            pf->stat_offsets_loaded,
3922                            &osd->mac_remote_faults,
3923                            &nsd->mac_remote_faults);
3924         ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3925                            pf->stat_offsets_loaded,
3926                            &osd->rx_length_errors,
3927                            &nsd->rx_length_errors);
3928
3929         /* Flow control (LFC) stats */
3930         ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3931                            pf->stat_offsets_loaded,
3932                            &osd->link_xon_rx, &nsd->link_xon_rx);
3933         ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3934                            pf->stat_offsets_loaded,
3935                            &osd->link_xon_tx, &nsd->link_xon_tx);
3936         ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3937                            pf->stat_offsets_loaded,
3938                            &osd->link_xoff_rx, &nsd->link_xoff_rx);
3939         ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3940                            pf->stat_offsets_loaded,
3941                            &osd->link_xoff_tx, &nsd->link_xoff_tx);
3942
3943         /* Packet size stats rx */
3944         ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3945                            I40E_GLPRT_PRC64L(hw->port),
3946                            pf->stat_offsets_loaded,
3947                            &osd->rx_size_64, &nsd->rx_size_64);
3948         ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3949                            I40E_GLPRT_PRC127L(hw->port),
3950                            pf->stat_offsets_loaded,
3951                            &osd->rx_size_127, &nsd->rx_size_127);
3952         ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3953                            I40E_GLPRT_PRC255L(hw->port),
3954                            pf->stat_offsets_loaded,
3955                            &osd->rx_size_255, &nsd->rx_size_255);
3956         ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3957                            I40E_GLPRT_PRC511L(hw->port),
3958                            pf->stat_offsets_loaded,
3959                            &osd->rx_size_511, &nsd->rx_size_511);
3960         ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3961                            I40E_GLPRT_PRC1023L(hw->port),
3962                            pf->stat_offsets_loaded,
3963                            &osd->rx_size_1023, &nsd->rx_size_1023);
3964         ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3965                            I40E_GLPRT_PRC1522L(hw->port),
3966                            pf->stat_offsets_loaded,
3967                            &osd->rx_size_1522, &nsd->rx_size_1522);
3968         ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3969                            I40E_GLPRT_PRC9522L(hw->port),
3970                            pf->stat_offsets_loaded,
3971                            &osd->rx_size_big, &nsd->rx_size_big);
3972
3973         /* Packet size stats tx */
3974         ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3975                            I40E_GLPRT_PTC64L(hw->port),
3976                            pf->stat_offsets_loaded,
3977                            &osd->tx_size_64, &nsd->tx_size_64);
3978         ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3979                            I40E_GLPRT_PTC127L(hw->port),
3980                            pf->stat_offsets_loaded,
3981                            &osd->tx_size_127, &nsd->tx_size_127);
3982         ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3983                            I40E_GLPRT_PTC255L(hw->port),
3984                            pf->stat_offsets_loaded,
3985                            &osd->tx_size_255, &nsd->tx_size_255);
3986         ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3987                            I40E_GLPRT_PTC511L(hw->port),
3988                            pf->stat_offsets_loaded,
3989                            &osd->tx_size_511, &nsd->tx_size_511);
3990         ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3991                            I40E_GLPRT_PTC1023L(hw->port),
3992                            pf->stat_offsets_loaded,
3993                            &osd->tx_size_1023, &nsd->tx_size_1023);
3994         ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3995                            I40E_GLPRT_PTC1522L(hw->port),
3996                            pf->stat_offsets_loaded,
3997                            &osd->tx_size_1522, &nsd->tx_size_1522);
3998         ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3999                            I40E_GLPRT_PTC9522L(hw->port),
4000                            pf->stat_offsets_loaded,
4001                            &osd->tx_size_big, &nsd->tx_size_big);
4002
4003         ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
4004                            pf->stat_offsets_loaded,
4005                            &osd->rx_undersize, &nsd->rx_undersize);
4006         ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
4007                            pf->stat_offsets_loaded,
4008                            &osd->rx_fragments, &nsd->rx_fragments);
4009         ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
4010                            pf->stat_offsets_loaded,
4011                            &osd->rx_oversize, &nsd->rx_oversize);
4012         ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
4013                            pf->stat_offsets_loaded,
4014                            &osd->rx_jabber, &nsd->rx_jabber);
4015         pf->stat_offsets_loaded = true;
4016         /* End hw stats */
4017
4018         /* Update vsi stats */
4019         ixl_update_eth_stats(vsi);
4020
4021         /* OS statistics */
4022         // ERJ - these are per-port, update all vsis?
4023         IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes);
4024 }
4025
4026 /*
4027 ** Tasklet handler for MSIX Adminq interrupts
4028 **  - do outside interrupt since it might sleep
4029 */
4030 static void
4031 ixl_do_adminq(void *context, int pending)
4032 {
4033         struct ixl_pf                   *pf = context;
4034         struct i40e_hw                  *hw = &pf->hw;
4035         struct ixl_vsi                  *vsi = &pf->vsi;
4036         struct i40e_arq_event_info      event;
4037         i40e_status                     ret;
4038         u32                             reg, loop = 0;
4039         u16                             opcode, result;
4040
4041         event.buf_len = IXL_AQ_BUF_SZ;
4042         event.msg_buf = malloc(event.buf_len,
4043             M_DEVBUF, M_NOWAIT | M_ZERO);
4044         if (!event.msg_buf) {
4045                 printf("Unable to allocate adminq memory\n");
4046                 return;
4047         }
4048
4049         /* clean and process any events */
4050         do {
4051                 ret = i40e_clean_arq_element(hw, &event, &result);
4052                 if (ret)
4053                         break;
4054                 opcode = LE16_TO_CPU(event.desc.opcode);
4055                 switch (opcode) {
4056                 case i40e_aqc_opc_get_link_status:
4057                         vsi->link_up = ixl_config_link(hw);
4058                         ixl_update_link_status(pf);
4059                         break;
4060                 case i40e_aqc_opc_send_msg_to_pf:
4061                         /* process pf/vf communication here */
4062                         break;
4063                 case i40e_aqc_opc_event_lan_overflow:
4064                         break;
4065                 default:
4066 #ifdef IXL_DEBUG
4067                         printf("AdminQ unknown event %x\n", opcode);
4068 #endif
4069                         break;
4070                 }
4071
4072         } while (result && (loop++ < IXL_ADM_LIMIT));
4073
4074         reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4075         reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4076         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4077         free(event.msg_buf, M_DEVBUF);
4078
4079         if (pf->msix > 1)
4080                 ixl_enable_adminq(&pf->hw);
4081         else
4082                 ixl_enable_intr(vsi);
4083 }
4084
4085 static int
4086 ixl_debug_info(SYSCTL_HANDLER_ARGS)
4087 {
4088         struct ixl_pf   *pf;
4089         int             error, input = 0;
4090
4091         error = sysctl_handle_int(oidp, &input, 0, req);
4092
4093         if (error || !req->newptr)
4094                 return (error);
4095
4096         if (input == 1) {
4097                 pf = (struct ixl_pf *)arg1;
4098                 ixl_print_debug_info(pf);
4099         }
4100
4101         return (error);
4102 }
4103
4104 static void
4105 ixl_print_debug_info(struct ixl_pf *pf)
4106 {
4107         struct i40e_hw          *hw = &pf->hw;
4108         struct ixl_vsi          *vsi = &pf->vsi;
4109         struct ixl_queue        *que = vsi->queues;
4110         struct rx_ring          *rxr = &que->rxr;
4111         struct tx_ring          *txr = &que->txr;
4112         u32                     reg;    
4113
4114
4115         printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4116         printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4117         printf("RX next check = %x\n", rxr->next_check);
4118         printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4119         printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4120         printf("TX desc avail = %x\n", txr->avail);
4121
4122         reg = rd32(hw, I40E_GLV_GORCL(0xc));
4123          printf("RX Bytes = %x\n", reg);
4124         reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4125          printf("Port RX Bytes = %x\n", reg);
4126         reg = rd32(hw, I40E_GLV_RDPC(0xc));
4127          printf("RX discard = %x\n", reg);
4128         reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4129          printf("Port RX discard = %x\n", reg);
4130
4131         reg = rd32(hw, I40E_GLV_TEPC(0xc));
4132          printf("TX errors = %x\n", reg);
4133         reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4134          printf("TX Bytes = %x\n", reg);
4135
4136         reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4137          printf("RX undersize = %x\n", reg);
4138         reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4139          printf("RX fragments = %x\n", reg);
4140         reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4141          printf("RX oversize = %x\n", reg);
4142         reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4143          printf("RX length error = %x\n", reg);
4144         reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4145          printf("mac remote fault = %x\n", reg);
4146         reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4147          printf("mac local fault = %x\n", reg);
4148 }
4149
4150 /**
4151  * Update VSI-specific ethernet statistics counters.
4152  **/
void ixl_update_eth_stats(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *es;
	struct i40e_eth_stats *oes;
	int i;
	uint64_t tx_discards;
	struct i40e_hw_port_stats *nsd;
	/* HW stat-counter block index assigned to this VSI by firmware */
	u16 stat_idx = vsi->info.stat_counter_idx;

	es = &vsi->eth_stats;		/* running totals reported to the OS */
	oes = &vsi->eth_stats_offsets;	/* baseline captured on first read */
	nsd = &pf->stats;		/* port-wide stats, for drop totals */

	/* Gather up the stats that the hw collects */
	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);

	/* RX 48-bit counters: good octets, then uni/multi/broadcast pkts */
	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	/* TX 48-bit counters: good octets, then uni/multi/broadcast pkts */
	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	/* Subsequent reads now compute deltas against the saved offsets */
	vsi->stat_offsets_loaded = true;

	/* Fold per-queue buf_ring drops into the TX discard total */
	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
	for (i = 0; i < vsi->num_queues; i++)
		tx_discards += vsi->queues[i].txr.br->br_drops;

	/* Update ifnet stats */
	IXL_SET_IPACKETS(vsi, es->rx_unicast +
	                   es->rx_multicast +
	                   es->rx_broadcast);
	IXL_SET_OPACKETS(vsi, es->tx_unicast +
	                   es->tx_multicast +
	                   es->tx_broadcast);
	IXL_SET_IBYTES(vsi, es->rx_bytes);
	IXL_SET_OBYTES(vsi, es->tx_bytes);
	IXL_SET_IMCASTS(vsi, es->rx_multicast);
	IXL_SET_OMCASTS(vsi, es->tx_multicast);

	IXL_SET_OERRORS(vsi, es->tx_errors);
	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
	IXL_SET_OQDROPS(vsi, tx_discards);
	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
	IXL_SET_COLLISIONS(vsi, 0);
}
4233
4234 /**
4235  * Reset all of the stats for the given pf
4236  **/
4237 void ixl_pf_reset_stats(struct ixl_pf *pf)
4238 {
4239         bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4240         bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4241         pf->stat_offsets_loaded = false;
4242 }
4243
4244 /**
4245  * Resets all stats of the given vsi
4246  **/
4247 void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4248 {
4249         bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4250         bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4251         vsi->stat_offsets_loaded = false;
4252 }
4253
4254 /**
4255  * Read and update a 48 bit stat from the hw
4256  *
4257  * Since the device stats are not reset at PFReset, they likely will not
4258  * be zeroed when the driver starts.  We'll save the first values read
4259  * and use them as offsets to be subtracted from the raw values in order
4260  * to report stats that count from zero.
4261  **/
4262 static void
4263 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4264         bool offset_loaded, u64 *offset, u64 *stat)
4265 {
4266         u64 new_data;
4267
4268 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4269         new_data = rd64(hw, loreg);
4270 #else
4271         /*
4272          * Use two rd32's instead of one rd64; FreeBSD versions before
4273          * 10 don't support 8 byte bus reads/writes.
4274          */
4275         new_data = rd32(hw, loreg);
4276         new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4277 #endif
4278
4279         if (!offset_loaded)
4280                 *offset = new_data;
4281         if (new_data >= *offset)
4282                 *stat = new_data - *offset;
4283         else
4284                 *stat = (new_data + ((u64)1 << 48)) - *offset;
4285         *stat &= 0xFFFFFFFFFFFFULL;
4286 }
4287
4288 /**
4289  * Read and update a 32 bit stat from the hw
4290  **/
4291 static void
4292 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4293         bool offset_loaded, u64 *offset, u64 *stat)
4294 {
4295         u32 new_data;
4296
4297         new_data = rd32(hw, reg);
4298         if (!offset_loaded)
4299                 *offset = new_data;
4300         if (new_data >= *offset)
4301                 *stat = (u32)(new_data - *offset);
4302         else
4303                 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4304 }
4305
4306 /*
4307 ** Set flow control using sysctl:
4308 **      0 - off
4309 **      1 - rx pause
4310 **      2 - tx pause
4311 **      3 - full
4312 */
4313 static int
4314 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4315 {
4316         /*
4317          * TODO: ensure flow control is disabled if
4318          * priority flow control is enabled
4319          *
4320          * TODO: ensure tx CRC by hardware should be enabled
4321          * if tx flow control is enabled.
4322          */
4323         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4324         struct i40e_hw *hw = &pf->hw;
4325         device_t dev = pf->dev;
4326         int error = 0;
4327         enum i40e_status_code aq_error = 0;
4328         u8 fc_aq_err = 0;
4329
4330         /* Get request */
4331         error = sysctl_handle_int(oidp, &pf->fc, 0, req);
4332         if ((error) || (req->newptr == NULL))
4333                 return (error);
4334         if (pf->fc < 0 || pf->fc > 3) {
4335                 device_printf(dev,
4336                     "Invalid fc mode; valid modes are 0 through 3\n");
4337                 return (EINVAL);
4338         }
4339
4340         /*
4341         ** Changing flow control mode currently does not work on
4342         ** 40GBASE-CR4 PHYs
4343         */
4344         if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4345             || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4346                 device_printf(dev, "Changing flow control mode unsupported"
4347                     " on 40GBase-CR4 media.\n");
4348                 return (ENODEV);
4349         }
4350
4351         /* Set fc ability for port */
4352         hw->fc.requested_mode = pf->fc;
4353         aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4354         if (aq_error) {
4355                 device_printf(dev,
4356                     "%s: Error setting new fc mode %d; fc_err %#x\n",
4357                     __func__, aq_error, fc_aq_err);
4358                 return (EAGAIN);
4359         }
4360
4361         return (0);
4362 }
4363
4364 static int
4365 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4366 {
4367         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4368         struct i40e_hw *hw = &pf->hw;
4369         int error = 0, index = 0;
4370
4371         char *speeds[] = {
4372                 "Unknown",
4373                 "100M",
4374                 "1G",
4375                 "10G",
4376                 "40G",
4377                 "20G"
4378         };
4379
4380         ixl_update_link_status(pf);
4381
4382         switch (hw->phy.link_info.link_speed) {
4383         case I40E_LINK_SPEED_100MB:
4384                 index = 1;
4385                 break;
4386         case I40E_LINK_SPEED_1GB:
4387                 index = 2;
4388                 break;
4389         case I40E_LINK_SPEED_10GB:
4390                 index = 3;
4391                 break;
4392         case I40E_LINK_SPEED_40GB:
4393                 index = 4;
4394                 break;
4395         case I40E_LINK_SPEED_20GB:
4396                 index = 5;
4397                 break;
4398         case I40E_LINK_SPEED_UNKNOWN:
4399         default:
4400                 index = 0;
4401                 break;
4402         }
4403
4404         error = sysctl_handle_string(oidp, speeds[index],
4405             strlen(speeds[index]), req);
4406         return (error);
4407 }
4408
/*
** Apply a new advertised-speed mask (bitmask from ixl_set_advertise)
** to the PHY via the admin queue, then reinit the interface so the
** change takes effect.  Returns 0 or EAGAIN on AQ failure.
*/
static int
ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_aq_set_phy_config config;
	enum i40e_status_code aq_error = 0;

	/* Get current capability information */
	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, FALSE, &abilities, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error getting phy capabilities %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return (EAGAIN);
	}

	/* Prepare new config, carrying forward the current abilities */
	bzero(&config, sizeof(config));
	config.phy_type = abilities.phy_type;
	config.abilities = abilities.abilities
	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	config.eee_capability = abilities.eee_capability;
	config.eeer = abilities.eeer_val;
	config.low_power_ctrl = abilities.d3_lpan;
	/* Translate into aq cmd link_speed (0x4=10G, 0x2=1G, 0x1=100M) */
	if (speeds & 0x4)
		config.link_speed |= I40E_LINK_SPEED_10GB;
	if (speeds & 0x2)
		config.link_speed |= I40E_LINK_SPEED_1GB;
	if (speeds & 0x1)
		config.link_speed |= I40E_LINK_SPEED_100MB;

	/* Do aq command & restart link */
	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error setting new phy config %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return (EAGAIN);
	}

	/*
	** This seems a bit heavy handed, but we
	** need to get a reinit on some devices
	*/
	IXL_PF_LOCK(pf);
	ixl_stop(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);

	return (0);
}
4466
4467 /*
4468 ** Control link advertise speed:
4469 **      Flags:
4470 **      0x1 - advertise 100 Mb
4471 **      0x2 - advertise 1G
4472 **      0x4 - advertise 10G
4473 **
4474 ** Does not work on 40G devices.
4475 */
4476 static int
4477 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4478 {
4479         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4480         struct i40e_hw *hw = &pf->hw;
4481         device_t dev = pf->dev;
4482         int requested_ls = 0;
4483         int error = 0;
4484
4485         /*
4486         ** FW doesn't support changing advertised speed
4487         ** for 40G devices; speed is always 40G.
4488         */
4489         if (i40e_is_40G_device(hw->device_id))
4490                 return (ENODEV);
4491
4492         /* Read in new mode */
4493         requested_ls = pf->advertised_speed;
4494         error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4495         if ((error) || (req->newptr == NULL))
4496                 return (error);
4497         if (requested_ls < 1 || requested_ls > 7) {
4498                 device_printf(dev,
4499                     "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4500                 return (EINVAL);
4501         }
4502
4503         /* Exit if no change */
4504         if (pf->advertised_speed == requested_ls)
4505                 return (0);
4506
4507         error = ixl_set_advertised_speeds(pf, requested_ls);
4508         if (error)
4509                 return (error);
4510
4511         pf->advertised_speed = requested_ls;
4512         ixl_update_link_status(pf);
4513         return (0);
4514 }
4515
4516 /*
4517 ** Get the width and transaction speed of
4518 ** the bus this adapter is plugged into.
4519 */
4520 static u16
4521 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4522 {
4523         u16                     link;
4524         u32                     offset;
4525                 
4526                 
4527         /* Get the PCI Express Capabilities offset */
4528         pci_find_cap(dev, PCIY_EXPRESS, &offset);
4529
4530         /* ...and read the Link Status Register */
4531         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4532
4533         switch (link & I40E_PCI_LINK_WIDTH) {
4534         case I40E_PCI_LINK_WIDTH_1:
4535                 hw->bus.width = i40e_bus_width_pcie_x1;
4536                 break;
4537         case I40E_PCI_LINK_WIDTH_2:
4538                 hw->bus.width = i40e_bus_width_pcie_x2;
4539                 break;
4540         case I40E_PCI_LINK_WIDTH_4:
4541                 hw->bus.width = i40e_bus_width_pcie_x4;
4542                 break;
4543         case I40E_PCI_LINK_WIDTH_8:
4544                 hw->bus.width = i40e_bus_width_pcie_x8;
4545                 break;
4546         default:
4547                 hw->bus.width = i40e_bus_width_unknown;
4548                 break;
4549         }
4550
4551         switch (link & I40E_PCI_LINK_SPEED) {
4552         case I40E_PCI_LINK_SPEED_2500:
4553                 hw->bus.speed = i40e_bus_speed_2500;
4554                 break;
4555         case I40E_PCI_LINK_SPEED_5000:
4556                 hw->bus.speed = i40e_bus_speed_5000;
4557                 break;
4558         case I40E_PCI_LINK_SPEED_8000:
4559                 hw->bus.speed = i40e_bus_speed_8000;
4560                 break;
4561         default:
4562                 hw->bus.speed = i40e_bus_speed_unknown;
4563                 break;
4564         }
4565
4566
4567         device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4568             ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4569             (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4570             (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4571             (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4572             (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4573             (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4574             ("Unknown"));
4575
4576         if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4577             (hw->bus.speed < i40e_bus_speed_8000)) {
4578                 device_printf(dev, "PCI-Express bandwidth available"
4579                     " for this device\n     is not sufficient for"
4580                     " normal operation.\n");
4581                 device_printf(dev, "For expected performance a x8 "
4582                     "PCIE Gen3 slot is required.\n");
4583         }
4584
4585         return (link);
4586 }
4587
4588 static int
4589 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4590 {
4591         struct ixl_pf   *pf = (struct ixl_pf *)arg1;
4592         struct i40e_hw  *hw = &pf->hw;
4593         char            buf[32];
4594
4595         snprintf(buf, sizeof(buf),
4596             "f%d.%d a%d.%d n%02x.%02x e%08x",
4597             hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
4598             hw->aq.api_maj_ver, hw->aq.api_min_ver,
4599             (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
4600             IXL_NVM_VERSION_HI_SHIFT,
4601             (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
4602             IXL_NVM_VERSION_LO_SHIFT,
4603             hw->nvm.eetrack);
4604         return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4605 }
4606
4607
4608 #ifdef IXL_DEBUG_SYSCTL
4609 static int
4610 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4611 {
4612         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4613         struct i40e_hw *hw = &pf->hw;
4614         struct i40e_link_status link_status;
4615         char buf[512];
4616
4617         enum i40e_status_code aq_error = 0;
4618
4619         aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4620         if (aq_error) {
4621                 printf("i40e_aq_get_link_info() error %d\n", aq_error);
4622                 return (EPERM);
4623         }
4624
4625         sprintf(buf, "\n"
4626             "PHY Type : %#04x\n"
4627             "Speed    : %#04x\n" 
4628             "Link info: %#04x\n" 
4629             "AN info  : %#04x\n" 
4630             "Ext info : %#04x", 
4631             link_status.phy_type, link_status.link_speed, 
4632             link_status.link_info, link_status.an_info,
4633             link_status.ext_info);
4634
4635         return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4636 }
4637
4638 static int
4639 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4640 {
4641         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4642         struct i40e_hw *hw = &pf->hw;
4643         struct i40e_aq_get_phy_abilities_resp abilities_resp;
4644         char buf[512];
4645
4646         enum i40e_status_code aq_error = 0;
4647
4648         // TODO: Print out list of qualified modules as well?
4649         aq_error = i40e_aq_get_phy_capabilities(hw, TRUE, FALSE, &abilities_resp, NULL);
4650         if (aq_error) {
4651                 printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4652                 return (EPERM);
4653         }
4654
4655         sprintf(buf, "\n"
4656             "PHY Type : %#010x\n"
4657             "Speed    : %#04x\n" 
4658             "Abilities: %#04x\n" 
4659             "EEE cap  : %#06x\n" 
4660             "EEER reg : %#010x\n" 
4661             "D3 Lpan  : %#04x",
4662             abilities_resp.phy_type, abilities_resp.link_speed, 
4663             abilities_resp.abilities, abilities_resp.eee_capability,
4664             abilities_resp.eeer_val, abilities_resp.d3_lpan);
4665
4666         return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4667 }
4668
4669 static int
4670 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4671 {
4672         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4673         struct ixl_vsi *vsi = &pf->vsi;
4674         struct ixl_mac_filter *f;
4675         char *buf, *buf_i;
4676
4677         int error = 0;
4678         int ftl_len = 0;
4679         int ftl_counter = 0;
4680         int buf_len = 0;
4681         int entry_len = 42;
4682
4683         SLIST_FOREACH(f, &vsi->ftl, next) {
4684                 ftl_len++;
4685         }
4686
4687         if (ftl_len < 1) {
4688                 sysctl_handle_string(oidp, "(none)", 6, req);
4689                 return (0);
4690         }
4691
4692         buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
4693         buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
4694
4695         sprintf(buf_i++, "\n");
4696         SLIST_FOREACH(f, &vsi->ftl, next) {
4697                 sprintf(buf_i,
4698                     MAC_FORMAT ", vlan %4d, flags %#06x",
4699                     MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4700                 buf_i += entry_len;
4701                 /* don't print '\n' for last entry */
4702                 if (++ftl_counter != ftl_len) {
4703                         sprintf(buf_i, "\n");
4704                         buf_i++;
4705                 }
4706         }
4707
4708         error = sysctl_handle_string(oidp, buf, strlen(buf), req);
4709         if (error)
4710                 printf("sysctl error: %d\n", error);
4711         free(buf, M_DEVBUF);
4712         return error;
4713 }
4714
4715 #define IXL_SW_RES_SIZE 0x14
4716 static int
4717 ixl_res_alloc_cmp(const void *a, const void *b)
4718 {
4719         const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4720         one = (struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4721         two = (struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4722
4723         return ((int)one->resource_type - (int)two->resource_type);
4724 }
4725
4726 static int
4727 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4728 {
4729         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4730         struct i40e_hw *hw = &pf->hw;
4731         device_t dev = pf->dev;
4732         struct sbuf *buf;
4733         int error = 0;
4734
4735         u8 num_entries;
4736         struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4737
4738         buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4739         if (!buf) {
4740                 device_printf(dev, "Could not allocate sbuf for output.\n");
4741                 return (ENOMEM);
4742         }
4743
4744         bzero(resp, sizeof(resp));
4745         error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4746                                 resp,
4747                                 IXL_SW_RES_SIZE,
4748                                 NULL);
4749         if (error) {
4750                 device_printf(dev, "%s: get_switch_resource_alloc() error %d, aq error %d\n",
4751                     __func__, error, hw->aq.asq_last_status);
4752                 sbuf_delete(buf);
4753                 return error;
4754         }
4755
4756         /* Sort entries by type for display */
4757         qsort(resp, num_entries,
4758             sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
4759             &ixl_res_alloc_cmp);
4760
4761         sbuf_cat(buf, "\n");
4762         sbuf_printf(buf, "# of entries: %d\n", num_entries);
4763         sbuf_printf(buf,
4764             "Type | Guaranteed | Total | Used   | Un-allocated\n"
4765             "     | (this)     | (all) | (this) | (all)       \n");
4766         for (int i = 0; i < num_entries; i++) {
4767                 sbuf_printf(buf,
4768                     "%#4x | %10d   %5d   %6d   %12d",
4769                     resp[i].resource_type,
4770                     resp[i].guaranteed,
4771                     resp[i].total,
4772                     resp[i].used,
4773                     resp[i].total_unalloced);
4774                 if (i < num_entries - 1)
4775                         sbuf_cat(buf, "\n");
4776         }
4777
4778         error = sbuf_finish(buf);
4779         sbuf_delete(buf);
4780
4781         return (error);
4782 }
4783
4784 /*
4785 ** Caller must init and delete sbuf; this function will clear and
4786 ** finish it for caller.
4787 */
4788 static char *
4789 ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
4790 {
4791         sbuf_clear(s);
4792
4793         if (seid == 0 && uplink)
4794                 sbuf_cat(s, "Network");
4795         else if (seid == 0)
4796                 sbuf_cat(s, "Host");
4797         else if (seid == 1)
4798                 sbuf_cat(s, "EMP");
4799         else if (seid <= 5)
4800                 sbuf_printf(s, "MAC %d", seid - 2);
4801         else if (seid <= 15)
4802                 sbuf_cat(s, "Reserved");
4803         else if (seid <= 31)
4804                 sbuf_printf(s, "PF %d", seid - 16);
4805         else if (seid <= 159)
4806                 sbuf_printf(s, "VF %d", seid - 32);
4807         else if (seid <= 287)
4808                 sbuf_cat(s, "Reserved");
4809         else if (seid <= 511)
4810                 sbuf_cat(s, "Other"); // for other structures
4811         else if (seid <= 895)
4812                 sbuf_printf(s, "VSI %d", seid - 512);
4813         else if (seid <= 1023)
4814                 sbuf_printf(s, "Reserved");
4815         else
4816                 sbuf_cat(s, "Invalid");
4817
4818         sbuf_finish(s);
4819         return sbuf_data(s);
4820 }
4821
4822 static int
4823 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
4824 {
4825         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4826         struct i40e_hw *hw = &pf->hw;
4827         device_t dev = pf->dev;
4828         struct sbuf *buf;
4829         struct sbuf *nmbuf;
4830         int error = 0;
4831         u8 aq_buf[I40E_AQ_LARGE_BUF];
4832
4833         u16 next = 0;
4834         struct i40e_aqc_get_switch_config_resp *sw_config;
4835         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
4836
4837         buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4838         if (!buf) {
4839                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
4840                 return (ENOMEM);
4841         }
4842
4843         error = i40e_aq_get_switch_config(hw, sw_config,
4844             sizeof(aq_buf), &next, NULL);
4845         if (error) {
4846                 device_printf(dev, "%s: aq_get_switch_config() error %d, aq error %d\n",
4847                     __func__, error, hw->aq.asq_last_status);
4848                 sbuf_delete(buf);
4849                 return error;
4850         }
4851
4852         nmbuf = sbuf_new_auto();
4853         if (!nmbuf) {
4854                 device_printf(dev, "Could not allocate sbuf for name output.\n");
4855                 return (ENOMEM);
4856         }
4857
4858         sbuf_cat(buf, "\n");
4859         // Assuming <= 255 elements in switch
4860         sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
4861         /* Exclude:
4862         ** Revision -- all elements are revision 1 for now
4863         */
4864         sbuf_printf(buf,
4865             "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
4866             "                |          |          | (uplink)\n");
4867         for (int i = 0; i < sw_config->header.num_reported; i++) {
4868                 // "%4d (%8s) | %8s   %8s   %#8x",
4869                 sbuf_printf(buf, "%4d", sw_config->element[i].seid);
4870                 sbuf_cat(buf, " ");
4871                 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, sw_config->element[i].seid, false));
4872                 sbuf_cat(buf, " | ");
4873                 sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, sw_config->element[i].uplink_seid, true));
4874                 sbuf_cat(buf, "   ");
4875                 sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, sw_config->element[i].downlink_seid, false));
4876                 sbuf_cat(buf, "   ");
4877                 sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
4878                 if (i < sw_config->header.num_reported - 1)
4879                         sbuf_cat(buf, "\n");
4880         }
4881         sbuf_delete(nmbuf);
4882
4883         error = sbuf_finish(buf);
4884         sbuf_delete(buf);
4885
4886         return (error);
4887 }
4888
4889 /*
4890 ** Dump TX desc given index.
4891 ** Doesn't work; don't use.
4892 ** TODO: Also needs a queue index input!
4893 **/
4894 static int
4895 ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS)
4896 {
4897         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4898         device_t dev = pf->dev;
4899         struct sbuf *buf;
4900         int error = 0;
4901
4902         u16 desc_idx = 0;
4903
4904         buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4905         if (!buf) {
4906                 device_printf(dev, "Could not allocate sbuf for output.\n");
4907                 return (ENOMEM);
4908         }
4909
4910         /* Read in index */
4911         error = sysctl_handle_int(oidp, &desc_idx, 0, req);
4912         if (error)
4913                 return (error);
4914         if (req->newptr == NULL)
4915                 return (EIO); // fix
4916         if (desc_idx > 1024) { // fix
4917                 device_printf(dev,
4918                     "Invalid descriptor index, needs to be < 1024\n"); // fix
4919                 return (EINVAL);
4920         }
4921
4922         // Don't use this sysctl yet
4923         if (TRUE)
4924                 return (ENODEV);
4925
4926         sbuf_cat(buf, "\n");
4927
4928         // set to queue 1?
4929         struct ixl_queue *que = pf->vsi.queues;
4930         struct tx_ring *txr = &(que[1].txr);
4931         struct i40e_tx_desc *txd = &txr->base[desc_idx];
4932
4933         sbuf_printf(buf, "Que: %d, Desc: %d\n", que->me, desc_idx);
4934         sbuf_printf(buf, "Addr: %#18lx\n", txd->buffer_addr);
4935         sbuf_printf(buf, "Opts: %#18lx\n", txd->cmd_type_offset_bsz);
4936
4937         error = sbuf_finish(buf);
4938         if (error) {
4939                 device_printf(dev, "Error finishing sbuf: %d\n", error);
4940                 sbuf_delete(buf);
4941                 return error;
4942         }
4943
4944         error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4945         if (error)
4946                 device_printf(dev, "sysctl error: %d\n", error);
4947         sbuf_delete(buf);
4948         return error;
4949 }
4950 #endif /* IXL_DEBUG_SYSCTL */
4951