]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/ixl/if_ixl.c
ixl: Update to 1.4.17-k.
[FreeBSD/FreeBSD.git] / sys / dev / ixl / if_ixl.c
1 /******************************************************************************
2
3   Copyright (c) 2013-2015, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 #ifndef IXL_STANDALONE_BUILD
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 #endif
40
41 #include "ixl.h"
42 #include "ixl_pf.h"
43
44 #ifdef RSS
45 #include <net/rss_config.h>
46 #endif
47
48 /*********************************************************************
49  *  Driver version
50  *********************************************************************/
51 char ixl_driver_version[] = "1.4.17-k";
52
53 /*********************************************************************
54  *  PCI Device ID Table
55  *
56  *  Used by probe to select devices to load on
57  *  Last field stores an index into ixl_strings
58  *  Last entry must be all 0s
59  *
60  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61  *********************************************************************/
62
/*
 * PCI ID match table consumed by ixl_probe().  A subvendor/subdevice of 0
 * acts as a wildcard (probe accepts any value for that field); the final
 * field is an index into ixl_strings[] used to build the device description.
 */
static ixl_vendor_info_t ixl_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0},
	/* required last entry: all-zero terminator for the probe loop */
	{0, 0, 0, 0, 0}
};
76
77 /*********************************************************************
78  *  Table of branding strings
79  *********************************************************************/
80
/*
 * Branding strings, selected by the `index' field of the matching
 * ixl_vendor_info_array[] entry (all current entries use index 0).
 */
static char    *ixl_strings[] = {
	"Intel(R) Ethernet Connection XL710 Driver"
};
84
85
86 /*********************************************************************
87  *  Function prototypes
88  *********************************************************************/
89 static int      ixl_probe(device_t);
90 static int      ixl_attach(device_t);
91 static int      ixl_detach(device_t);
92 static int      ixl_shutdown(device_t);
93 static int      ixl_get_hw_capabilities(struct ixl_pf *);
94 static void     ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
95 static int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
96 static void     ixl_init(void *);
97 static void     ixl_init_locked(struct ixl_pf *);
98 static void     ixl_stop(struct ixl_pf *);
99 static void     ixl_stop_locked(struct ixl_pf *);
100 static void     ixl_media_status(struct ifnet *, struct ifmediareq *);
101 static int      ixl_media_change(struct ifnet *);
102 static void     ixl_update_link_status(struct ixl_pf *);
103 static int      ixl_allocate_pci_resources(struct ixl_pf *);
104 static u16      ixl_get_bus_info(struct i40e_hw *, device_t);
105 static int      ixl_setup_stations(struct ixl_pf *);
106 static int      ixl_switch_config(struct ixl_pf *);
107 static int      ixl_initialize_vsi(struct ixl_vsi *);
108 static int      ixl_assign_vsi_msix(struct ixl_pf *);
109 static int      ixl_assign_vsi_legacy(struct ixl_pf *);
110 static int      ixl_init_msix(struct ixl_pf *);
111 static void     ixl_configure_msix(struct ixl_pf *);
112 static void     ixl_configure_itr(struct ixl_pf *);
113 static void     ixl_configure_legacy(struct ixl_pf *);
114 static void     ixl_init_taskqueues(struct ixl_pf *);
115 static void     ixl_free_taskqueues(struct ixl_pf *);
116 static void     ixl_free_interrupt_resources(struct ixl_pf *);
117 static void     ixl_free_pci_resources(struct ixl_pf *);
118 static void     ixl_local_timer(void *);
119 static int      ixl_setup_interface(device_t, struct ixl_vsi *);
120 static void     ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
121 static void     ixl_config_rss(struct ixl_vsi *);
122 static void     ixl_set_queue_rx_itr(struct ixl_queue *);
123 static void     ixl_set_queue_tx_itr(struct ixl_queue *);
124 static int      ixl_set_advertised_speeds(struct ixl_pf *, int);
125
126 static int      ixl_enable_rings(struct ixl_vsi *);
127 static int      ixl_disable_rings(struct ixl_vsi *);
128 static void     ixl_enable_intr(struct ixl_vsi *);
129 static void     ixl_disable_intr(struct ixl_vsi *);
130 static void     ixl_disable_rings_intr(struct ixl_vsi *);
131
132 static void     ixl_enable_adminq(struct i40e_hw *);
133 static void     ixl_disable_adminq(struct i40e_hw *);
134 static void     ixl_enable_queue(struct i40e_hw *, int);
135 static void     ixl_disable_queue(struct i40e_hw *, int);
136 static void     ixl_enable_legacy(struct i40e_hw *);
137 static void     ixl_disable_legacy(struct i40e_hw *);
138
139 static void     ixl_set_promisc(struct ixl_vsi *);
140 static void     ixl_add_multi(struct ixl_vsi *);
141 static void     ixl_del_multi(struct ixl_vsi *);
142 static void     ixl_register_vlan(void *, struct ifnet *, u16);
143 static void     ixl_unregister_vlan(void *, struct ifnet *, u16);
144 static void     ixl_setup_vlan_filters(struct ixl_vsi *);
145
146 static void     ixl_init_filters(struct ixl_vsi *);
147 static void     ixl_reconfigure_filters(struct ixl_vsi *vsi);
148 static void     ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
149 static void     ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
150 static void     ixl_add_hw_filters(struct ixl_vsi *, int, int);
151 static void     ixl_del_hw_filters(struct ixl_vsi *, int);
152 static struct ixl_mac_filter *
153                 ixl_find_filter(struct ixl_vsi *, u8 *, s16);
154 static void     ixl_add_mc_filter(struct ixl_vsi *, u8 *);
155 static void     ixl_free_mac_filters(struct ixl_vsi *vsi);
156
157 /* Sysctls*/
158 static void     ixl_add_device_sysctls(struct ixl_pf *);
159
160 static int      ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
161 static int      ixl_set_advertise(SYSCTL_HANDLER_ARGS);
162 static int      ixl_current_speed(SYSCTL_HANDLER_ARGS);
163 static int      ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
164
165 #ifdef IXL_DEBUG_SYSCTL
166 static int      ixl_debug_info(SYSCTL_HANDLER_ARGS);
167 static void     ixl_print_debug_info(struct ixl_pf *);
168
169 static int      ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
170 static int      ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
171 static int      ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
172 static int      ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
173 static int      ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
174 #endif
175
176 /* The MSI/X Interrupt handlers */
177 static void     ixl_intr(void *);
178 static void     ixl_msix_que(void *);
179 static void     ixl_msix_adminq(void *);
180 static void     ixl_handle_mdd_event(struct ixl_pf *);
181
182 /* Deferred interrupt tasklets */
183 static void     ixl_do_adminq(void *, int);
184
185 /* Statistics */
186 static void     ixl_add_hw_stats(struct ixl_pf *);
187 static void     ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
188                     struct sysctl_oid_list *, struct i40e_hw_port_stats *);
189 static void     ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
190                     struct sysctl_oid_list *,
191                     struct i40e_eth_stats *);
192 static void     ixl_update_stats_counters(struct ixl_pf *);
193 static void     ixl_update_eth_stats(struct ixl_vsi *);
194 static void     ixl_update_vsi_stats(struct ixl_vsi *);
195 static void     ixl_pf_reset_stats(struct ixl_pf *);
196 static void     ixl_vsi_reset_stats(struct ixl_vsi *);
197 static void     ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
198                     u64 *, u64 *);
199 static void     ixl_stat_update32(struct i40e_hw *, u32, bool,
200                     u64 *, u64 *);
201 /* NVM update */
202 static int      ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);
203
204
205 #ifdef PCI_IOV
206 static int      ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
207
208 static int      ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t*);
209 static void     ixl_iov_uninit(device_t dev);
210 static int      ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);
211
212 static void     ixl_handle_vf_msg(struct ixl_pf *,
213                     struct i40e_arq_event_info *);
214 static void     ixl_handle_vflr(void *arg, int pending);
215
216 static void     ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
217 static void     ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
218 #endif
219
220 /*********************************************************************
221  *  FreeBSD Device Interface Entry Points
222  *********************************************************************/
223
/* newbus method dispatch table for the ixl(4) PF driver */
static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixl_probe),
	DEVMETHOD(device_attach, ixl_attach),
	DEVMETHOD(device_detach, ixl_detach),
	DEVMETHOD(device_shutdown, ixl_shutdown),
#ifdef PCI_IOV
	/* SR-IOV entry points; only usable when MSI-X is active (see attach) */
	DEVMETHOD(pci_iov_init, ixl_iov_init),
	DEVMETHOD(pci_iov_uninit, ixl_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, ixl_add_vf),
#endif
	{0, 0}
};

/* softc size tells newbus how much per-device state to allocate */
static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ixl, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
250
/*
** Global reset mutex.
** Lazily initialized on the first successful ixl_probe() match
** (one-shot, guarded by a function-local static flag there).
*/
static struct mtx ixl_reset_mtx;

/*
** TUNEABLE PARAMETERS:
** Each tunable is exposed both as a loader tunable (TUNABLE_INT)
** and as a read-only-tunable sysctl under hw.ixl.
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
                   "IXL driver parameters");

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixl_enable_msix = 1;
TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixl_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixl_ringsz, 0, "Descriptor Ring Size");

/* 
** This can be set manually, if left as 0 the
** number of queues will be calculated based
** on cpus and msix vectors available.
*/
int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixl_max_queues, 0, "Number of Queues");

/*
** Controls for Interrupt Throttling 
**      - true/false for dynamic adjustment
**      - default values for static ITR
*/
int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

#ifdef IXL_FDIR
/* Flow Director support (compile-time option, no sysctl exposed here) */
static int ixl_enable_fdir = 1;
TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
/* Rate at which we sample */
int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif
323
324 #ifdef DEV_NETMAP
325 #define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
326 #include <dev/netmap/if_ixl_netmap.h>
327 #endif /* DEV_NETMAP */
328
/*
 * Human-readable flow-control mode names.
 * NOTE(review): presumably indexed by the hw flow-control mode value
 * (i40e fc enum) when reporting via sysctl — confirm against the
 * ixl_set_flowcntl handler before relying on the ordering.
 */
static char *ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

/* malloc(9) type tag for this driver's allocations */
static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

/* Ethernet broadcast address, used when installing broadcast filters */
static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
342
343 /*********************************************************************
344  *  Device identification routine
345  *
346  *  ixl_probe determines if the driver should be loaded on
347  *  the hardware based on PCI vendor/device id of the device.
348  *
349  *  return BUS_PROBE_DEFAULT on success, positive on failure
350  *********************************************************************/
351
352 static int
353 ixl_probe(device_t dev)
354 {
355         ixl_vendor_info_t *ent;
356
357         u16     pci_vendor_id, pci_device_id;
358         u16     pci_subvendor_id, pci_subdevice_id;
359         char    device_name[256];
360         static bool lock_init = FALSE;
361
362 #if 0
363         INIT_DEBUGOUT("ixl_probe: begin");
364 #endif
365         pci_vendor_id = pci_get_vendor(dev);
366         if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
367                 return (ENXIO);
368
369         pci_device_id = pci_get_device(dev);
370         pci_subvendor_id = pci_get_subvendor(dev);
371         pci_subdevice_id = pci_get_subdevice(dev);
372
373         ent = ixl_vendor_info_array;
374         while (ent->vendor_id != 0) {
375                 if ((pci_vendor_id == ent->vendor_id) &&
376                     (pci_device_id == ent->device_id) &&
377
378                     ((pci_subvendor_id == ent->subvendor_id) ||
379                      (ent->subvendor_id == 0)) &&
380
381                     ((pci_subdevice_id == ent->subdevice_id) ||
382                      (ent->subdevice_id == 0))) {
383                         sprintf(device_name, "%s, Version - %s",
384                                 ixl_strings[ent->index],
385                                 ixl_driver_version);
386                         device_set_desc_copy(dev, device_name);
387                         /* One shot mutex init */
388                         if (lock_init == FALSE) {
389                                 lock_init = TRUE;
390                                 mtx_init(&ixl_reset_mtx,
391                                     "ixl_reset",
392                                     "IXL RESET Lock", MTX_DEF);
393                         }
394                         return (BUS_PROBE_DEFAULT);
395                 }
396                 ent++;
397         }
398         return (ENXIO);
399 }
400
401 /*********************************************************************
402  *  Device initialization routine
403  *
404  *  The attach entry point is called when the driver is being loaded.
405  *  This routine identifies the type of hardware, allocates all resources
406  *  and initializes the hardware.
407  *
408  *  return 0 on success, positive on failure
409  *********************************************************************/
410
static int
ixl_attach(device_t dev)
{
	struct ixl_pf	*pf;
	struct i40e_hw	*hw;
	struct ixl_vsi	*vsi;
	u16		bus;
	int		error = 0;
#ifdef PCI_IOV
	nvlist_t	*pf_schema, *vf_schema;
	int		iov_error;
#endif

	INIT_DEBUGOUT("ixl_attach: begin");

	/* Allocate, clear, and link in our primary soft structure */
	pf = device_get_softc(dev);
	pf->dev = pf->osdep.dev = dev;
	hw = &pf->hw;

	/*
	** Note this assumes we have a single embedded VSI,
	** this could be enhanced later to allocate multiple
	*/
	vsi = &pf->vsi;
	vsi->dev = pf->dev;

	/* Core Lock Init */
	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));

	/* Set up the timer callout (runs under the PF lock) */
	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);

	/* Save off the PCI information for the shared code */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	pf->vc_debug_lvl = 1;

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev, "PF reset failure %d\n", error);
		error = EIO;
		goto err_out;
	}

	/* Set admin queue parameters (must precede i40e_init_adminq()) */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

	/* Initialize mac filter list for VSI */
	SLIST_INIT(&vsi->ftl);

	/* Initialize the shared code */
	error = i40e_init_shared_code(hw);
	if (error) {
		device_printf(dev, "Unable to initialize shared code, error %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	/*
	 * Set up the admin queue.  An API-version mismatch is tolerated
	 * here so we can still print the NVM version below before
	 * deciding (in the next check) whether the mismatch is fatal.
	 */
	error = i40e_init_adminq(hw);
	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
		    error);
		error = EIO;
		goto err_out;
	}
	ixl_print_nvm_version(pf);

	if (error == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n"
		    "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}

	/* Warn (non-fatal) on minor-version skew between NVM and driver */
	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n"
		    "Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n"
		    "Please update the NVM image.\n");

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "HW capabilities failure!\n");
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (error) {
		device_printf(dev, "init_lan_hmc failed: %d\n", error);
		goto err_get_cap;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
		goto err_mac_hmc;
	}

	/* Disable LLDP from the firmware */
	i40e_aq_stop_lldp(hw, TRUE, NULL);

	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up VSI and queues */
	if (ixl_setup_stations(pf) != 0) { 
		device_printf(dev, "setup stations failed!\n");
		error = ENOMEM;
		goto err_mac_hmc;
	}

	/*
	 * NOTE(review): the delay + link restart below is applied only for
	 * firmware older than 4.33 — presumably a workaround for a FW link
	 * bug; confirm against Intel release notes before changing.
	 */
	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error) {
			device_printf(dev, "link restart failed, aq_err=%d\n",
			    pf->hw.aq.asq_last_status);
			goto err_late;
		}
	}

	/* Determine link state */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, vsi) != 0) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err_late;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n", error);
		goto err_late;
	}

	/* Limit PHY interrupts to link, autoneg, and modules failure */
	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (error) {
		device_printf(dev, "i40e_aq_set_phy_mask() failed: err %d,"
		    " aq_err %d\n", error, hw->aq.asq_last_status);
		goto err_late;
	}

	/* Get the bus configuration and set the shared code */
	bus = ixl_get_bus_info(hw, dev);
	i40e_set_pci_config_data(hw, bus);

	/* Initialize taskqueues */
	ixl_init_taskqueues(pf);

	/* Initialize statistics & add sysctls */
	ixl_add_device_sysctls(pf);

	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

#ifdef PCI_IOV
	/* SR-IOV is only supported when MSI-X is in use. */
	if (pf->msix > 1) {
		pf_schema = pci_iov_schema_alloc_node();
		vf_schema = pci_iov_schema_alloc_node();
		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
		    IOV_SCHEMA_HASDEFAULT, TRUE);
		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
		    IOV_SCHEMA_HASDEFAULT, FALSE);

		/* SR-IOV setup failure is non-fatal to attach */
		iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
		if (iov_error != 0) {
			device_printf(dev,
			    "Failed to initialize SR-IOV (error=%d)\n",
			    iov_error);
		} else
			device_printf(dev, "SR-IOV ready\n");
	}
#endif

#ifdef DEV_NETMAP
	ixl_netmap_attach(vsi);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixl_attach: end");
	return (0);

	/*
	 * Error unwind: labels tear down in reverse order of setup and
	 * fall through intentionally (each later label undoes an earlier
	 * stage of attach).
	 */
err_late:
	if (vsi->ifp != NULL)
		if_free(vsi->ifp);
err_mac_hmc:
	i40e_shutdown_lan_hmc(hw);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (error);
}
664
665 /*********************************************************************
666  *  Device removal routine
667  *
668  *  The detach entry point is called when the driver is being removed.
669  *  This routine stops the adapter and deallocates all the resources
670  *  that were allocated for driver operation.
671  *
672  *  return 0 on success, positive on failure
673  *********************************************************************/
674
static int
ixl_detach(device_t dev)
{
	struct ixl_pf		*pf = device_get_softc(dev);
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	i40e_status		status;
#ifdef PCI_IOV
	int			error;
#endif

	INIT_DEBUGOUT("ixl_detach: begin");

	/* Make sure VLANS are not using driver; refuse detach if so */
	if (vsi->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef PCI_IOV
	/* Likewise refuse detach while VFs are still attached */
	error = pci_iov_detach(dev);
	if (error != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (error);
	}
#endif

	/* Detach from the network stack, then stop the hardware if running */
	ether_ifdetach(vsi->ifp);
	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
		ixl_stop(pf);

	ixl_free_taskqueues(pf);

	/* Shutdown LAN HMC (failure is logged but does not abort detach) */
	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(dev,
		    "Shutdown LAN HMC failed with code %d\n", status);

	/* Shutdown admin queue */
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", status);

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	/* Wait for any in-flight timer callout before freeing resources */
	callout_drain(&pf->timer);
#ifdef DEV_NETMAP
	netmap_detach(vsi->ifp);
#endif /* DEV_NETMAP */
	ixl_free_pci_resources(pf);
	bus_generic_detach(dev);
	if_free(vsi->ifp);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (0);
}
737
738 /*********************************************************************
739  *
740  *  Shutdown entry point
741  *
742  **********************************************************************/
743
744 static int
745 ixl_shutdown(device_t dev)
746 {
747         struct ixl_pf *pf = device_get_softc(dev);
748         ixl_stop(pf);
749         return (0);
750 }
751
752
753 /*********************************************************************
754  *
755  *  Get the hardware capabilities
756  *
757  **********************************************************************/
758
759 static int
760 ixl_get_hw_capabilities(struct ixl_pf *pf)
761 {
762         struct i40e_aqc_list_capabilities_element_resp *buf;
763         struct i40e_hw  *hw = &pf->hw;
764         device_t        dev = pf->dev;
765         int             error, len;
766         u16             needed;
767         bool            again = TRUE;
768
769         len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
770 retry:
771         if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
772             malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
773                 device_printf(dev, "Unable to allocate cap memory\n");
774                 return (ENOMEM);
775         }
776
777         /* This populates the hw struct */
778         error = i40e_aq_discover_capabilities(hw, buf, len,
779             &needed, i40e_aqc_opc_list_func_capabilities, NULL);
780         free(buf, M_DEVBUF);
781         if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
782             (again == TRUE)) {
783                 /* retry once with a larger buffer */
784                 again = FALSE;
785                 len = needed;
786                 goto retry;
787         } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
788                 device_printf(dev, "capability discovery failed: %d\n",
789                     pf->hw.aq.asq_last_status);
790                 return (ENODEV);
791         }
792
793         /* Capture this PF's starting queue pair */
794         pf->qbase = hw->func_caps.base_queue;
795
796 #ifdef IXL_DEBUG
797         device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
798             "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
799             hw->pf_id, hw->func_caps.num_vfs,
800             hw->func_caps.num_msix_vectors,
801             hw->func_caps.num_msix_vectors_vf,
802             hw->func_caps.fd_filters_guaranteed,
803             hw->func_caps.fd_filters_best_effort,
804             hw->func_caps.num_tx_qp,
805             hw->func_caps.num_rx_qp,
806             hw->func_caps.base_queue);
807 #endif
808         return (error);
809 }
810
811 static void
812 ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
813 {
814         device_t        dev = vsi->dev;
815
816         /* Enable/disable TXCSUM/TSO4 */
817         if (!(ifp->if_capenable & IFCAP_TXCSUM)
818             && !(ifp->if_capenable & IFCAP_TSO4)) {
819                 if (mask & IFCAP_TXCSUM) {
820                         ifp->if_capenable |= IFCAP_TXCSUM;
821                         /* enable TXCSUM, restore TSO if previously enabled */
822                         if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
823                                 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
824                                 ifp->if_capenable |= IFCAP_TSO4;
825                         }
826                 }
827                 else if (mask & IFCAP_TSO4) {
828                         ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
829                         vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
830                         device_printf(dev,
831                             "TSO4 requires txcsum, enabling both...\n");
832                 }
833         } else if((ifp->if_capenable & IFCAP_TXCSUM)
834             && !(ifp->if_capenable & IFCAP_TSO4)) {
835                 if (mask & IFCAP_TXCSUM)
836                         ifp->if_capenable &= ~IFCAP_TXCSUM;
837                 else if (mask & IFCAP_TSO4)
838                         ifp->if_capenable |= IFCAP_TSO4;
839         } else if((ifp->if_capenable & IFCAP_TXCSUM)
840             && (ifp->if_capenable & IFCAP_TSO4)) {
841                 if (mask & IFCAP_TXCSUM) {
842                         vsi->flags |= IXL_FLAGS_KEEP_TSO4;
843                         ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
844                         device_printf(dev, 
845                             "TSO4 requires txcsum, disabling both...\n");
846                 } else if (mask & IFCAP_TSO4)
847                         ifp->if_capenable &= ~IFCAP_TSO4;
848         }
849
850         /* Enable/disable TXCSUM_IPV6/TSO6 */
851         if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
852             && !(ifp->if_capenable & IFCAP_TSO6)) {
853                 if (mask & IFCAP_TXCSUM_IPV6) {
854                         ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
855                         if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
856                                 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
857                                 ifp->if_capenable |= IFCAP_TSO6;
858                         }
859                 } else if (mask & IFCAP_TSO6) {
860                         ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
861                         vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
862                         device_printf(dev,
863                             "TSO6 requires txcsum6, enabling both...\n");
864                 }
865         } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
866             && !(ifp->if_capenable & IFCAP_TSO6)) {
867                 if (mask & IFCAP_TXCSUM_IPV6)
868                         ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
869                 else if (mask & IFCAP_TSO6)
870                         ifp->if_capenable |= IFCAP_TSO6;
871         } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
872             && (ifp->if_capenable & IFCAP_TSO6)) {
873                 if (mask & IFCAP_TXCSUM_IPV6) {
874                         vsi->flags |= IXL_FLAGS_KEEP_TSO6;
875                         ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
876                         device_printf(dev,
877                             "TSO6 requires txcsum6, disabling both...\n");
878                 } else if (mask & IFCAP_TSO6)
879                         ifp->if_capenable &= ~IFCAP_TSO6;
880         }
881 }
882
883 /*********************************************************************
884  *  Ioctl entry point
885  *
886  *  ixl_ioctl is called when the user wants to configure the
887  *  interface.
888  *
889  *  return 0 on success, positive on failure
890  **********************************************************************/
891
892 static int
893 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
894 {
895         struct ixl_vsi  *vsi = ifp->if_softc;
896         struct ixl_pf   *pf = vsi->back;
897         struct ifreq    *ifr = (struct ifreq *)data;
898         struct ifdrv    *ifd = (struct ifdrv *)data;
899 #if defined(INET) || defined(INET6)
900         struct ifaddr *ifa = (struct ifaddr *)data;
901         bool            avoid_reset = FALSE;
902 #endif
903         int             error = 0;
904
905         switch (command) {
906
907         case SIOCSIFADDR:
908 #ifdef INET
909                 if (ifa->ifa_addr->sa_family == AF_INET)
910                         avoid_reset = TRUE;
911 #endif
912 #ifdef INET6
913                 if (ifa->ifa_addr->sa_family == AF_INET6)
914                         avoid_reset = TRUE;
915 #endif
916 #if defined(INET) || defined(INET6)
917                 /*
918                 ** Calling init results in link renegotiation,
919                 ** so we avoid doing it when possible.
920                 */
921                 if (avoid_reset) {
922                         ifp->if_flags |= IFF_UP;
923                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
924                                 ixl_init(pf);
925 #ifdef INET
926                         if (!(ifp->if_flags & IFF_NOARP))
927                                 arp_ifinit(ifp, ifa);
928 #endif
929                 } else
930                         error = ether_ioctl(ifp, command, data);
931                 break;
932 #endif
933         case SIOCSIFMTU:
934                 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
935                 if (ifr->ifr_mtu > IXL_MAX_FRAME -
936                    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
937                         error = EINVAL;
938                 } else {
939                         IXL_PF_LOCK(pf);
940                         ifp->if_mtu = ifr->ifr_mtu;
941                         vsi->max_frame_size =
942                                 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
943                             + ETHER_VLAN_ENCAP_LEN;
944                         ixl_init_locked(pf);
945                         IXL_PF_UNLOCK(pf);
946                 }
947                 break;
948         case SIOCSIFFLAGS:
949                 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
950                 IXL_PF_LOCK(pf);
951                 if (ifp->if_flags & IFF_UP) {
952                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
953                                 if ((ifp->if_flags ^ pf->if_flags) &
954                                     (IFF_PROMISC | IFF_ALLMULTI)) {
955                                         ixl_set_promisc(vsi);
956                                 }
957                         } else {
958                                 IXL_PF_UNLOCK(pf);
959                                 ixl_init(pf);
960                                 IXL_PF_LOCK(pf);
961                         }
962                 } else {
963                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
964                                 IXL_PF_UNLOCK(pf);
965                                 ixl_stop(pf);
966                                 IXL_PF_LOCK(pf);
967                         }
968                 }
969                 pf->if_flags = ifp->if_flags;
970                 IXL_PF_UNLOCK(pf);
971                 break;
972         case SIOCSDRVSPEC:
973         case SIOCGDRVSPEC:
974                 IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
975                     "Info)\n");
976
977                 /* NVM update command */
978                 if (ifd->ifd_cmd == I40E_NVM_ACCESS)
979                         error = ixl_handle_nvmupd_cmd(pf, ifd);
980                 else
981                         error = EINVAL;
982                 break;
983         case SIOCADDMULTI:
984                 IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
985                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
986                         IXL_PF_LOCK(pf);
987                         ixl_disable_intr(vsi);
988                         ixl_add_multi(vsi);
989                         ixl_enable_intr(vsi);
990                         IXL_PF_UNLOCK(pf);
991                 }
992                 break;
993         case SIOCDELMULTI:
994                 IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
995                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
996                         IXL_PF_LOCK(pf);
997                         ixl_disable_intr(vsi);
998                         ixl_del_multi(vsi);
999                         ixl_enable_intr(vsi);
1000                         IXL_PF_UNLOCK(pf);
1001                 }
1002                 break;
1003         case SIOCSIFMEDIA:
1004         case SIOCGIFMEDIA:
1005 #ifdef IFM_ETH_XTYPE
1006         case SIOCGIFXMEDIA:
1007 #endif
1008                 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
1009                 error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
1010                 break;
1011         case SIOCSIFCAP:
1012         {
1013                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1014                 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
1015
1016                 ixl_cap_txcsum_tso(vsi, ifp, mask);
1017
1018                 if (mask & IFCAP_RXCSUM)
1019                         ifp->if_capenable ^= IFCAP_RXCSUM;
1020                 if (mask & IFCAP_RXCSUM_IPV6)
1021                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1022                 if (mask & IFCAP_LRO)
1023                         ifp->if_capenable ^= IFCAP_LRO;
1024                 if (mask & IFCAP_VLAN_HWTAGGING)
1025                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1026                 if (mask & IFCAP_VLAN_HWFILTER)
1027                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1028                 if (mask & IFCAP_VLAN_HWTSO)
1029                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1030                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1031                         IXL_PF_LOCK(pf);
1032                         ixl_init_locked(pf);
1033                         IXL_PF_UNLOCK(pf);
1034                 }
1035                 VLAN_CAPABILITIES(ifp);
1036
1037                 break;
1038         }
1039
1040         default:
1041                 IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
1042                 error = ether_ioctl(ifp, command, data);
1043                 break;
1044         }
1045
1046         return (error);
1047 }
1048
1049
1050 /*********************************************************************
1051  *  Init entry point
1052  *
1053  *  This routine is used in two ways. It is used by the stack as
1054  *  init entry point in network interface structure. It is also used
1055  *  by the driver as a hw/sw initialization routine to get to a
1056  *  consistent state.
1057  *
1058  *  return 0 on success, positive on failure
1059  **********************************************************************/
1060
1061 static void
1062 ixl_init_locked(struct ixl_pf *pf)
1063 {
1064         struct i40e_hw  *hw = &pf->hw;
1065         struct ixl_vsi  *vsi = &pf->vsi;
1066         struct ifnet    *ifp = vsi->ifp;
1067         device_t        dev = pf->dev;
1068         struct i40e_filter_control_settings     filter;
1069         u8              tmpaddr[ETHER_ADDR_LEN];
1070         int             ret;
1071
1072         mtx_assert(&pf->pf_mtx, MA_OWNED);
1073         INIT_DEBUGOUT("ixl_init: begin");
1074
1075         ixl_stop_locked(pf);
1076
1077         /* Get the latest mac address... User might use a LAA */
1078         bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
1079               I40E_ETH_LENGTH_OF_ADDRESS);
1080         if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
1081             (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
1082                 ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1083                 bcopy(tmpaddr, hw->mac.addr,
1084                     I40E_ETH_LENGTH_OF_ADDRESS);
1085                 ret = i40e_aq_mac_address_write(hw,
1086                     I40E_AQC_WRITE_TYPE_LAA_ONLY,
1087                     hw->mac.addr, NULL);
1088                 if (ret) {
1089                         device_printf(dev, "LLA address"
1090                          "change failed!!\n");
1091                         return;
1092                 }
1093         }
1094
1095         ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1096
1097         /* Set the various hardware offload abilities */
1098         ifp->if_hwassist = 0;
1099         if (ifp->if_capenable & IFCAP_TSO)
1100                 ifp->if_hwassist |= CSUM_TSO;
1101         if (ifp->if_capenable & IFCAP_TXCSUM)
1102                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1103         if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1104                 ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
1105
1106         /* Set up the device filtering */
1107         bzero(&filter, sizeof(filter));
1108         filter.enable_ethtype = TRUE;
1109         filter.enable_macvlan = TRUE;
1110 #ifdef IXL_FDIR
1111         filter.enable_fdir = TRUE;
1112 #endif
1113         filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
1114         if (i40e_set_filter_control(hw, &filter))
1115                 device_printf(dev, "i40e_set_filter_control() failed\n");
1116
1117         /* Set up RSS */
1118         ixl_config_rss(vsi);
1119
1120         /* Prepare the VSI: rings, hmc contexts, etc... */
1121         if (ixl_initialize_vsi(vsi)) {
1122                 device_printf(dev, "initialize vsi failed!!\n");
1123                 return;
1124         }
1125
1126         /* Add protocol filters to list */
1127         ixl_init_filters(vsi);
1128
1129         /* Setup vlan's if needed */
1130         ixl_setup_vlan_filters(vsi);
1131
1132         /* Set up MSI/X routing and the ITR settings */
1133         if (ixl_enable_msix) {
1134                 ixl_configure_msix(pf);
1135                 ixl_configure_itr(pf);
1136         } else
1137                 ixl_configure_legacy(pf);
1138
1139         ixl_enable_rings(vsi);
1140
1141         i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
1142
1143         ixl_reconfigure_filters(vsi);
1144
1145         /* And now turn on interrupts */
1146         ixl_enable_intr(vsi);
1147
1148         /* Get link info */
1149         hw->phy.get_link_info = TRUE;
1150         i40e_get_link_status(hw, &pf->link_up);
1151         ixl_update_link_status(pf);
1152
1153         /* Start the local timer */
1154         callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1155
1156         /* Now inform the stack we're ready */
1157         ifp->if_drv_flags |= IFF_DRV_RUNNING;
1158
1159         return;
1160 }
1161
1162 static int
1163 ixl_teardown_hw_structs(struct ixl_pf *pf)
1164 {
1165         enum i40e_status_code status = 0;
1166         struct i40e_hw *hw = &pf->hw;
1167         device_t dev = pf->dev;
1168
1169         /* Shutdown LAN HMC */
1170         if (hw->hmc.hmc_obj) {
1171                 status = i40e_shutdown_lan_hmc(hw);
1172                 if (status) {
1173                         device_printf(dev,
1174                             "init: LAN HMC shutdown failure; status %d\n", status);
1175                         goto err_out;
1176                 }
1177         }
1178
1179         // XXX: This gets called when we know the adminq is inactive;
1180         // so we already know it's setup when we get here.
1181
1182         /* Shutdown admin queue */
1183         status = i40e_shutdown_adminq(hw);
1184         if (status)
1185                 device_printf(dev,
1186                     "init: Admin Queue shutdown failure; status %d\n", status);
1187
1188 err_out:
1189         return (status);
1190 }
1191
/*
 * Perform a full PF-level reset and re-initialization of the hardware:
 * clear HW, PF reset, re-init admin queue, rediscover capabilities,
 * rebuild and configure the LAN HMC, re-arm the PHY interrupt mask and
 * flow control, and (on old firmware) restart link autonegotiation.
 * The ordering of these steps is firmware-mandated; do not reorder.
 * Returns 0 on success or a positive errno / shared-code status.
 */
static int
ixl_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int error = 0;

	// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev, "init: PF reset failure");
		error = EIO;
		goto err_out;
	}

	/* Admin queue must be back up before any further AQ commands */
	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "init: Admin queue init failure; status code %d", error);
		error = EIO;
		goto err_out;
	}

	i40e_clear_pxe_mode(hw);

	/* Repopulates hw->func_caps (queue counts used below) */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "init: Error retrieving HW capabilities; status code %d\n", error);
		goto err_out;
	}

	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (error) {
		device_printf(dev, "init: LAN HMC init failed; status code %d\n", error);
		error = EIO;
		goto err_out;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "init: LAN HMC config failed; status code %d\n", error);
		error = EIO;
		goto err_out;
	}

	// XXX: need to do switch config here?

	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (error) {
		device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
		    " aq_err %d\n", error, hw->aq.asq_last_status);
		error = EIO;
		goto err_out;
	}

	/* NOTE(review): unlike the cases above, a flow-control failure
	 * returns the raw shared-code status, not EIO -- confirm intended. */
	u8 set_fc_err_mask;
	error = i40e_set_fc(hw, &set_fc_err_mask, true);
	if (error) {
		device_printf(dev, "init: setting link flow control failed; retcode %d,"
		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
		goto err_out;
	}

	// XXX: (Rebuild VSIs?)

	/* Firmware delay workaround */
	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error) {
			device_printf(dev, "init: link restart failed, aq_err %d\n",
			    hw->aq.asq_last_status);
			goto err_out;
		}
	}


err_out:
	return (error);
}
1275
1276 static void
1277 ixl_init(void *arg)
1278 {
1279         struct ixl_pf *pf = arg;
1280         int ret = 0;
1281
1282         /*
1283          * If the aq is dead here, it probably means something outside of the driver
1284          * did something to the adapter, like a PF reset.
1285          * So rebuild the driver's state here if that occurs.
1286          */
1287         if (!i40e_check_asq_alive(&pf->hw)) {
1288                 device_printf(pf->dev, "asq is not alive; rebuilding...\n");
1289                 IXL_PF_LOCK(pf);
1290                 ixl_teardown_hw_structs(pf);
1291                 ixl_reset(pf);
1292                 IXL_PF_UNLOCK(pf);
1293         }
1294
1295         /* Set up interrupt routing here */
1296         if (pf->msix > 1)
1297                 ret = ixl_assign_vsi_msix(pf);
1298         else
1299                 ret = ixl_assign_vsi_legacy(pf);
1300         if (ret) {
1301                 device_printf(pf->dev, "assign_vsi_msix/legacy error: %d\n", ret);
1302                 return;
1303         }
1304
1305         IXL_PF_LOCK(pf);
1306         ixl_init_locked(pf);
1307         IXL_PF_UNLOCK(pf);
1308         return;
1309 }
1310
1311 /*
1312 **
1313 ** MSIX Interrupt Handlers and Tasklets
1314 **
1315 */
1316 static void
1317 ixl_handle_que(void *context, int pending)
1318 {
1319         struct ixl_queue *que = context;
1320         struct ixl_vsi *vsi = que->vsi;
1321         struct i40e_hw  *hw = vsi->hw;
1322         struct tx_ring  *txr = &que->txr;
1323         struct ifnet    *ifp = vsi->ifp;
1324         bool            more;
1325
1326         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1327                 more = ixl_rxeof(que, IXL_RX_LIMIT);
1328                 IXL_TX_LOCK(txr);
1329                 ixl_txeof(que);
1330                 if (!drbr_empty(ifp, txr->br))
1331                         ixl_mq_start_locked(ifp, txr);
1332                 IXL_TX_UNLOCK(txr);
1333                 if (more) {
1334                         taskqueue_enqueue(que->tq, &que->task);
1335                         return;
1336                 }
1337         }
1338
1339         /* Reenable this interrupt - hmmm */
1340         ixl_enable_queue(hw, que->me);
1341         return;
1342 }
1343
1344
1345 /*********************************************************************
1346  *
1347  *  Legacy Interrupt Service routine
1348  *
1349  **********************************************************************/
/*
 * Legacy (INTx/single-MSI) interrupt service routine.  Handles both the
 * "other causes" in ICR0 (admin queue, VFLR) and the single RX/TX queue
 * pair inline, then re-enables the causes and the legacy interrupt.
 */
void
ixl_intr(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw =  &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	struct tx_ring		*txr = &que->txr;
        u32			reg, icr0, mask;
	bool			more_tx, more_rx;

	++que->irqs;

	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Reading ICR0 also clears the latched cause bits */
	icr0 = rd32(hw, I40E_PFINT_ICR0);

	/* Ack/clear the PBA for vector 0 */
	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	/* Snapshot enabled causes so they can be restored below */
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

#ifdef PCI_IOV
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
		taskqueue_enqueue(pf->tq, &pf->vflr_task);
#endif

	/* Admin queue events are handled entirely by the adminq task;
	 * note this returns without processing the data queues. */
	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		taskqueue_enqueue(pf->tq, &pf->adminq);
		return;
	}

	/* NOTE(review): more_rx/more_tx are computed but never used to
	 * reschedule work in this legacy path -- confirm intended. */
	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	/* re-enable other interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, mask);

	/* And now the queues */
	reg = rd32(hw, I40E_QINT_RQCTL(0));
	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = rd32(hw, I40E_QINT_TQCTL(0));
	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
	/* NOTE(review): this clears an ICR0-named bit in a QINT_TQCTL
	 * value; the mask belongs to a different register's layout --
	 * verify the intended TQCTL bit against the datasheet. */
	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
	wr32(hw, I40E_QINT_TQCTL(0), reg);

	ixl_enable_legacy(hw);

	return;
}
1411
1412
1413 /*********************************************************************
1414  *
1415  *  MSIX VSI Interrupt Service routine
1416  *
1417  **********************************************************************/
1418 void
1419 ixl_msix_que(void *arg)
1420 {
1421         struct ixl_queue        *que = arg;
1422         struct ixl_vsi  *vsi = que->vsi;
1423         struct i40e_hw  *hw = vsi->hw;
1424         struct tx_ring  *txr = &que->txr;
1425         bool            more_tx, more_rx;
1426
1427         /* Protect against spurious interrupts */
1428         if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
1429                 return;
1430
1431         ++que->irqs;
1432
1433         more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1434
1435         IXL_TX_LOCK(txr);
1436         more_tx = ixl_txeof(que);
1437         /*
1438         ** Make certain that if the stack 
1439         ** has anything queued the task gets
1440         ** scheduled to handle it.
1441         */
1442         if (!drbr_empty(vsi->ifp, txr->br))
1443                 more_tx = 1;
1444         IXL_TX_UNLOCK(txr);
1445
1446         ixl_set_queue_rx_itr(que);
1447         ixl_set_queue_tx_itr(que);
1448
1449         if (more_tx || more_rx)
1450                 taskqueue_enqueue(que->tq, &que->task);
1451         else
1452                 ixl_enable_queue(hw, que->me);
1453
1454         return;
1455 }
1456
1457
1458 /*********************************************************************
1459  *
1460  *  MSIX Admin Queue Interrupt Service routine
1461  *
1462  **********************************************************************/
/*
 * MSI-X admin queue (vector 0) interrupt service routine.  Decodes the
 * "other cause" bits in ICR0 -- admin queue events, malicious driver
 * detection, reset requests, ECC/HMC/PCI errors, and (with PCI_IOV)
 * VF-level resets -- and defers the heavyweight handling to tasks.
 */
static void
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	u32		reg, mask, rstat_reg;
	bool		do_task = FALSE;

	++pf->admin_irq;

	/* Reading ICR0 also clears the latched cause bits */
	reg = rd32(hw, I40E_PFINT_ICR0);
	/* NOTE(review): 'mask' is modified below but never written back
	 * to I40E_PFINT_ICR0_ENA -- looks vestigial; confirm. */
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
		mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK;
		do_task = TRUE;
	}

	/* Malicious driver detection is handled inline */
	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ixl_handle_mdd_event(pf);
		mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK;
	}

	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
		device_printf(pf->dev, "Reset Requested!\n");
		/* Decode which kind of reset the firmware reports */
		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		device_printf(pf->dev, "Reset type: ");
		switch (rstat_reg) {
		/* These others might be handled similarly to an EMPR reset */
		case I40E_RESET_CORER:
			printf("CORER\n");
			break;
		case I40E_RESET_GLOBR:
			printf("GLOBR\n");
			break;
		case I40E_RESET_EMPR:
			printf("EMPR\n");
			/* Flag the EMPR so other paths can wait it out */
			atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
			break;
		default:
			printf("?\n");
			break;
		}
		// overload admin queue task to check reset progress?
		do_task = TRUE;
	}

	/* The three error causes below are only reported, not recovered */
	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) {
		device_printf(pf->dev, "ECC Error detected!\n");
	}

	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		device_printf(pf->dev, "HMC Error detected!\n");
	}

	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) {
		device_printf(pf->dev, "PCI Exception detected!\n");
	}

#ifdef PCI_IOV
	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		taskqueue_enqueue(pf->tq, &pf->vflr_task);
	}
#endif

	/* Ack/clear the PBA for vector 0 so it can fire again */
	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	if (do_task)
		taskqueue_enqueue(pf->tq, &pf->adminq);
}
1539
1540 /*********************************************************************
1541  *
1542  *  Media Ioctl callback
1543  *
1544  *  This routine is called whenever the user queries the status of
1545  *  the interface using ifconfig.
1546  *
1547  **********************************************************************/
1548 static void
1549 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1550 {
1551         struct ixl_vsi  *vsi = ifp->if_softc;
1552         struct ixl_pf   *pf = vsi->back;
1553         struct i40e_hw  *hw = &pf->hw;
1554
1555         INIT_DEBUGOUT("ixl_media_status: begin");
1556         IXL_PF_LOCK(pf);
1557
1558         hw->phy.get_link_info = TRUE;
1559         i40e_get_link_status(hw, &pf->link_up);
1560         ixl_update_link_status(pf);
1561
1562         ifmr->ifm_status = IFM_AVALID;
1563         ifmr->ifm_active = IFM_ETHER;
1564
1565         if (!pf->link_up) {
1566                 IXL_PF_UNLOCK(pf);
1567                 return;
1568         }
1569
1570         ifmr->ifm_status |= IFM_ACTIVE;
1571
1572         /* Hardware always does full-duplex */
1573         ifmr->ifm_active |= IFM_FDX;
1574
1575         switch (hw->phy.link_info.phy_type) {
1576                 /* 100 M */
1577                 case I40E_PHY_TYPE_100BASE_TX:
1578                         ifmr->ifm_active |= IFM_100_TX;
1579                         break;
1580                 /* 1 G */
1581                 case I40E_PHY_TYPE_1000BASE_T:
1582                         ifmr->ifm_active |= IFM_1000_T;
1583                         break;
1584                 case I40E_PHY_TYPE_1000BASE_SX:
1585                         ifmr->ifm_active |= IFM_1000_SX;
1586                         break;
1587                 case I40E_PHY_TYPE_1000BASE_LX:
1588                         ifmr->ifm_active |= IFM_1000_LX;
1589                         break;
1590                 case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
1591                         ifmr->ifm_active |= IFM_OTHER;
1592                         break;
1593                 /* 10 G */
1594                 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1595                         ifmr->ifm_active |= IFM_10G_TWINAX;
1596                         break;
1597                 case I40E_PHY_TYPE_10GBASE_SR:
1598                         ifmr->ifm_active |= IFM_10G_SR;
1599                         break;
1600                 case I40E_PHY_TYPE_10GBASE_LR:
1601                         ifmr->ifm_active |= IFM_10G_LR;
1602                         break;
1603                 case I40E_PHY_TYPE_10GBASE_T:
1604                         ifmr->ifm_active |= IFM_10G_T;
1605                         break;
1606                 case I40E_PHY_TYPE_XAUI:
1607                 case I40E_PHY_TYPE_XFI:
1608                 case I40E_PHY_TYPE_10GBASE_AOC:
1609                         ifmr->ifm_active |= IFM_OTHER;
1610                         break;
1611                 /* 40 G */
1612                 case I40E_PHY_TYPE_40GBASE_CR4:
1613                 case I40E_PHY_TYPE_40GBASE_CR4_CU:
1614                         ifmr->ifm_active |= IFM_40G_CR4;
1615                         break;
1616                 case I40E_PHY_TYPE_40GBASE_SR4:
1617                         ifmr->ifm_active |= IFM_40G_SR4;
1618                         break;
1619                 case I40E_PHY_TYPE_40GBASE_LR4:
1620                         ifmr->ifm_active |= IFM_40G_LR4;
1621                         break;
1622                 case I40E_PHY_TYPE_XLAUI:
1623                         ifmr->ifm_active |= IFM_OTHER;
1624                         break;
1625 #ifndef IFM_ETH_XTYPE
1626                 case I40E_PHY_TYPE_1000BASE_KX:
1627                         ifmr->ifm_active |= IFM_1000_CX;
1628                         break;
1629                 case I40E_PHY_TYPE_SGMII:
1630                         ifmr->ifm_active |= IFM_OTHER;
1631                         break;
1632                 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1633                 case I40E_PHY_TYPE_10GBASE_CR1:
1634                         ifmr->ifm_active |= IFM_10G_TWINAX;
1635                         break;
1636                 case I40E_PHY_TYPE_10GBASE_KX4:
1637                         ifmr->ifm_active |= IFM_10G_CX4;
1638                         break;
1639                 case I40E_PHY_TYPE_10GBASE_KR:
1640                         ifmr->ifm_active |= IFM_10G_SR;
1641                         break;
1642                 case I40E_PHY_TYPE_SFI:
1643                         ifmr->ifm_active |= IFM_OTHER;
1644                         break;
1645                 case I40E_PHY_TYPE_40GBASE_KR4:
1646                 case I40E_PHY_TYPE_XLPPI:
1647                 case I40E_PHY_TYPE_40GBASE_AOC:
1648                         ifmr->ifm_active |= IFM_40G_SR4;
1649                         break;
1650 #else
1651                 case I40E_PHY_TYPE_1000BASE_KX:
1652                         ifmr->ifm_active |= IFM_1000_KX;
1653                         break;
1654                 case I40E_PHY_TYPE_SGMII:
1655                         ifmr->ifm_active |= IFM_1000_SGMII;
1656                         break;
1657                 /* ERJ: What's the difference between these? */
1658                 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1659                 case I40E_PHY_TYPE_10GBASE_CR1:
1660                         ifmr->ifm_active |= IFM_10G_CR1;
1661                         break;
1662                 case I40E_PHY_TYPE_10GBASE_KX4:
1663                         ifmr->ifm_active |= IFM_10G_KX4;
1664                         break;
1665                 case I40E_PHY_TYPE_10GBASE_KR:
1666                         ifmr->ifm_active |= IFM_10G_KR;
1667                         break;
1668                 case I40E_PHY_TYPE_SFI:
1669                         ifmr->ifm_active |= IFM_10G_SFI;
1670                         break;
1671                 /* Our single 20G media type */
1672                 case I40E_PHY_TYPE_20GBASE_KR2:
1673                         ifmr->ifm_active |= IFM_20G_KR2;
1674                         break;
1675                 case I40E_PHY_TYPE_40GBASE_KR4:
1676                         ifmr->ifm_active |= IFM_40G_KR4;
1677                         break;
1678                 case I40E_PHY_TYPE_XLPPI:
1679                 case I40E_PHY_TYPE_40GBASE_AOC:
1680                         ifmr->ifm_active |= IFM_40G_XLPPI;
1681                         break;
1682 #endif
1683                 /* Unknown to driver */
1684                 default:
1685                         ifmr->ifm_active |= IFM_UNKNOWN;
1686                         break;
1687         }
1688         /* Report flow control status as well */
1689         if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1690                 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1691         if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1692                 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1693
1694         IXL_PF_UNLOCK(pf);
1695
1696         return;
1697 }
1698
1699 /*
1700  * NOTE: Fortville does not support forcing media speeds. Instead,
1701  * use the set_advertise sysctl to set the speeds Fortville
1702  * will advertise or be allowed to operate at.
1703  */
1704 static int
1705 ixl_media_change(struct ifnet * ifp)
1706 {
1707         struct ixl_vsi *vsi = ifp->if_softc;
1708         struct ifmedia *ifm = &vsi->media;
1709
1710         INIT_DEBUGOUT("ixl_media_change: begin");
1711
1712         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1713                 return (EINVAL);
1714
1715         if_printf(ifp, "Media change is not supported.\n");
1716
1717         return (ENODEV);
1718 }
1719
1720
1721 #ifdef IXL_FDIR
1722 /*
1723 ** ATR: Application Targetted Receive - creates a filter
1724 **      based on TX flow info that will keep the receive
1725 **      portion of the flow on the same queue. Based on the
1726 **      implementation this is only available for TCP connections
1727 */
/*
 * Build a Flow Director filter-programming descriptor for this TCP flow
 * so that receive traffic for the flow is steered to the same queue the
 * transmit side is using (queue index = que->me).
 *
 * Sampling policy: every SYN/FIN packet is sampled, plus every
 * atr_rate-th packet otherwise. A FIN removes the filter; anything
 * else adds/updates it. Note this consumes one TX descriptor slot
 * (txr->avail is decremented) without attaching an mbuf.
 */
void
ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
{
	struct ixl_vsi                  *vsi = que->vsi;
	struct tx_ring                  *txr = &que->txr;
	struct i40e_filter_program_desc *FDIR;
	u32                             ptype, dtype;	/* descriptor QW0 / QW1 */
	int                             idx;

	/* check if ATR is enabled and sample rate */
	if ((!ixl_enable_fdir) || (!txr->atr_rate))
		return;
	/*
	** We sample all TCP SYN/FIN packets,
	** or at the selected sample rate 
	*/
	txr->atr_count++;
	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
	    (txr->atr_count < txr->atr_rate))
		return;
	txr->atr_count = 0;

	/* Get a descriptor to use (overlays the TX ring entry) */
	idx = txr->next_avail;
	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
	if (++idx == que->num_desc)
		idx = 0;
	txr->avail--;
	txr->next_avail = idx;

	/* QW0: target queue index for the steered RX traffic */
	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	    I40E_TXD_FLTR_QW0_QINDEX_MASK;

	/* QW0: packet classifier type, IPv4 vs IPv6 TCP */
	ptype |= (etype == ETHERTYPE_IP) ?
	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	/* QW0: destination VSI for the filter */
	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;

	/*
	** We use the TCP TH_FIN as a trigger to remove
	** the filter, otherwise its an update.
	*/
	dtype |= (th->th_flags & TH_FIN) ?
	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	/* QW1: direct matching packets to the chosen queue index */
	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
	    I40E_TXD_FLTR_QW1_DEST_SHIFT;

	/* QW1: report the filter ID in the RX descriptor FD status field */
	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
	FDIR->dtype_cmd_cntindex = htole32(dtype);
	return;
}
1791 #endif
1792
1793
/*
 * Program the VSI's unicast and multicast promiscuous modes from the
 * interface flags: IFF_PROMISC enables unicast promiscuous, and
 * IFF_ALLMULTI (or exceeding MAX_MULTICAST_ADDR group memberships)
 * enables multicast promiscuous.
 */
static void
ixl_set_promisc(struct ixl_vsi *vsi)
{
	struct ifnet	*ifp = vsi->ifp;
	struct i40e_hw	*hw = vsi->hw;
	int		err, mcnt = 0;
	bool		uni = FALSE, multi = FALSE;

	if (ifp->if_flags & IFF_ALLMULTI)
		multi = TRUE;
	else { /* Need to count the multicast addresses */
		struct  ifmultiaddr *ifma;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/* Cap the count; past the HW limit the exact
			 * number no longer matters. */
			if (mcnt == MAX_MULTICAST_ADDR)
				break;
			mcnt++;
		}
		if_maddr_runlock(ifp);
	}

	/* Too many groups to filter individually: go promiscuous */
	if (mcnt >= MAX_MULTICAST_ADDR)
		multi = TRUE;
	if (ifp->if_flags & IFF_PROMISC)
		uni = TRUE;

	/*
	 * NOTE(review): the first AQ status is immediately overwritten
	 * and neither result is checked — failures here are silent.
	 */
	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return;
}
1828
1829 /*********************************************************************
1830  *      Filter Routines
1831  *
1832  *      Routines for multicast and vlan filter management.
1833  *
1834  *********************************************************************/
1835 static void
1836 ixl_add_multi(struct ixl_vsi *vsi)
1837 {
1838         struct  ifmultiaddr     *ifma;
1839         struct ifnet            *ifp = vsi->ifp;
1840         struct i40e_hw          *hw = vsi->hw;
1841         int                     mcnt = 0, flags;
1842
1843         IOCTL_DEBUGOUT("ixl_add_multi: begin");
1844
1845         if_maddr_rlock(ifp);
1846         /*
1847         ** First just get a count, to decide if we
1848         ** we simply use multicast promiscuous.
1849         */
1850         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1851                 if (ifma->ifma_addr->sa_family != AF_LINK)
1852                         continue;
1853                 mcnt++;
1854         }
1855         if_maddr_runlock(ifp);
1856
1857         if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1858                 /* delete existing MC filters */
1859                 ixl_del_hw_filters(vsi, mcnt);
1860                 i40e_aq_set_vsi_multicast_promiscuous(hw,
1861                     vsi->seid, TRUE, NULL);
1862                 return;
1863         }
1864
1865         mcnt = 0;
1866         if_maddr_rlock(ifp);
1867         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1868                 if (ifma->ifma_addr->sa_family != AF_LINK)
1869                         continue;
1870                 ixl_add_mc_filter(vsi,
1871                     (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1872                 mcnt++;
1873         }
1874         if_maddr_runlock(ifp);
1875         if (mcnt > 0) {
1876                 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1877                 ixl_add_hw_filters(vsi, flags, mcnt);
1878         }
1879
1880         IOCTL_DEBUGOUT("ixl_add_multi: end");
1881         return;
1882 }
1883
1884 static void
1885 ixl_del_multi(struct ixl_vsi *vsi)
1886 {
1887         struct ifnet            *ifp = vsi->ifp;
1888         struct ifmultiaddr      *ifma;
1889         struct ixl_mac_filter   *f;
1890         int                     mcnt = 0;
1891         bool            match = FALSE;
1892
1893         IOCTL_DEBUGOUT("ixl_del_multi: begin");
1894
1895         /* Search for removed multicast addresses */
1896         if_maddr_rlock(ifp);
1897         SLIST_FOREACH(f, &vsi->ftl, next) {
1898                 if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1899                         match = FALSE;
1900                         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1901                                 if (ifma->ifma_addr->sa_family != AF_LINK)
1902                                         continue;
1903                                 u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1904                                 if (cmp_etheraddr(f->macaddr, mc_addr)) {
1905                                         match = TRUE;
1906                                         break;
1907                                 }
1908                         }
1909                         if (match == FALSE) {
1910                                 f->flags |= IXL_FILTER_DEL;
1911                                 mcnt++;
1912                         }
1913                 }
1914         }
1915         if_maddr_runlock(ifp);
1916
1917         if (mcnt > 0)
1918                 ixl_del_hw_filters(vsi, mcnt);
1919 }
1920
1921
1922 /*********************************************************************
1923  *  Timer routine
1924  *
 *  This routine checks for link status, updates statistics,
1926  *  and runs the watchdog check.
1927  *
1928  *  Only runs when the driver is configured UP and RUNNING.
1929  *
1930  **********************************************************************/
1931
static void
ixl_local_timer(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = pf->dev;
	int			hung = 0;	/* queues currently marked hung */
	u32			mask;

	/* Runs as a callout with the PF lock held */
	mtx_assert(&pf->pf_mtx, MA_OWNED);

	/* Fire off the adminq task */
	taskqueue_enqueue(pf->tq, &pf->adminq);

	/* Update stats */
	ixl_update_stats_counters(pf);

	/*
	** Check status of the queues
	*/
	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
 
	for (int i = 0; i < vsi->num_queues; i++,que++) {
		/* Any queues with outstanding work get a sw irq */
		if (que->busy)
			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to 5 we declare it hung.
		*/
		if (que->busy == IXL_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			vsi->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
				vsi->active_queues |= ((u64)1 << que->me);
		}
		/* Crossed the busy threshold: escalate to hung */
		if (que->busy >= IXL_MAX_TX_BUSY) {
#ifdef IXL_DEBUG
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
#endif
			que->busy = IXL_QUEUE_HUNG;
			++hung;
		}
	}
	/* Only reinit if all queues show hung */
	if (hung == vsi->num_queues)
		goto hung;

	/* Re-arm for the next second; not re-armed on the hung path
	 * because ixl_init_locked() restarts the timer itself. */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
	return;

hung:
	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
	ixl_init_locked(pf);
}
1996
1997 /*
1998 ** Note: this routine updates the OS on the link state
1999 **      the real check of the hardware only happens with
2000 **      a link interrupt.
2001 */
2002 static void
2003 ixl_update_link_status(struct ixl_pf *pf)
2004 {
2005         struct ixl_vsi          *vsi = &pf->vsi;
2006         struct i40e_hw          *hw = &pf->hw;
2007         struct ifnet            *ifp = vsi->ifp;
2008         device_t                dev = pf->dev;
2009
2010         if (pf->link_up) {
2011                 if (vsi->link_active == FALSE) {
2012                         pf->fc = hw->fc.current_mode;
2013                         if (bootverbose) {
2014                                 device_printf(dev,"Link is up %d Gbps %s,"
2015                                     " Flow Control: %s\n",
2016                                     ((pf->link_speed ==
2017                                     I40E_LINK_SPEED_40GB)? 40:10),
2018                                     "Full Duplex", ixl_fc_string[pf->fc]);
2019                         }
2020                         vsi->link_active = TRUE;
2021                         /*
2022                         ** Warn user if link speed on NPAR enabled
2023                         ** partition is not at least 10GB
2024                         */
2025                         if (hw->func_caps.npar_enable &&
2026                            (hw->phy.link_info.link_speed ==
2027                            I40E_LINK_SPEED_1GB ||
2028                            hw->phy.link_info.link_speed ==
2029                            I40E_LINK_SPEED_100MB))
2030                                 device_printf(dev, "The partition detected"
2031                                     "link speed that is less than 10Gbps\n");
2032                         if_link_state_change(ifp, LINK_STATE_UP);
2033                 }
2034         } else { /* Link down */
2035                 if (vsi->link_active == TRUE) {
2036                         if (bootverbose)
2037                                 device_printf(dev, "Link is Down\n");
2038                         if_link_state_change(ifp, LINK_STATE_DOWN);
2039                         vsi->link_active = FALSE;
2040                 }
2041         }
2042
2043         return;
2044 }
2045
/*
 * Stop the adapter: perform the locked teardown under the PF lock,
 * then release the interrupt resources (done outside the lock).
 */
static void
ixl_stop(struct ixl_pf *pf)
{
	IXL_PF_LOCK(pf);
	ixl_stop_locked(pf);
	IXL_PF_UNLOCK(pf);

	ixl_free_interrupt_resources(pf);
}
2055
2056 /*********************************************************************
2057  *
2058  *  This routine disables all traffic on the adapter by issuing a
2059  *  global reset on the MAC and deallocates TX/RX buffers.
2060  *
2061  **********************************************************************/
2062
static void
ixl_stop_locked(struct ixl_pf *pf)
{
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;

	INIT_DEBUGOUT("ixl_stop: begin\n");

	/* Caller must hold the PF lock */
	IXL_PF_LOCK_ASSERT(pf);

	/* Stop the local timer */
	callout_stop(&pf->timer);

	/* With active VFs only the ring interrupts are disabled,
	 * leaving the adminq interrupt alive for VF servicing */
	if (pf->num_vfs == 0)
		ixl_disable_intr(vsi);
	else
		ixl_disable_rings_intr(vsi);
	ixl_disable_rings(vsi);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}
2085
2086
/*********************************************************************
 *
 *  Setup the Legacy or MSI Interrupt handler
 *
 **********************************************************************/
2092 static int
2093 ixl_assign_vsi_legacy(struct ixl_pf *pf)
2094 {
2095         device_t        dev = pf->dev;
2096         struct          ixl_vsi *vsi = &pf->vsi;
2097         struct          ixl_queue *que = vsi->queues;
2098         int             error, rid = 0;
2099
2100         if (pf->msix == 1)
2101                 rid = 1;
2102         pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
2103             &rid, RF_SHAREABLE | RF_ACTIVE);
2104         if (pf->res == NULL) {
2105                 device_printf(dev, "Unable to allocate"
2106                     " bus resource: vsi legacy/msi interrupt\n");
2107                 return (ENXIO);
2108         }
2109
2110         /* Set the handler function */
2111         error = bus_setup_intr(dev, pf->res,
2112             INTR_TYPE_NET | INTR_MPSAFE, NULL,
2113             ixl_intr, pf, &pf->tag);
2114         if (error) {
2115                 pf->res = NULL;
2116                 device_printf(dev, "Failed to register legacy/msi handler\n");
2117                 return (error);
2118         }
2119         bus_describe_intr(dev, pf->res, pf->tag, "irq0");
2120         TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
2121         TASK_INIT(&que->task, 0, ixl_handle_que, que);
2122         que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
2123             taskqueue_thread_enqueue, &que->tq);
2124         taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2125             device_get_nameunit(dev));
2126         TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
2127
2128 #ifdef PCI_IOV
2129         TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
2130 #endif
2131
2132         pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
2133             taskqueue_thread_enqueue, &pf->tq);
2134         taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
2135             device_get_nameunit(dev));
2136
2137         return (0);
2138 }
2139
2140 static void
2141 ixl_init_taskqueues(struct ixl_pf *pf)
2142 {
2143         struct ixl_vsi *vsi = &pf->vsi;
2144         struct ixl_queue *que = vsi->queues;
2145         device_t dev = pf->dev;
2146
2147         /* Tasklet for Admin Queue */
2148         TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
2149 #ifdef PCI_IOV
2150         /* VFLR Tasklet */
2151         TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
2152 #endif
2153
2154         /* Create and start PF taskqueue */
2155         pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
2156             taskqueue_thread_enqueue, &pf->tq);
2157         taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
2158             device_get_nameunit(dev));
2159
2160         /* Create queue tasks and start queue taskqueues */
2161         for (int i = 0; i < vsi->num_queues; i++, que++) {
2162                 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
2163                 TASK_INIT(&que->task, 0, ixl_handle_que, que);
2164                 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
2165                     taskqueue_thread_enqueue, &que->tq);
2166 #ifdef RSS
2167                 CPU_SETOF(cpu_id, &cpu_mask);
2168                 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2169                     &cpu_mask, "%s (bucket %d)",
2170                     device_get_nameunit(dev), cpu_id);
2171 #else
2172                 taskqueue_start_threads(&que->tq, 1, PI_NET,
2173                     "%s (que %d)", device_get_nameunit(dev), que->me);
2174 #endif
2175         }
2176
2177 }
2178
2179 static void
2180 ixl_free_taskqueues(struct ixl_pf *pf)
2181 {
2182         struct ixl_vsi          *vsi = &pf->vsi;
2183         struct ixl_queue        *que = vsi->queues;
2184
2185         if (pf->tq)
2186                 taskqueue_free(pf->tq);
2187         for (int i = 0; i < vsi->num_queues; i++, que++) {
2188                 if (que->tq)
2189                         taskqueue_free(que->tq);
2190         }
2191 }
2192
2193 /*********************************************************************
2194  *
2195  *  Setup MSIX Interrupt resources and handlers for the VSI
2196  *
2197  **********************************************************************/
2198 static int
2199 ixl_assign_vsi_msix(struct ixl_pf *pf)
2200 {
2201         device_t        dev = pf->dev;
2202         struct          ixl_vsi *vsi = &pf->vsi;
2203         struct          ixl_queue *que = vsi->queues;
2204         struct          tx_ring  *txr;
2205         int             error, rid, vector = 0;
2206 #ifdef  RSS
2207         cpuset_t cpu_mask;
2208 #endif
2209
2210         /* Admin Queue interrupt vector is 0 */
2211         rid = vector + 1;
2212         pf->res = bus_alloc_resource_any(dev,
2213             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2214         if (!pf->res) {
2215                 device_printf(dev, "Unable to allocate"
2216                     " bus resource: Adminq interrupt [rid=%d]\n", rid);
2217                 return (ENXIO);
2218         }
2219         /* Set the adminq vector and handler */
2220         error = bus_setup_intr(dev, pf->res,
2221             INTR_TYPE_NET | INTR_MPSAFE, NULL,
2222             ixl_msix_adminq, pf, &pf->tag);
2223         if (error) {
2224                 pf->res = NULL;
2225                 device_printf(dev, "Failed to register Admin que handler");
2226                 return (error);
2227         }
2228         bus_describe_intr(dev, pf->res, pf->tag, "aq");
2229         pf->admvec = vector;
2230         ++vector;
2231
2232         /* Now set up the stations */
2233         for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
2234                 int cpu_id = i;
2235                 rid = vector + 1;
2236                 txr = &que->txr;
2237                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2238                     RF_SHAREABLE | RF_ACTIVE);
2239                 if (que->res == NULL) {
2240                         device_printf(dev, "Unable to allocate"
2241                             " bus resource: que interrupt [rid=%d]\n", rid);
2242                         return (ENXIO);
2243                 }
2244                 /* Set the handler function */
2245                 error = bus_setup_intr(dev, que->res,
2246                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
2247                     ixl_msix_que, que, &que->tag);
2248                 if (error) {
2249                         que->res = NULL;
2250                         device_printf(dev, "Failed to register que handler");
2251                         return (error);
2252                 }
2253                 bus_describe_intr(dev, que->res, que->tag, "que%d", i);
2254                 /* Bind the vector to a CPU */
2255 #ifdef RSS
2256                 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2257 #endif
2258                 bus_bind_intr(dev, que->res, cpu_id);
2259                 que->msix = vector;
2260         }
2261
2262         return (0);
2263 }
2264
2265
2266 /*
2267  * Allocate MSI/X vectors
2268  */
2269 static int
2270 ixl_init_msix(struct ixl_pf *pf)
2271 {
2272         device_t dev = pf->dev;
2273         int rid, want, vectors, queues, available;
2274
2275         /* Override by tuneable */
2276         if (ixl_enable_msix == 0)
2277                 goto no_msix;
2278
2279         /*
2280         ** When used in a virtualized environment 
2281         ** PCI BUSMASTER capability may not be set
2282         ** so explicity set it here and rewrite
2283         ** the ENABLE in the MSIX control register
2284         ** at this point to cause the host to
2285         ** successfully initialize us.
2286         */
2287         {
2288                 u16 pci_cmd_word;
2289                 int msix_ctrl;
2290                 pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2291                 pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
2292                 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
2293                 pci_find_cap(dev, PCIY_MSIX, &rid);
2294                 rid += PCIR_MSIX_CTRL;
2295                 msix_ctrl = pci_read_config(dev, rid, 2);
2296                 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
2297                 pci_write_config(dev, rid, msix_ctrl, 2);
2298         }
2299
2300         /* First try MSI/X */
2301         rid = PCIR_BAR(IXL_BAR);
2302         pf->msix_mem = bus_alloc_resource_any(dev,
2303             SYS_RES_MEMORY, &rid, RF_ACTIVE);
2304         if (!pf->msix_mem) {
2305                 /* May not be enabled */
2306                 device_printf(pf->dev,
2307                     "Unable to map MSIX table\n");
2308                 goto no_msix;
2309         }
2310
2311         available = pci_msix_count(dev); 
2312         if (available == 0) { /* system has msix disabled */
2313                 bus_release_resource(dev, SYS_RES_MEMORY,
2314                     rid, pf->msix_mem);
2315                 pf->msix_mem = NULL;
2316                 goto no_msix;
2317         }
2318
2319         /* Figure out a reasonable auto config value */
2320         queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
2321
2322         /* Override with tunable value if tunable is less than autoconfig count */
2323         if ((ixl_max_queues != 0) && (ixl_max_queues <= queues)) 
2324                 queues = ixl_max_queues;
2325         else if ((ixl_max_queues != 0) && (ixl_max_queues > queues))
2326                 device_printf(dev, "ixl_max_queues > # of cpus, using "
2327                     "autoconfig amount...\n");
2328         /* Or limit maximum auto-configured queues to 8 */
2329         else if ((ixl_max_queues == 0) && (queues > 8))
2330                 queues = 8;
2331
2332 #ifdef  RSS
2333         /* If we're doing RSS, clamp at the number of RSS buckets */
2334         if (queues > rss_getnumbuckets())
2335                 queues = rss_getnumbuckets();
2336 #endif
2337
2338         /*
2339         ** Want one vector (RX/TX pair) per queue
2340         ** plus an additional for the admin queue.
2341         */
2342         want = queues + 1;
2343         if (want <= available)  /* Have enough */
2344                 vectors = want;
2345         else {
2346                 device_printf(pf->dev,
2347                     "MSIX Configuration Problem, "
2348                     "%d vectors available but %d wanted!\n",
2349                     available, want);
2350                 return (0); /* Will go to Legacy setup */
2351         }
2352
2353         if (pci_alloc_msix(dev, &vectors) == 0) {
2354                 device_printf(pf->dev,
2355                     "Using MSIX interrupts with %d vectors\n", vectors);
2356                 pf->msix = vectors;
2357                 pf->vsi.num_queues = queues;
2358 #ifdef RSS
2359                 /*
2360                  * If we're doing RSS, the number of queues needs to
2361                  * match the number of RSS buckets that are configured.
2362                  *
2363                  * + If there's more queues than RSS buckets, we'll end
2364                  *   up with queues that get no traffic.
2365                  *
2366                  * + If there's more RSS buckets than queues, we'll end
2367                  *   up having multiple RSS buckets map to the same queue,
2368                  *   so there'll be some contention.
2369                  */
2370                 if (queues != rss_getnumbuckets()) {
2371                         device_printf(dev,
2372                             "%s: queues (%d) != RSS buckets (%d)"
2373                             "; performance will be impacted.\n",
2374                             __func__, queues, rss_getnumbuckets());
2375                 }
2376 #endif
2377                 return (vectors);
2378         }
2379 no_msix:
2380         vectors = pci_msi_count(dev);
2381         pf->vsi.num_queues = 1;
2382         ixl_max_queues = 1;
2383         ixl_enable_msix = 0;
2384         if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2385                 device_printf(pf->dev, "Using an MSI interrupt\n");
2386         else {
2387                 vectors = 0;
2388                 device_printf(pf->dev, "Using a Legacy interrupt\n");
2389         }
2390         return (vectors);
2391 }
2392
2393 /*
2394  * Plumb MSIX vectors
2395  */
static void
ixl_configure_msix(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	u32		reg;
	u16		vector = 1;	/* queue vectors start at 1; 0 is adminq */

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	/* Enable the "other" interrupt causes routed to vector 0 */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 * in MSIX mode.
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);

	/* Next configure the queues */
	for (int i = 0; i < vsi->num_queues; i++, vector++) {
		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
		/* Each queue heads its own cause linked list */
		wr32(hw, I40E_PFINT_LNKLSTN(i), i);

		/* RX cause: route to this queue's vector, chain to the
		 * same index's TX cause next */
		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_RQCTL(i), reg);

		/* TX cause: chain to the next queue's RX cause, or
		 * terminate the list on the last queue */
		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
		if (i == (vsi->num_queues - 1))
			reg |= (IXL_QUEUE_EOL
			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
		wr32(hw, I40E_QINT_TQCTL(i), reg);
	}
}
2455
/*
 * Configure for MSI single vector operation: all causes (admin queue,
 * "other" events, and the single queue pair) share interrupt vector 0.
 */
static void
ixl_configure_legacy(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	u32		reg;

	/* Zero both ITR throttling timers on vector 0 (no moderation) */
	wr32(hw, I40E_PFINT_ITR0(0), 0);
	wr32(hw, I40E_PFINT_ITR0(1), 0);

	/* Setup "other" causes */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
	    | I40E_PFINT_ICR0_ENA_GRST_MASK
	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
	    ;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * SW_ITR_IDX = 0, but don't change INTENA.
	 * NOTE(review): these are the DYN_CTLN (per-queue) mask macros
	 * written to the DYN_CTL0 register; presumably the bit layouts
	 * coincide -- confirm against the datasheet.
	 */
	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
	wr32(hw, I40E_PFINT_STAT_CTL0, 0);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the q int */
	/*
	 * NOTE(review): the NEXTQ_TYPE field below uses the TQCTL shift
	 * macro in an RQCTL value; the shifts look interchangeable, but
	 * verify against the register definitions.
	 */
	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	/* TX queue 0 is the end of the cause chain */
	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
	wr32(hw, I40E_QINT_TQCTL(0), reg);

}
2504
2505
2506 /*
2507  * Set the Initial ITR state
2508  */
2509 static void
2510 ixl_configure_itr(struct ixl_pf *pf)
2511 {
2512         struct i40e_hw          *hw = &pf->hw;
2513         struct ixl_vsi          *vsi = &pf->vsi;
2514         struct ixl_queue        *que = vsi->queues;
2515
2516         vsi->rx_itr_setting = ixl_rx_itr;
2517         if (ixl_dynamic_rx_itr)
2518                 vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2519         vsi->tx_itr_setting = ixl_tx_itr;
2520         if (ixl_dynamic_tx_itr)
2521                 vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2522         
2523         for (int i = 0; i < vsi->num_queues; i++, que++) {
2524                 struct tx_ring  *txr = &que->txr;
2525                 struct rx_ring  *rxr = &que->rxr;
2526
2527                 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2528                     vsi->rx_itr_setting);
2529                 rxr->itr = vsi->rx_itr_setting;
2530                 rxr->latency = IXL_AVE_LATENCY;
2531                 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2532                     vsi->tx_itr_setting);
2533                 txr->itr = vsi->tx_itr_setting;
2534                 txr->latency = IXL_AVE_LATENCY;
2535         }
2536 }
2537
2538
2539 static int
2540 ixl_allocate_pci_resources(struct ixl_pf *pf)
2541 {
2542         int             rid;
2543         device_t        dev = pf->dev;
2544
2545         rid = PCIR_BAR(0);
2546         pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2547             &rid, RF_ACTIVE);
2548
2549         if (!(pf->pci_mem)) {
2550                 device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
2551                 return (ENXIO);
2552         }
2553
2554         pf->osdep.mem_bus_space_tag =
2555                 rman_get_bustag(pf->pci_mem);
2556         pf->osdep.mem_bus_space_handle =
2557                 rman_get_bushandle(pf->pci_mem);
2558         pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2559         pf->osdep.flush_reg = I40E_GLGEN_STAT;
2560         pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2561
2562         pf->hw.back = &pf->osdep;
2563
2564         /*
2565         ** Now setup MSI or MSI/X, should
2566         ** return us the number of supported
2567         ** vectors. (Will be 1 for MSI)
2568         */
2569         pf->msix = ixl_init_msix(pf);
2570         return (0);
2571 }
2572
/*
 * Tear down and release all interrupt handlers and IRQ resources:
 * first the per-queue MSI-X vectors (if any), then the admin-queue /
 * legacy vector.  Safe to call on a partially attached device.
 */
static void
ixl_free_interrupt_resources(struct ixl_pf *pf)
{
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = pf->dev;
	int rid;

	/* We may get here before stations are setup */
	if ((!ixl_enable_msix) || (que == NULL))
		goto early;

	/*
	**  Release all msix VSI resources:
	*/
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		/* IRQ rids are the MSI-X vector number plus one */
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL) {
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
			que->res = NULL;
		}
	}

early:
	/* Clean the AdminQ interrupt last */
	if (pf->admvec) /* we are doing MSIX */
		rid = pf->admvec + 1;
	else
		/* rid 1 for single-vector MSI, rid 0 for legacy INTx */
		(pf->msix != 0) ? (rid = 1):(rid = 0);

	if (pf->tag != NULL) {
		bus_teardown_intr(dev, pf->res, pf->tag);
		pf->tag = NULL;
	}
	if (pf->res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
		pf->res = NULL;
	}
}
2616
2617 static void
2618 ixl_free_pci_resources(struct ixl_pf *pf)
2619 {
2620         device_t                dev = pf->dev;
2621         int                     memrid;
2622
2623         ixl_free_interrupt_resources(pf);
2624
2625         if (pf->msix)
2626                 pci_release_msi(dev);
2627
2628         memrid = PCIR_BAR(IXL_BAR);
2629
2630         if (pf->msix_mem != NULL)
2631                 bus_release_resource(dev, SYS_RES_MEMORY,
2632                     memrid, pf->msix_mem);
2633
2634         if (pf->pci_mem != NULL)
2635                 bus_release_resource(dev, SYS_RES_MEMORY,
2636                     PCIR_BAR(0), pf->pci_mem);
2637
2638         return;
2639 }
2640
/*
 * Register with ifmedia every media type whose bit is set in the
 * adapter-reported PHY type bitmask 'phy_type'.  Bit positions are
 * the I40E_PHY_TYPE_* enum values.
 */
static void
ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
{
	/* Display supported media types */
	if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	/* Several 10G attachment variants are all presented as TWINAX */
	if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
	    phy_type & (1 << I40E_PHY_TYPE_XFI) ||
	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);

	/* Likewise, several 40G variants are presented as CR4 */
	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
	    phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

#ifndef IFM_ETH_XTYPE
	/* Older ifmedia: map backplane/AOC types to the closest legacy type */
	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1) ||
	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
	    phy_type & (1 << I40E_PHY_TYPE_SFI))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
#else
	/* Newer ifmedia (IFM_ETH_XTYPE) has exact media words for these */
	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_SFI))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
#endif
}
2721
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 *  Allocates the ifnet, wires up the driver entry points, advertises
 *  offload capabilities, queries the PHY for supported media, and
 *  attaches the Ethernet interface.  Returns -1 only if the ifnet
 *  cannot be allocated; a media-query failure is logged and attach
 *  continues (returns 0) without per-type media entries.
 *
 **********************************************************************/
static int
ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
{
	struct ifnet		*ifp;
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;
	struct i40e_aq_get_phy_abilities_resp abilities;
	enum i40e_status_code aq_error = 0;

	INIT_DEBUGOUT("ixl_setup_interface: begin");

	ifp = vsi->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		return (-1);
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = IF_Gbps(40);
	ifp->if_init = ixl_init;
	ifp->if_softc = vsi;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixl_ioctl;

#if __FreeBSD_version >= 1100036
	if_setgetcounterfn(ifp, ixl_get_counter);
#endif

	/* Multiqueue transmit entry points */
	ifp->if_transmit = ixl_mq_start;

	ifp->if_qflush = ixl_qflush;

	/* Sized from the ring: leave slack below the descriptor count */
	ifp->if_snd.ifq_maxlen = que->num_desc - 2;

	vsi->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Checksum/TSO/LRO/jumbo offload capabilities */
	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_TSO;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_LRO;

	/* VLAN capabilties */
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU
			     |  IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	** Don't turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the ixl driver you can
	** enable this and get full hardware tag filtering.
	*/
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
		     ixl_media_status);

	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, TRUE, &abilities, NULL);
	/* May need delay to detect fiber correctly */
	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
		i40e_msec_delay(200);
		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
		    TRUE, &abilities, NULL);
	}
	if (aq_error) {
		if (aq_error == I40E_ERR_UNKNOWN_PHY)
			device_printf(dev, "Unknown PHY type detected!\n");
		else
			device_printf(dev,
			    "Error getting supported media types, err %d,"
			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
		/* Attach still succeeds; no media types are registered */
		return (0);
	}

	ixl_add_ifmedia(vsi, abilities.phy_type);

	/* Use autoselect media by default */
	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);

	ether_ifattach(ifp, hw->mac.addr);

	return (0);
}
2828
2829 /*
2830 ** Run when the Admin Queue gets a link state change interrupt.
2831 */
2832 static void
2833 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
2834 {
2835         struct i40e_hw  *hw = &pf->hw; 
2836         device_t dev = pf->dev;
2837         struct i40e_aqc_get_link_status *status =
2838             (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
2839
2840
2841         /* Request link status from adapter */
2842         hw->phy.get_link_info = TRUE;
2843         i40e_get_link_status(hw, &pf->link_up);
2844
2845         /* Print out message if an unqualified module is found */
2846         if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2847             (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
2848             (!(status->link_info & I40E_AQ_LINK_UP)))
2849                 device_printf(dev, "Link failed because "
2850                     "an unqualified module was detected!\n");
2851
2852         /* Update OS link info */
2853         ixl_update_link_status(pf);
2854 }
2855
2856 /*********************************************************************
2857  *
2858  *  Get Firmware Switch configuration
2859  *      - this will need to be more robust when more complex
2860  *        switch configurations are enabled.
2861  *
2862  **********************************************************************/
2863 static int
2864 ixl_switch_config(struct ixl_pf *pf)
2865 {
2866         struct i40e_hw  *hw = &pf->hw; 
2867         struct ixl_vsi  *vsi = &pf->vsi;
2868         device_t        dev = vsi->dev;
2869         struct i40e_aqc_get_switch_config_resp *sw_config;
2870         u8      aq_buf[I40E_AQ_LARGE_BUF];
2871         int     ret;
2872         u16     next = 0;
2873
2874         memset(&aq_buf, 0, sizeof(aq_buf));
2875         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2876         ret = i40e_aq_get_switch_config(hw, sw_config,
2877             sizeof(aq_buf), &next, NULL);
2878         if (ret) {
2879                 device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n",
2880                     ret);
2881                 return (ret);
2882         }
2883 #ifdef IXL_DEBUG
2884         device_printf(dev,
2885             "Switch config: header reported: %d in structure, %d total\n",
2886             sw_config->header.num_reported, sw_config->header.num_total);
2887         for (int i = 0; i < sw_config->header.num_reported; i++) {
2888                 device_printf(dev,
2889                     "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2890                     sw_config->element[i].element_type,
2891                     sw_config->element[i].seid,
2892                     sw_config->element[i].uplink_seid,
2893                     sw_config->element[i].downlink_seid);
2894         }
2895 #endif
2896         /* Simplified due to a single VSI at the moment */
2897         vsi->uplink_seid = sw_config->element[0].uplink_seid;
2898         vsi->downlink_seid = sw_config->element[0].downlink_seid;
2899         vsi->seid = sw_config->element[0].seid;
2900         return (ret);
2901 }
2902
/*********************************************************************
 *
 *  Initialize the VSI:  this handles contexts, which means things
 *                       like the number of descriptors, buffer size,
 *                       plus we init the rings thru this function.
 *
 *  Fetches the VSI parameters from firmware, updates the queue map
 *  and VLAN stripping settings, then programs an HMC TX and RX
 *  context for every queue pair and (re)initializes the rings.
 *
 **********************************************************************/
static int
ixl_initialize_vsi(struct ixl_vsi *vsi)
{
	struct ixl_pf		*pf = vsi->back;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = vsi->dev;
	struct i40e_hw		*hw = vsi->hw;
	struct i40e_vsi_context ctxt;
	int			err = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = vsi->seid;
	/* If a VEB was created, attach this VSI under it */
	if (pf->veb_seid != 0)
		ctxt.uplink_seid = pf->veb_seid;
	ctxt.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d\n", err);
		return (err);
	}
#ifdef IXL_DEBUG
	device_printf(dev, "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
	    ctxt.uplink_seid, ctxt.vsi_number,
	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
#endif
	/*
	** Set the queue and traffic class bits
	**  - when multiple traffic classes are supported
	**    this will need to be more robust.
	*/
	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
	ctxt.info.queue_mapping[0] = 0; 
	/* This VSI is assigned 64 queues (we may not use all of them) */
	ctxt.info.tc_mapping[0] = 0x0c00;

	/* Set VLAN receive stripping mode */
	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
	else
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	/* Keep copy of VSI info in VSI for statistic counters */
	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));

	/* Reset VSI statistics */
	ixl_vsi_reset_stats(vsi);
	vsi->hw_filters_add = 0;
	vsi->hw_filters_del = 0;

	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);

	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d, aq_error %d\n",
		   err, hw->aq.asq_last_status);
		return (err);
	}

	/* Program one HMC TX and RX context per queue pair */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring		*txr = &que->txr;
		struct rx_ring		*rxr = &que->rxr;
		struct i40e_hmc_obj_txq tctx;
		struct i40e_hmc_obj_rxq rctx;
		u32			txctl;
		u16			size;

		/* Setup the HMC TX Context  */
		size = que->num_desc * sizeof(struct i40e_tx_desc);
		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
		tctx.new_context = 1;
		tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
		tctx.qlen = que->num_desc;
		tctx.fc_ena = 0;
		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
		/* Enable HEAD writeback */
		tctx.head_wb_ena = 1;
		/* Head writeback slot lives just past the descriptor ring */
		tctx.head_wb_addr = txr->dma.pa +
		    (que->num_desc * sizeof(struct i40e_tx_desc));
		tctx.rdylist_act = 0;
		err = i40e_clear_lan_tx_queue_context(hw, i);
		if (err) {
			device_printf(dev, "Unable to clear TX context\n");
			break;
		}
		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
		if (err) {
			device_printf(dev, "Unable to set TX context\n");
			break;
		}
		/* Associate the ring with this PF */
		txctl = I40E_QTX_CTL_PF_QUEUE;
		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
		wr32(hw, I40E_QTX_CTL(i), txctl);
		ixl_flush(hw);

		/* Do ring (re)init */
		ixl_init_tx_ring(que);

		/* Next setup the HMC RX Context  */
		if (vsi->max_frame_size <= MCLBYTES)
			rxr->mbuf_sz = MCLBYTES;
		else
			rxr->mbuf_sz = MJUMPAGESIZE;

		/* Largest frame HW can chain from rx_buf_chain_len buffers */
		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;

		/* Set up an RX context for the HMC */
		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
		/* ignore header split for now */
		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
		    vsi->max_frame_size : max_rxmax;
		rctx.dtype = 0;
		rctx.dsize = 1; /* do 32byte descriptors */
		rctx.hsplit_0 = 0;  /* no HDR split initially */
		rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
		rctx.qlen = que->num_desc;
		rctx.tphrdesc_ena = 1;
		rctx.tphwdesc_ena = 1;
		rctx.tphdata_ena = 0;
		rctx.tphhead_ena = 0;
		rctx.lrxqthresh = 2;
		rctx.crcstrip = 1;
		rctx.l2tsel = 1;
		rctx.showiv = 1;
		rctx.fc_ena = 0;
		rctx.prefena = 1;

		err = i40e_clear_lan_rx_queue_context(hw, i);
		if (err) {
			device_printf(dev,
			    "Unable to clear RX context %d\n", i);
			break;
		}
		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
		if (err) {
			device_printf(dev, "Unable to set RX context %d\n", i);
			break;
		}
		err = ixl_init_rx_ring(que);
		if (err) {
			device_printf(dev, "Fail in init_rx_ring %d\n", i);
			break;
		}
		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
#ifdef DEV_NETMAP
		/* preserve queue */
		if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(vsi->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
			wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
		} else
#endif /* DEV_NETMAP */
		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
	}
	return (err);
}
3077
3078
/*********************************************************************
 *
 *  Free all VSI structs.
 *
 *  Releases every queue's TX/RX soft state, DMA memory and locks,
 *  then the queue array itself, and finally the MAC filter list.
 *  Safe to call on a partially constructed VSI.
 *
 **********************************************************************/
void
ixl_free_vsi(struct ixl_vsi *vsi)
{
	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
	struct ixl_queue	*que = vsi->queues;

	/* Free station queues */
	if (!vsi->queues)
		goto free_filters;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		struct rx_ring *rxr = &que->rxr;

		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
			continue;
		IXL_TX_LOCK(txr);
		ixl_free_que_tx(que);
		if (txr->base)
			i40e_free_dma_mem(&pf->hw, &txr->dma);
		IXL_TX_UNLOCK(txr);
		IXL_TX_LOCK_DESTROY(txr);

		/*
		 * NOTE(review): this continue skips the RX teardown only
		 * when the RX mutex was never initialized, i.e. the queue
		 * never got past TX setup.
		 */
		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
			continue;
		IXL_RX_LOCK(rxr);
		ixl_free_que_rx(que);
		if (rxr->base)
			i40e_free_dma_mem(&pf->hw, &rxr->dma);
		IXL_RX_UNLOCK(rxr);
		IXL_RX_LOCK_DESTROY(rxr);

	}
	free(vsi->queues, M_DEVBUF);

free_filters:
	/* Free VSI filter list */
	ixl_free_mac_filters(vsi);
}
3123
3124 static void
3125 ixl_free_mac_filters(struct ixl_vsi *vsi)
3126 {
3127         struct ixl_mac_filter *f;
3128
3129         while (!SLIST_EMPTY(&vsi->ftl)) {
3130                 f = SLIST_FIRST(&vsi->ftl);
3131                 SLIST_REMOVE_HEAD(&vsi->ftl, next);
3132                 free(f, M_DEVBUF);
3133         }
3134 }
3135
3136
3137 /*********************************************************************
3138  *
3139  *  Allocate memory for the VSI (virtual station interface) and their
3140  *  associated queues, rings and the descriptors associated with each,
3141  *  called only once at attach.
3142  *
3143  **********************************************************************/
static int
ixl_setup_stations(struct ixl_pf *pf)
{
	device_t                dev = pf->dev;
	struct ixl_vsi          *vsi;
	struct ixl_queue        *que;
	struct tx_ring          *txr;
	struct rx_ring          *rxr;
	int                     rsize, tsize;
	int                     error = I40E_SUCCESS;

	vsi = &pf->vsi;
	/*
	 * NOTE(review): vsi->back is assigned twice in this prologue
	 * (here and a few lines below); one of the stores is redundant.
	 */
	vsi->back = (void *)pf;
	vsi->hw = &pf->hw;
	vsi->id = 0;
	vsi->num_vlans = 0;
	vsi->back = pf;

	/* Get memory for the station queues (zeroed, per-queue soft state) */
	if (!(vsi->queues =
	    (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		error = ENOMEM;
		goto early;
	}

	/*
	 * For each queue pair: set up the TX side (lock, descriptor DMA
	 * ring, soft structs, buf_ring), then the RX side (lock,
	 * descriptor DMA ring, soft structs).
	 */
	for (int i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		que->num_desc = ixl_ringsz;
		que->me = i;
		que->vsi = vsi;
		/* mark the queue as active */
		vsi->active_queues |= (u64)1 << que->me;
		txr = &que->txr;
		txr->que = que;
		txr->tail = I40E_QTX_TAIL(que->me);

		/* Initialize the TX lock */
		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(dev), que->me);
		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
		/*
		 * Create the TX descriptor ring; the extra u32 beyond the
		 * descriptors holds the head writeback value.
		 */
		tsize = roundup2((que->num_desc *
		    sizeof(struct i40e_tx_desc)) +
		    sizeof(u32), DBA_ALIGN);
		if (i40e_allocate_dma_mem(&pf->hw,
		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
			device_printf(dev,
			    "Unable to allocate TX Descriptor memory\n");
			error = ENOMEM;
			goto fail;
		}
		txr->base = (struct i40e_tx_desc *)txr->dma.va;
		bzero((void *)txr->base, tsize);
		/* Now allocate transmit soft structs for the ring */
		if (ixl_allocate_tx_data(que)) {
			device_printf(dev,
			    "Critical Failure setting up TX structures\n");
			error = ENOMEM;
			goto fail;
		}
		/* Allocate a buf ring */
		txr->br = buf_ring_alloc(4096, M_DEVBUF,
		    M_NOWAIT, &txr->mtx);
		if (txr->br == NULL) {
			device_printf(dev,
			    "Critical Failure setting up TX buf ring\n");
			error = ENOMEM;
			goto fail;
		}

		/*
		 * Next the RX queues...
		 */ 
		rsize = roundup2(que->num_desc *
		    sizeof(union i40e_rx_desc), DBA_ALIGN);
		rxr = &que->rxr;
		rxr->que = que;
		rxr->tail = I40E_QRX_TAIL(que->me);

		/* Initialize the RX side lock */
		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(dev), que->me);
		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);

		if (i40e_allocate_dma_mem(&pf->hw,
		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
			device_printf(dev,
			    "Unable to allocate RX Descriptor memory\n");
			error = ENOMEM;
			goto fail;
		}
		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
		bzero((void *)rxr->base, rsize);

		/* Allocate receive soft structs for the ring*/
		if (ixl_allocate_rx_data(que)) {
			device_printf(dev,
			    "Critical Failure setting up receive structs\n");
			error = ENOMEM;
			goto fail;
		}
	}

	return (0);

fail:
	/*
	 * NOTE(review): this path releases only the descriptor DMA memory.
	 * The mutexes, buf_rings and tx/rx soft structs set up above are
	 * left allocated — presumably the caller unwinds them via
	 * ixl_free_vsi() on attach failure; confirm against the caller.
	 */
	for (int i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		rxr = &que->rxr;
		txr = &que->txr;
		if (rxr->base)
			i40e_free_dma_mem(&pf->hw, &rxr->dma);
		if (txr->base)
			i40e_free_dma_mem(&pf->hw, &txr->dma);
	}

early:
	return (error);
}
3265
3266 /*
3267 ** Provide a update to the queue RX
3268 ** interrupt moderation value.
3269 */
3270 static void
3271 ixl_set_queue_rx_itr(struct ixl_queue *que)
3272 {
3273         struct ixl_vsi  *vsi = que->vsi;
3274         struct i40e_hw  *hw = vsi->hw;
3275         struct rx_ring  *rxr = &que->rxr;
3276         u16             rx_itr;
3277         u16             rx_latency = 0;
3278         int             rx_bytes;
3279
3280
3281         /* Idle, do nothing */
3282         if (rxr->bytes == 0)
3283                 return;
3284
3285         if (ixl_dynamic_rx_itr) {
3286                 rx_bytes = rxr->bytes/rxr->itr;
3287                 rx_itr = rxr->itr;
3288
3289                 /* Adjust latency range */
3290                 switch (rxr->latency) {
3291                 case IXL_LOW_LATENCY:
3292                         if (rx_bytes > 10) {
3293                                 rx_latency = IXL_AVE_LATENCY;
3294                                 rx_itr = IXL_ITR_20K;
3295                         }
3296                         break;
3297                 case IXL_AVE_LATENCY:
3298                         if (rx_bytes > 20) {
3299                                 rx_latency = IXL_BULK_LATENCY;
3300                                 rx_itr = IXL_ITR_8K;
3301                         } else if (rx_bytes <= 10) {
3302                                 rx_latency = IXL_LOW_LATENCY;
3303                                 rx_itr = IXL_ITR_100K;
3304                         }
3305                         break;
3306                 case IXL_BULK_LATENCY:
3307                         if (rx_bytes <= 20) {
3308                                 rx_latency = IXL_AVE_LATENCY;
3309                                 rx_itr = IXL_ITR_20K;
3310                         }
3311                         break;
3312                  }
3313
3314                 rxr->latency = rx_latency;
3315
3316                 if (rx_itr != rxr->itr) {
3317                         /* do an exponential smoothing */
3318                         rx_itr = (10 * rx_itr * rxr->itr) /
3319                             ((9 * rx_itr) + rxr->itr);
3320                         rxr->itr = rx_itr & IXL_MAX_ITR;
3321                         wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3322                             que->me), rxr->itr);
3323                 }
3324         } else { /* We may have have toggled to non-dynamic */
3325                 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
3326                         vsi->rx_itr_setting = ixl_rx_itr;
3327                 /* Update the hardware if needed */
3328                 if (rxr->itr != vsi->rx_itr_setting) {
3329                         rxr->itr = vsi->rx_itr_setting;
3330                         wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3331                             que->me), rxr->itr);
3332                 }
3333         }
3334         rxr->bytes = 0;
3335         rxr->packets = 0;
3336         return;
3337 }
3338
3339
3340 /*
3341 ** Provide a update to the queue TX
3342 ** interrupt moderation value.
3343 */
3344 static void
3345 ixl_set_queue_tx_itr(struct ixl_queue *que)
3346 {
3347         struct ixl_vsi  *vsi = que->vsi;
3348         struct i40e_hw  *hw = vsi->hw;
3349         struct tx_ring  *txr = &que->txr;
3350         u16             tx_itr;
3351         u16             tx_latency = 0;
3352         int             tx_bytes;
3353
3354
3355         /* Idle, do nothing */
3356         if (txr->bytes == 0)
3357                 return;
3358
3359         if (ixl_dynamic_tx_itr) {
3360                 tx_bytes = txr->bytes/txr->itr;
3361                 tx_itr = txr->itr;
3362
3363                 switch (txr->latency) {
3364                 case IXL_LOW_LATENCY:
3365                         if (tx_bytes > 10) {
3366                                 tx_latency = IXL_AVE_LATENCY;
3367                                 tx_itr = IXL_ITR_20K;
3368                         }
3369                         break;
3370                 case IXL_AVE_LATENCY:
3371                         if (tx_bytes > 20) {
3372                                 tx_latency = IXL_BULK_LATENCY;
3373                                 tx_itr = IXL_ITR_8K;
3374                         } else if (tx_bytes <= 10) {
3375                                 tx_latency = IXL_LOW_LATENCY;
3376                                 tx_itr = IXL_ITR_100K;
3377                         }
3378                         break;
3379                 case IXL_BULK_LATENCY:
3380                         if (tx_bytes <= 20) {
3381                                 tx_latency = IXL_AVE_LATENCY;
3382                                 tx_itr = IXL_ITR_20K;
3383                         }
3384                         break;
3385                 }
3386
3387                 txr->latency = tx_latency;
3388
3389                 if (tx_itr != txr->itr) {
3390                  /* do an exponential smoothing */
3391                         tx_itr = (10 * tx_itr * txr->itr) /
3392                             ((9 * tx_itr) + txr->itr);
3393                         txr->itr = tx_itr & IXL_MAX_ITR;
3394                         wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3395                             que->me), txr->itr);
3396                 }
3397
3398         } else { /* We may have have toggled to non-dynamic */
3399                 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
3400                         vsi->tx_itr_setting = ixl_tx_itr;
3401                 /* Update the hardware if needed */
3402                 if (txr->itr != vsi->tx_itr_setting) {
3403                         txr->itr = vsi->tx_itr_setting;
3404                         wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3405                             que->me), txr->itr);
3406                 }
3407         }
3408         txr->bytes = 0;
3409         txr->packets = 0;
3410         return;
3411 }
3412
#define QUEUE_NAME_LEN 32	/* buffer size for per-queue sysctl node names ("que%d") */
3414
3415 static void
3416 ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
3417     struct sysctl_ctx_list *ctx, const char *sysctl_name)
3418 {
3419         struct sysctl_oid *tree;
3420         struct sysctl_oid_list *child;
3421         struct sysctl_oid_list *vsi_list;
3422
3423         tree = device_get_sysctl_tree(pf->dev);
3424         child = SYSCTL_CHILDREN(tree);
3425         vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
3426                                    CTLFLAG_RD, NULL, "VSI Number");
3427         vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
3428
3429         ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
3430 }
3431
/*
 * Register the driver's sysctl statistics tree: driver-level counters,
 * the PF VSI node with per-queue counters underneath it, and the MAC
 * port statistics. Called once at attach.
 */
static void
ixl_add_hw_stats(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_queue *queues = vsi->queues;
	struct i40e_hw_port_stats *pf_stats = &pf->stats;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	struct sysctl_oid_list *vsi_list;

	struct sysctl_oid *queue_node;
	struct sysctl_oid_list *queue_list;

	struct tx_ring *txr;
	struct rx_ring *rxr;
	char queue_namebuf[QUEUE_NAME_LEN];

	/* Driver statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
			CTLFLAG_RD, &pf->watchdog_events,
			"Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
			CTLFLAG_RD, &pf->admin_irq,
			"Admin Queue IRQ Handled");

	/* Create the "pf" VSI node, then hang per-queue nodes under it. */
	ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
	vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);

	/* Queue statistics */
	for (int q = 0; q < vsi->num_queues; q++) {
		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
		queue_list = SYSCTL_CHILDREN(queue_node);

		txr = &(queues[q].txr);
		rxr = &(queues[q].rxr);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
				"m_defrag() failed");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
				CTLFLAG_RD, &(queues[q].dropped_pkts),
				"Driver dropped packets");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
				CTLFLAG_RD, &(queues[q].irqs),
				"irqs on this queue");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
				CTLFLAG_RD, &(queues[q].tso),
				"TSO");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
				CTLFLAG_RD, &(queues[q].tx_dma_setup),
				"Driver tx dma failure in xmit");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
				CTLFLAG_RD, &(txr->no_desc),
				"Queue No Descriptor Available");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
				CTLFLAG_RD, &(txr->total_packets),
				"Queue Packets Transmitted");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
				CTLFLAG_RD, &(txr->tx_bytes),
				"Queue Bytes Transmitted");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
				CTLFLAG_RD, &(rxr->rx_packets),
				"Queue Packets Received");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
				CTLFLAG_RD, &(rxr->rx_bytes),
				"Queue Bytes Received");
	}

	/* MAC stats */
	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
}
3508
3509 static void
3510 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3511         struct sysctl_oid_list *child,
3512         struct i40e_eth_stats *eth_stats)
3513 {
3514         struct ixl_sysctl_info ctls[] =
3515         {
3516                 {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3517                 {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3518                         "Unicast Packets Received"},
3519                 {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3520                         "Multicast Packets Received"},
3521                 {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3522                         "Broadcast Packets Received"},
3523                 {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3524                 {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3525                 {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3526                 {&eth_stats->tx_multicast, "mcast_pkts_txd",
3527                         "Multicast Packets Transmitted"},
3528                 {&eth_stats->tx_broadcast, "bcast_pkts_txd",
3529                         "Broadcast Packets Transmitted"},
3530                 // end
3531                 {0,0,0}
3532         };
3533
3534         struct ixl_sysctl_info *entry = ctls;
3535         while (entry->stat != NULL)
3536         {
3537                 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3538                                 CTLFLAG_RD, entry->stat,
3539                                 entry->description);
3540                 entry++;
3541         }
3542 }
3543
3544 static void
3545 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3546         struct sysctl_oid_list *child,
3547         struct i40e_hw_port_stats *stats)
3548 {
3549         struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3550                                     CTLFLAG_RD, NULL, "Mac Statistics");
3551         struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3552
3553         struct i40e_eth_stats *eth_stats = &stats->eth;
3554         ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3555
3556         struct ixl_sysctl_info ctls[] = 
3557         {
3558                 {&stats->crc_errors, "crc_errors", "CRC Errors"},
3559                 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3560                 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3561                 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3562                 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3563                 /* Packet Reception Stats */
3564                 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3565                 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3566                 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3567                 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3568                 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3569                 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3570                 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3571                 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3572                 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3573                 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3574                 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3575                 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3576                 /* Packet Transmission Stats */
3577                 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3578                 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3579                 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3580                 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3581                 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3582                 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3583                 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3584                 /* Flow control */
3585                 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3586                 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3587                 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3588                 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3589                 /* End */
3590                 {0,0,0}
3591         };
3592
3593         struct ixl_sysctl_info *entry = ctls;
3594         while (entry->stat != NULL)
3595         {
3596                 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3597                                 CTLFLAG_RD, entry->stat,
3598                                 entry->description);
3599                 entry++;
3600         }
3601 }
3602
3603
3604 /*
3605 ** ixl_config_rss - setup RSS 
3606 **  - note this is done for the single vsi
3607 */
static void ixl_config_rss(struct ixl_vsi *vsi)
{
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = vsi->hw;
	u32		lut = 0;
	u64		set_hena = 0, hena;
	int		i, j, que_id;
#ifdef RSS
	u32		rss_hash_config;
	u32		rss_seed[IXL_KEYSZ];
#else
	/* Fixed hash key used when the kernel RSS framework is absent */
	u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
			    0x183cfd8c, 0xce880440, 0x580cbc3c,
			    0x35897377, 0x328b25e1, 0x4fa98922,
			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
#endif

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_seed);
#endif

	/* Fill out hash function seed */
	for (i = 0; i < IXL_KEYSZ; i++)
		wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);

	/* Enable PCTYPES for RSS: */
#ifdef RSS
	/* Translate the kernel's enabled hash types to i40e PCTYPE bits */
	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	/* NOTE(review): IPV6_EX mapped to FRAG_IPV6 — confirm intended */
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	/* Without kernel RSS, enable hashing for all supported PCTYPEs */
	set_hena =
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
#endif
	/* Merge the new bits into the 64-bit HENA register pair */
	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= set_hena;
	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
		if (j == vsi->num_queues)
			j = 0;
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues.)
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % vsi->num_queues;
#else
		que_id = j;
#endif
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (que_id &
		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3)
			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}
	ixl_flush(hw);
}
3695
3696
3697 /*
3698 ** This routine is run via an vlan config EVENT,
3699 ** it enables us to use the HW Filter table since
3700 ** we can get the vlan id. This just creates the
3701 ** entry in the soft version of the VFTA, init will
3702 ** repopulate the real table.
3703 */
3704 static void
3705 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3706 {
3707         struct ixl_vsi  *vsi = ifp->if_softc;
3708         struct i40e_hw  *hw = vsi->hw;
3709         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
3710
3711         if (ifp->if_softc !=  arg)   /* Not our event */
3712                 return;
3713
3714         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3715                 return;
3716
3717         IXL_PF_LOCK(pf);
3718         ++vsi->num_vlans;
3719         ixl_add_filter(vsi, hw->mac.addr, vtag);
3720         IXL_PF_UNLOCK(pf);
3721 }
3722
3723 /*
3724 ** This routine is run via an vlan
3725 ** unconfig EVENT, remove our entry
3726 ** in the soft vfta.
3727 */
3728 static void
3729 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3730 {
3731         struct ixl_vsi  *vsi = ifp->if_softc;
3732         struct i40e_hw  *hw = vsi->hw;
3733         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
3734
3735         if (ifp->if_softc !=  arg)
3736                 return;
3737
3738         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3739                 return;
3740
3741         IXL_PF_LOCK(pf);
3742         --vsi->num_vlans;
3743         ixl_del_filter(vsi, hw->mac.addr, vtag);
3744         IXL_PF_UNLOCK(pf);
3745 }
3746
3747 /*
3748 ** This routine updates vlan filters, called by init
3749 ** it scans the filter table and then updates the hw
3750 ** after a soft reset.
3751 */
3752 static void
3753 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3754 {
3755         struct ixl_mac_filter   *f;
3756         int                     cnt = 0, flags;
3757
3758         if (vsi->num_vlans == 0)
3759                 return;
3760         /*
3761         ** Scan the filter list for vlan entries,
3762         ** mark them for addition and then call
3763         ** for the AQ update.
3764         */
3765         SLIST_FOREACH(f, &vsi->ftl, next) {
3766                 if (f->flags & IXL_FILTER_VLAN) {
3767                         f->flags |=
3768                             (IXL_FILTER_ADD |
3769                             IXL_FILTER_USED);
3770                         cnt++;
3771                 }
3772         }
3773         if (cnt == 0) {
3774                 printf("setup vlan: no filters found!\n");
3775                 return;
3776         }
3777         flags = IXL_FILTER_VLAN;
3778         flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3779         ixl_add_hw_filters(vsi, flags, cnt);
3780         return;
3781 }
3782
3783 /*
3784 ** Initialize filter list and add filters that the hardware
3785 ** needs to know about.
3786 **
3787 ** Requires VSI's filter list & seid to be set before calling.
3788 */
3789 static void
3790 ixl_init_filters(struct ixl_vsi *vsi)
3791 {
3792         /* Add broadcast address */
3793         ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3794
3795         /*
3796          * Prevent Tx flow control frames from being sent out by
3797          * non-firmware transmitters.
3798          */
3799         i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
3800 }
3801
3802 /*
** This routine adds multicast filters
3804 */
3805 static void
3806 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3807 {
3808         struct ixl_mac_filter *f;
3809
3810         /* Does one already exist */
3811         f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3812         if (f != NULL)
3813                 return;
3814
3815         f = ixl_get_filter(vsi);
3816         if (f == NULL) {
3817                 printf("WARNING: no filter available!!\n");
3818                 return;
3819         }
3820         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3821         f->vlan = IXL_VLAN_ANY;
3822         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3823             | IXL_FILTER_MC);
3824
3825         return;
3826 }
3827
3828 static void
3829 ixl_reconfigure_filters(struct ixl_vsi *vsi)
3830 {
3831
3832         ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3833 }
3834
3835 /*
3836 ** This routine adds macvlan filters
3837 */
3838 static void
3839 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3840 {
3841         struct ixl_mac_filter   *f, *tmp;
3842         struct ixl_pf           *pf;
3843         device_t                dev;
3844
3845         DEBUGOUT("ixl_add_filter: begin");
3846
3847         pf = vsi->back;
3848         dev = pf->dev;
3849
3850         /* Does one already exist */
3851         f = ixl_find_filter(vsi, macaddr, vlan);
3852         if (f != NULL)
3853                 return;
3854         /*
3855         ** Is this the first vlan being registered, if so we
3856         ** need to remove the ANY filter that indicates we are
3857         ** not in a vlan, and replace that with a 0 filter.
3858         */
3859         if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3860                 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3861                 if (tmp != NULL) {
3862                         ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3863                         ixl_add_filter(vsi, macaddr, 0);
3864                 }
3865         }
3866
3867         f = ixl_get_filter(vsi);
3868         if (f == NULL) {
3869                 device_printf(dev, "WARNING: no filter available!!\n");
3870                 return;
3871         }
3872         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3873         f->vlan = vlan;
3874         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3875         if (f->vlan != IXL_VLAN_ANY)
3876                 f->flags |= IXL_FILTER_VLAN;
3877         else
3878                 vsi->num_macs++;
3879
3880         ixl_add_hw_filters(vsi, f->flags, 1);
3881         return;
3882 }
3883
/*
** Remove the filter matching macaddr+vlan, if present, and push
** the deletion to the hardware via the admin queue.
*/
static void
ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f;

	f = ixl_find_filter(vsi, macaddr, vlan);
	if (f == NULL)
		return;

	/* Mark for deletion; ixl_del_hw_filters() processes marked entries */
	f->flags |= IXL_FILTER_DEL;
	ixl_del_hw_filters(vsi, 1);
	vsi->num_macs--;

	/* Check if this is the last vlan removal */
	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
		/* Switch back to a non-vlan filter */
		/* recursive call; terminates because the inner vlan is 0/ANY */
		ixl_del_filter(vsi, macaddr, 0);
		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
	}
	return;
}
3905
3906 /*
3907 ** Find the filter with both matching mac addr and vlan id
3908 */
3909 static struct ixl_mac_filter *
3910 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3911 {
3912         struct ixl_mac_filter   *f;
3913         bool                    match = FALSE;
3914
3915         SLIST_FOREACH(f, &vsi->ftl, next) {
3916                 if (!cmp_etheraddr(f->macaddr, macaddr))
3917                         continue;
3918                 if (f->vlan == vlan) {
3919                         match = TRUE;
3920                         break;
3921                 }
3922         }       
3923
3924         if (!match)
3925                 f = NULL;
3926         return (f);
3927 }
3928
/*
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
**
** Caller must hold the PF lock (asserted below); 'cnt' is the
** number of pending additions and bounds the AQ element array.
*/
static void
ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
{
	struct i40e_aqc_add_macvlan_element_data *a, *b;
	struct ixl_mac_filter	*f;
	struct ixl_pf		*pf;
	struct i40e_hw		*hw;
	device_t		dev;
	int			err, j = 0;

	pf = vsi->back;
	dev = pf->dev;
	hw = &pf->hw;
	IXL_PF_LOCK_ASSERT(pf);

	/* One AQ element per filter to be programmed */
	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		/* NOTE(review): on this failure the matching entries keep
		   IXL_FILTER_ADD set and are never programmed — confirm a
		   later pass retries them. */
		device_printf(dev, "add_hw_filters failed to get memory\n");
		return;
	}

	/*
	** Scan the filter list, each time we find one
	** we add it to the admin queue array and turn off
	** the add bit.
	** Note: this is an exact equality match on the flag word,
	** not a mask test.
	*/
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if (f->flags == flags) {
			b = &a[j]; // a pox on fvl long names :)
			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
			if (f->vlan == IXL_VLAN_ANY) {
				/* Match the MAC regardless of VLAN tag */
				b->vlan_tag = 0;
				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			} else {
				b->vlan_tag = f->vlan;
				b->flags = 0;
			}
			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			f->flags &= ~IXL_FILTER_ADD;
			j++;
		}
		if (j == cnt)
			break;
	}
	if (j > 0) {
		/* Program the collected filters in one AQ command */
		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
		if (err) 
			device_printf(dev, "aq_add_macvlan err %d, "
			    "aq_error %d\n", err, hw->aq.asq_last_status);
		else
			vsi->hw_filters_add += j;
	}
	free(a, M_DEVBUF);
	return;
}
3990
3991 /*
3992 ** This routine takes removals in the vsi filter
3993 ** table and creates an Admin Queue call to delete
3994 ** the filters in the hardware.
3995 */
3996 static void
3997 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3998 {
3999         struct i40e_aqc_remove_macvlan_element_data *d, *e;
4000         struct ixl_pf           *pf;
4001         struct i40e_hw          *hw;
4002         device_t                dev;
4003         struct ixl_mac_filter   *f, *f_temp;
4004         int                     err, j = 0;
4005
4006         DEBUGOUT("ixl_del_hw_filters: begin\n");
4007
4008         pf = vsi->back;
4009         hw = &pf->hw;
4010         dev = pf->dev;
4011
4012         d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
4013             M_DEVBUF, M_NOWAIT | M_ZERO);
4014         if (d == NULL) {
4015                 printf("del hw filter failed to get memory\n");
4016                 return;
4017         }
4018
4019         SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
4020                 if (f->flags & IXL_FILTER_DEL) {
4021                         e = &d[j]; // a pox on fvl long names :)
4022                         bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
4023                         e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
4024                         e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
4025                         /* delete entry from vsi list */
4026                         SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
4027                         free(f, M_DEVBUF);
4028                         j++;
4029                 }
4030                 if (j == cnt)
4031                         break;
4032         }
4033         if (j > 0) {
4034                 err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
4035                 /* NOTE: returns ENOENT every time but seems to work fine,
4036                    so we'll ignore that specific error. */
4037                 // TODO: Does this still occur on current firmwares?
4038                 if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
4039                         int sc = 0;
4040                         for (int i = 0; i < j; i++)
4041                                 sc += (!d[i].error_code);
4042                         vsi->hw_filters_del += sc;
4043                         device_printf(dev,
4044                             "Failed to remove %d/%d filters, aq error %d\n",
4045                             j - sc, j, hw->aq.asq_last_status);
4046                 } else
4047                         vsi->hw_filters_del += j;
4048         }
4049         free(d, M_DEVBUF);
4050
4051         DEBUGOUT("ixl_del_hw_filters: end\n");
4052         return;
4053 }
4054
4055 static int
4056 ixl_enable_rings(struct ixl_vsi *vsi)
4057 {
4058         struct ixl_pf   *pf = vsi->back;
4059         struct i40e_hw  *hw = &pf->hw;
4060         int             index, error;
4061         u32             reg;
4062
4063         error = 0;
4064         for (int i = 0; i < vsi->num_queues; i++) {
4065                 index = vsi->first_queue + i;
4066                 i40e_pre_tx_queue_cfg(hw, index, TRUE);
4067
4068                 reg = rd32(hw, I40E_QTX_ENA(index));
4069                 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
4070                     I40E_QTX_ENA_QENA_STAT_MASK;
4071                 wr32(hw, I40E_QTX_ENA(index), reg);
4072                 /* Verify the enable took */
4073                 for (int j = 0; j < 10; j++) {
4074                         reg = rd32(hw, I40E_QTX_ENA(index));
4075                         if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
4076                                 break;
4077                         i40e_msec_delay(10);
4078                 }
4079                 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
4080                         device_printf(pf->dev, "TX queue %d disabled!\n",
4081                             index);
4082                         error = ETIMEDOUT;
4083                 }
4084
4085                 reg = rd32(hw, I40E_QRX_ENA(index));
4086                 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
4087                     I40E_QRX_ENA_QENA_STAT_MASK;
4088                 wr32(hw, I40E_QRX_ENA(index), reg);
4089                 /* Verify the enable took */
4090                 for (int j = 0; j < 10; j++) {
4091                         reg = rd32(hw, I40E_QRX_ENA(index));
4092                         if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
4093                                 break;
4094                         i40e_msec_delay(10);
4095                 }
4096                 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
4097                         device_printf(pf->dev, "RX queue %d disabled!\n",
4098                             index);
4099                         error = ETIMEDOUT;
4100                 }
4101         }
4102
4103         return (error);
4104 }
4105
4106 static int
4107 ixl_disable_rings(struct ixl_vsi *vsi)
4108 {
4109         struct ixl_pf   *pf = vsi->back;
4110         struct i40e_hw  *hw = &pf->hw;
4111         int             index, error;
4112         u32             reg;
4113
4114         error = 0;
4115         for (int i = 0; i < vsi->num_queues; i++) {
4116                 index = vsi->first_queue + i;
4117
4118                 i40e_pre_tx_queue_cfg(hw, index, FALSE);
4119                 i40e_usec_delay(500);
4120
4121                 reg = rd32(hw, I40E_QTX_ENA(index));
4122                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4123                 wr32(hw, I40E_QTX_ENA(index), reg);
4124                 /* Verify the disable took */
4125                 for (int j = 0; j < 10; j++) {
4126                         reg = rd32(hw, I40E_QTX_ENA(index));
4127                         if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
4128                                 break;
4129                         i40e_msec_delay(10);
4130                 }
4131                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
4132                         device_printf(pf->dev, "TX queue %d still enabled!\n",
4133                             index);
4134                         error = ETIMEDOUT;
4135                 }
4136
4137                 reg = rd32(hw, I40E_QRX_ENA(index));
4138                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4139                 wr32(hw, I40E_QRX_ENA(index), reg);
4140                 /* Verify the disable took */
4141                 for (int j = 0; j < 10; j++) {
4142                         reg = rd32(hw, I40E_QRX_ENA(index));
4143                         if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
4144                                 break;
4145                         i40e_msec_delay(10);
4146                 }
4147                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
4148                         device_printf(pf->dev, "RX queue %d still enabled!\n",
4149                             index);
4150                         error = ETIMEDOUT;
4151                 }
4152         }
4153
4154         return (error);
4155 }
4156
4157 /**
4158  * ixl_handle_mdd_event
4159  *
4160  * Called from interrupt handler to identify possibly malicious vfs
4161  * (But also detects events from the PF, as well)
4162  **/
4163 static void ixl_handle_mdd_event(struct ixl_pf *pf)
4164 {
4165         struct i40e_hw *hw = &pf->hw;
4166         device_t dev = pf->dev;
4167         bool mdd_detected = false;
4168         bool pf_mdd_detected = false;
4169         u32 reg;
4170
4171         /* find what triggered the MDD event */
4172         reg = rd32(hw, I40E_GL_MDET_TX);
4173         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
4174                 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
4175                                 I40E_GL_MDET_TX_PF_NUM_SHIFT;
4176                 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
4177                                 I40E_GL_MDET_TX_EVENT_SHIFT;
4178                 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
4179                                 I40E_GL_MDET_TX_QUEUE_SHIFT;
4180                 device_printf(dev,
4181                          "Malicious Driver Detection event 0x%02x"
4182                          " on TX queue %d pf number 0x%02x\n",
4183                          event, queue, pf_num);
4184                 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
4185                 mdd_detected = true;
4186         }
4187         reg = rd32(hw, I40E_GL_MDET_RX);
4188         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
4189                 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
4190                                 I40E_GL_MDET_RX_FUNCTION_SHIFT;
4191                 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
4192                                 I40E_GL_MDET_RX_EVENT_SHIFT;
4193                 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
4194                                 I40E_GL_MDET_RX_QUEUE_SHIFT;
4195                 device_printf(dev,
4196                          "Malicious Driver Detection event 0x%02x"
4197                          " on RX queue %d of function 0x%02x\n",
4198                          event, queue, func);
4199                 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
4200                 mdd_detected = true;
4201         }
4202
4203         if (mdd_detected) {
4204                 reg = rd32(hw, I40E_PF_MDET_TX);
4205                 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
4206                         wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
4207                         device_printf(dev,
4208                                  "MDD TX event is for this function 0x%08x",
4209                                  reg);
4210                         pf_mdd_detected = true;
4211                 }
4212                 reg = rd32(hw, I40E_PF_MDET_RX);
4213                 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
4214                         wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
4215                         device_printf(dev,
4216                                  "MDD RX event is for this function 0x%08x",
4217                                  reg);
4218                         pf_mdd_detected = true;
4219                 }
4220         }
4221
4222         /* re-enable mdd interrupt cause */
4223         reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4224         reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4225         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4226         ixl_flush(hw);
4227 }
4228
4229 static void
4230 ixl_enable_intr(struct ixl_vsi *vsi)
4231 {
4232         struct i40e_hw          *hw = vsi->hw;
4233         struct ixl_queue        *que = vsi->queues;
4234
4235         if (ixl_enable_msix) {
4236                 ixl_enable_adminq(hw);
4237                 for (int i = 0; i < vsi->num_queues; i++, que++)
4238                         ixl_enable_queue(hw, que->me);
4239         } else
4240                 ixl_enable_legacy(hw);
4241 }
4242
4243 static void
4244 ixl_disable_rings_intr(struct ixl_vsi *vsi)
4245 {
4246         struct i40e_hw          *hw = vsi->hw;
4247         struct ixl_queue        *que = vsi->queues;
4248
4249         for (int i = 0; i < vsi->num_queues; i++, que++)
4250                 ixl_disable_queue(hw, que->me);
4251 }
4252
4253 static void
4254 ixl_disable_intr(struct ixl_vsi *vsi)
4255 {
4256         struct i40e_hw          *hw = vsi->hw;
4257
4258         if (ixl_enable_msix)
4259                 ixl_disable_adminq(hw);
4260         else
4261                 ixl_disable_legacy(hw);
4262 }
4263
4264 static void
4265 ixl_enable_adminq(struct i40e_hw *hw)
4266 {
4267         u32             reg;
4268
4269         reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4270             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4271             (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4272         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4273         ixl_flush(hw);
4274 }
4275
4276 static void
4277 ixl_disable_adminq(struct i40e_hw *hw)
4278 {
4279         u32             reg;
4280
4281         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4282         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4283         ixl_flush(hw);
4284 }
4285
4286 static void
4287 ixl_enable_queue(struct i40e_hw *hw, int id)
4288 {
4289         u32             reg;
4290
4291         reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
4292             I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
4293             (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
4294         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4295 }
4296
4297 static void
4298 ixl_disable_queue(struct i40e_hw *hw, int id)
4299 {
4300         u32             reg;
4301
4302         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4303         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4304 }
4305
4306 static void
4307 ixl_enable_legacy(struct i40e_hw *hw)
4308 {
4309         u32             reg;
4310         reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4311             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4312             (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4313         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4314 }
4315
4316 static void
4317 ixl_disable_legacy(struct i40e_hw *hw)
4318 {
4319         u32             reg;
4320
4321         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4322         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4323 }
4324
/*
** Read all PF-level (port) statistics registers into pf->stats,
** accumulating deltas against pf->stats_offsets, then refresh the
** VSI stats and the ethernet stats of every enabled VF VSI.
** The ixl_stat_update32/48 helpers handle offset-since-load and
** counter rollover; their semantics are defined elsewhere in this
** file.
*/
static void
ixl_update_stats_counters(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ixl_vf	*vf;

	/* nsd = new (live) stats, osd = offsets captured at load */
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;

	/* Update hw stats */
	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);
	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);
	/* 48-bit byte/packet counters are split across H/L registers */
	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	/* Link-level error/fault counters */
	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);
	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);
	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	/* Flow control (LFC) stats */
	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	/* Packet size stats rx */
	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	/* Packet size stats tx */
	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	/* Malformed-frame counters */
	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);
	/* First pass just captures offsets; subsequent passes accumulate */
	pf->stat_offsets_loaded = true;
	/* End hw stats */

	/* Update vsi stats */
	ixl_update_vsi_stats(vsi);

	/* Refresh ethernet stats of each enabled VF's VSI */
	for (int i = 0; i < pf->num_vfs; i++) {
		vf = &pf->vfs[i];
		if (vf->vf_flags & VF_FLAG_ENABLED)
			ixl_update_eth_stats(&pf->vfs[i].vsi);
	}
}
4500
/*
** Tasklet handler for MSIX Adminq interrupts
**  - do outside interrupt since it might sleep
*/
static void
ixl_do_adminq(void *context, int pending)
{
	struct ixl_pf			*pf = context;
	struct i40e_hw			*hw = &pf->hw;
	struct i40e_arq_event_info	event;
	i40e_status			ret;
	device_t			dev = pf->dev;
	u32				reg, loop = 0;
	u16				opcode, result;

	// XXX: Possibly inappropriate overload
	/* EMPR reset in progress: wait for the device to come back,
	   then rebuild instead of servicing the admin queue. */
	if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
		int count = 0;
		// ERJ: Typically finishes within 3-4 seconds
		/* Poll DEVSTATE for up to ~10s (100 x 100ms) */
		while (count++ < 100) {
			reg = rd32(hw, I40E_GLGEN_RSTAT);
			reg = reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
			if (reg) {
				i40e_msec_delay(100);
			} else {
				break;
			}
		}
		device_printf(dev, "EMPR reset wait count: %d\n", count);

		device_printf(dev, "Rebuilding HW structs...\n");
		// XXX: I feel like this could cause a kernel panic some time in the future
		ixl_stop(pf);
		ixl_init(pf);

		atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
		return;
	}

	// Actually do Admin Queue handling
	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.buf_len,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!event.msg_buf) {
		/* NOTE(review): this early return leaves the adminq
		   interrupt masked (ixl_enable_adminq is never reached) —
		   confirm whether it should be re-enabled here. */
		device_printf(dev, "%s: Unable to allocate memory for Admin"
		    " Queue event!\n", __func__);
		return;
	}

	IXL_PF_LOCK(pf);
	/* clean and process any events */
	do {
		ret = i40e_clean_arq_element(hw, &event, &result);
		if (ret)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
#ifdef IXL_DEBUG
		device_printf(dev, "%s: Admin Queue event: %#06x\n", __func__, opcode);
#endif
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			ixl_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
#ifdef PCI_IOV
			ixl_handle_vf_msg(pf, &event);
#endif
			break;
		case i40e_aqc_opc_event_lan_overflow:
		default:
			break;
		}

	} while (result && (loop++ < IXL_ADM_LIMIT));

	free(event.msg_buf, M_DEVBUF);

	/*
	 * If there are still messages to process, reschedule ourselves.
	 * Otherwise, re-enable our interrupt and go to sleep.
	 */
	if (result > 0)
		taskqueue_enqueue(pf->tq, &pf->adminq);
	else
		ixl_enable_adminq(hw);

	IXL_PF_UNLOCK(pf);
}
4589
/**
 * Update VSI-specific ethernet statistics counters.
 *
 * Reads the per-VSI (GLV) stat registers selected by the VSI's
 * stat_counter_idx into vsi->eth_stats, using
 * vsi->eth_stats_offsets as the baseline. The
 * ixl_stat_update32/48 helpers handle offset-since-load and
 * rollover.
 **/
void ixl_update_eth_stats(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *es;
	struct i40e_eth_stats *oes;
	struct i40e_hw_port_stats *nsd;
	/* Hardware stat counter bank assigned to this VSI */
	u16 stat_idx = vsi->info.stat_counter_idx;

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;
	nsd = &pf->stats;

	/* Gather up the stats that the hw collects */
	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);

	/* RX byte/packet counters (48-bit, split across H/L regs) */
	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	/* TX byte/packet counters */
	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	/* First pass only captures offsets; later passes accumulate */
	vsi->stat_offsets_loaded = true;
}
4649
4650 static void
4651 ixl_update_vsi_stats(struct ixl_vsi *vsi)
4652 {
4653         struct ixl_pf           *pf;
4654         struct ifnet            *ifp;
4655         struct i40e_eth_stats   *es;
4656         u64                     tx_discards;
4657
4658         struct i40e_hw_port_stats *nsd;
4659
4660         pf = vsi->back;
4661         ifp = vsi->ifp;
4662         es = &vsi->eth_stats;
4663         nsd = &pf->stats;
4664
4665         ixl_update_eth_stats(vsi);
4666
4667         tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4668         for (int i = 0; i < vsi->num_queues; i++)
4669                 tx_discards += vsi->queues[i].txr.br->br_drops;
4670
4671         /* Update ifnet stats */
4672         IXL_SET_IPACKETS(vsi, es->rx_unicast +
4673                            es->rx_multicast +
4674                            es->rx_broadcast);
4675         IXL_SET_OPACKETS(vsi, es->tx_unicast +
4676                            es->tx_multicast +
4677                            es->tx_broadcast);
4678         IXL_SET_IBYTES(vsi, es->rx_bytes);
4679         IXL_SET_OBYTES(vsi, es->tx_bytes);
4680         IXL_SET_IMCASTS(vsi, es->rx_multicast);
4681         IXL_SET_OMCASTS(vsi, es->tx_multicast);
4682
4683         IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4684             nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4685             nsd->rx_jabber);
4686         IXL_SET_OERRORS(vsi, es->tx_errors);
4687         IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4688         IXL_SET_OQDROPS(vsi, tx_discards);
4689         IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4690         IXL_SET_COLLISIONS(vsi, 0);
4691 }
4692
4693 /**
4694  * Reset all of the stats for the given pf
4695  **/
4696 void ixl_pf_reset_stats(struct ixl_pf *pf)
4697 {
4698         bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4699         bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4700         pf->stat_offsets_loaded = false;
4701 }
4702
4703 /**
4704  * Resets all stats of the given vsi
4705  **/
4706 void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4707 {
4708         bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4709         bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4710         vsi->stat_offsets_loaded = false;
4711 }
4712
4713 /**
4714  * Read and update a 48 bit stat from the hw
4715  *
4716  * Since the device stats are not reset at PFReset, they likely will not
4717  * be zeroed when the driver starts.  We'll save the first values read
4718  * and use them as offsets to be subtracted from the raw values in order
4719  * to report stats that count from zero.
4720  **/
static void
ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
	bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
	/* Single 8-byte bus read covers both halves of the counter. */
	new_data = rd64(hw, loreg);
#else
	/*
	 * Use two rd32's instead of one rd64; FreeBSD versions before
	 * 10 don't support 8 byte bus reads/writes.
	 */
	new_data = rd32(hw, loreg);
	/* Only the low 16 bits of the high register are counter bits. */
	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
#endif

	/* First read after a reset establishes the zero baseline. */
	if (!offset_loaded)
		*offset = new_data;
	if (new_data >= *offset)
		*stat = new_data - *offset;
	else
		/* Counter wrapped since the offset was taken; add the 48-bit modulus. */
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	/* Clamp the reported value to the 48-bit counter width. */
	*stat &= 0xFFFFFFFFFFFFULL;
}
4746
4747 /**
4748  * Read and update a 32 bit stat from the hw
4749  **/
4750 static void
4751 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4752         bool offset_loaded, u64 *offset, u64 *stat)
4753 {
4754         u32 new_data;
4755
4756         new_data = rd32(hw, reg);
4757         if (!offset_loaded)
4758                 *offset = new_data;
4759         if (new_data >= *offset)
4760                 *stat = (u32)(new_data - *offset);
4761         else
4762                 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4763 }
4764
4765 static void
4766 ixl_add_device_sysctls(struct ixl_pf *pf)
4767 {
4768         device_t dev = pf->dev;
4769
4770         /* Set up sysctls */
4771         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4772             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4773             OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
4774             pf, 0, ixl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
4775
4776         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4777             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4778             OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
4779             pf, 0, ixl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
4780
4781         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4782             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4783             OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
4784             pf, 0, ixl_current_speed, "A", "Current Port Speed");
4785
4786         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4787             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4788             OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
4789             pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
4790
4791         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
4792             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4793             OID_AUTO, "rx_itr", CTLFLAG_RW,
4794             &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
4795
4796         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
4797             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4798             OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
4799             &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
4800
4801         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
4802             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4803             OID_AUTO, "tx_itr", CTLFLAG_RW,
4804             &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
4805
4806         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
4807             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4808             OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
4809             &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
4810
4811 #ifdef IXL_DEBUG_SYSCTL
4812         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4813             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4814             OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
4815             ixl_debug_info, "I", "Debug Information");
4816
4817         /* Shared-code debug message level */
4818         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
4819             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4820             OID_AUTO, "debug_mask", CTLFLAG_RW,
4821             &pf->hw.debug_mask, 0, "Debug Message Level");
4822
4823         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4824             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4825             OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
4826             pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
4827
4828         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4829             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4830             OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
4831             pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
4832
4833         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4834             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4835             OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
4836             pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
4837
4838         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4839             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4840             OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
4841             pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
4842
4843         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4844             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4845             OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
4846             pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
4847
4848 #ifdef PCI_IOV
4849         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
4850             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4851             OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
4852             0, "PF/VF Virtual Channel debug level");
4853 #endif
4854 #endif
4855 }
4856
4857 /*
4858 ** Set flow control using sysctl:
4859 **      0 - off
4860 **      1 - rx pause
4861 **      2 - tx pause
4862 **      3 - full
4863 */
4864 static int
4865 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4866 {
4867         /*
4868          * TODO: ensure tx CRC by hardware should be enabled
4869          * if tx flow control is enabled.
4870          * ^ N/A for 40G ports
4871          */
4872         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4873         struct i40e_hw *hw = &pf->hw;
4874         device_t dev = pf->dev;
4875         int error = 0;
4876         enum i40e_status_code aq_error = 0;
4877         u8 fc_aq_err = 0;
4878
4879         /* Get request */
4880         error = sysctl_handle_int(oidp, &pf->fc, 0, req);
4881         if ((error) || (req->newptr == NULL))
4882                 return (error);
4883         if (pf->fc < 0 || pf->fc > 3) {
4884                 device_printf(dev,
4885                     "Invalid fc mode; valid modes are 0 through 3\n");
4886                 return (EINVAL);
4887         }
4888
4889         /*
4890         ** Changing flow control mode currently does not work on
4891         ** 40GBASE-CR4 PHYs
4892         */
4893         if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4894             || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4895                 device_printf(dev, "Changing flow control mode unsupported"
4896                     " on 40GBase-CR4 media.\n");
4897                 return (ENODEV);
4898         }
4899
4900         /* Set fc ability for port */
4901         hw->fc.requested_mode = pf->fc;
4902         aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4903         if (aq_error) {
4904                 device_printf(dev,
4905                     "%s: Error setting new fc mode %d; fc_err %#x\n",
4906                     __func__, aq_error, fc_aq_err);
4907                 return (EIO);
4908         }
4909
4910         /* Get new link state */
4911         i40e_msec_delay(250);
4912         hw->phy.get_link_info = TRUE;
4913         i40e_get_link_status(hw, &pf->link_up);
4914
4915         return (0);
4916 }
4917
4918 static int
4919 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4920 {
4921         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4922         struct i40e_hw *hw = &pf->hw;
4923         int error = 0, index = 0;
4924
4925         char *speeds[] = {
4926                 "Unknown",
4927                 "100M",
4928                 "1G",
4929                 "10G",
4930                 "40G",
4931                 "20G"
4932         };
4933
4934         ixl_update_link_status(pf);
4935
4936         switch (hw->phy.link_info.link_speed) {
4937         case I40E_LINK_SPEED_100MB:
4938                 index = 1;
4939                 break;
4940         case I40E_LINK_SPEED_1GB:
4941                 index = 2;
4942                 break;
4943         case I40E_LINK_SPEED_10GB:
4944                 index = 3;
4945                 break;
4946         case I40E_LINK_SPEED_40GB:
4947                 index = 4;
4948                 break;
4949         case I40E_LINK_SPEED_20GB:
4950                 index = 5;
4951                 break;
4952         case I40E_LINK_SPEED_UNKNOWN:
4953         default:
4954                 index = 0;
4955                 break;
4956         }
4957
4958         error = sysctl_handle_string(oidp, speeds[index],
4959             strlen(speeds[index]), req);
4960         return (error);
4961 }
4962
4963 static int
4964 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4965 {
4966         struct i40e_hw *hw = &pf->hw;
4967         device_t dev = pf->dev;
4968         struct i40e_aq_get_phy_abilities_resp abilities;
4969         struct i40e_aq_set_phy_config config;
4970         enum i40e_status_code aq_error = 0;
4971
4972         /* Get current capability information */
4973         aq_error = i40e_aq_get_phy_capabilities(hw,
4974             FALSE, FALSE, &abilities, NULL);
4975         if (aq_error) {
4976                 device_printf(dev,
4977                     "%s: Error getting phy capabilities %d,"
4978                     " aq error: %d\n", __func__, aq_error,
4979                     hw->aq.asq_last_status);
4980                 return (EAGAIN);
4981         }
4982
4983         /* Prepare new config */
4984         bzero(&config, sizeof(config));
4985         config.phy_type = abilities.phy_type;
4986         config.abilities = abilities.abilities
4987             | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4988         config.eee_capability = abilities.eee_capability;
4989         config.eeer = abilities.eeer_val;
4990         config.low_power_ctrl = abilities.d3_lpan;
4991         /* Translate into aq cmd link_speed */
4992         if (speeds & 0x10)
4993                 config.link_speed |= I40E_LINK_SPEED_40GB;
4994         if (speeds & 0x8)
4995                 config.link_speed |= I40E_LINK_SPEED_20GB;
4996         if (speeds & 0x4)
4997                 config.link_speed |= I40E_LINK_SPEED_10GB;
4998         if (speeds & 0x2)
4999                 config.link_speed |= I40E_LINK_SPEED_1GB;
5000         if (speeds & 0x1)
5001                 config.link_speed |= I40E_LINK_SPEED_100MB;
5002
5003         /* Do aq command & restart link */
5004         aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
5005         if (aq_error) {
5006                 device_printf(dev,
5007                     "%s: Error setting new phy config %d,"
5008                     " aq error: %d\n", __func__, aq_error,
5009                     hw->aq.asq_last_status);
5010                 return (EAGAIN);
5011         }
5012
5013         /*
5014         ** This seems a bit heavy handed, but we
5015         ** need to get a reinit on some devices
5016         */
5017         IXL_PF_LOCK(pf);
5018         ixl_stop_locked(pf);
5019         ixl_init_locked(pf);
5020         IXL_PF_UNLOCK(pf);
5021
5022         return (0);
5023 }
5024
5025 /*
5026 ** Control link advertise speed:
5027 **      Flags:
5028 **       0x1 - advertise 100 Mb
5029 **       0x2 - advertise 1G
5030 **       0x4 - advertise 10G
5031 **       0x8 - advertise 20G
5032 **      0x10 - advertise 40G
5033 **
5034 **      Set to 0 to disable link
5035 */
5036 static int
5037 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
5038 {
5039         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5040         struct i40e_hw *hw = &pf->hw;
5041         device_t dev = pf->dev;
5042         int requested_ls = 0;
5043         int error = 0;
5044
5045         /* Read in new mode */
5046         requested_ls = pf->advertised_speed;
5047         error = sysctl_handle_int(oidp, &requested_ls, 0, req);
5048         if ((error) || (req->newptr == NULL))
5049                 return (error);
5050         /* Check for sane value */
5051         if (requested_ls > 0x10) {
5052                 device_printf(dev, "Invalid advertised speed; "
5053                     "valid modes are 0x1 through 0x10\n");
5054                 return (EINVAL);
5055         }
5056         /* Then check for validity based on adapter type */
5057         switch (hw->device_id) {
5058         case I40E_DEV_ID_10G_BASE_T:
5059         case I40E_DEV_ID_10G_BASE_T4:
5060                 /* BaseT */
5061                 if (requested_ls & ~(0x7)) {
5062                         device_printf(dev,
5063                             "Only 100M/1G/10G speeds supported on this device.\n");
5064                         return (EINVAL);
5065                 }
5066                 break;
5067         case I40E_DEV_ID_20G_KR2:
5068         case I40E_DEV_ID_20G_KR2_A:
5069                 /* 20G */
5070                 if (requested_ls & ~(0xE)) {
5071                         device_printf(dev,
5072                             "Only 1G/10G/20G speeds supported on this device.\n");
5073                         return (EINVAL);
5074                 }
5075                 break;
5076         case I40E_DEV_ID_KX_B:
5077         case I40E_DEV_ID_QSFP_A:
5078         case I40E_DEV_ID_QSFP_B:
5079                 /* 40G */
5080                 if (requested_ls & ~(0x10)) {
5081                         device_printf(dev,
5082                             "Only 40G speeds supported on this device.\n");
5083                         return (EINVAL);
5084                 }
5085                 break;
5086         default:
5087                 /* 10G (1G) */
5088                 if (requested_ls & ~(0x6)) {
5089                         device_printf(dev,
5090                             "Only 1/10Gbs speeds are supported on this device.\n");
5091                         return (EINVAL);
5092                 }
5093                 break;
5094         }
5095
5096         /* Exit if no change */
5097         if (pf->advertised_speed == requested_ls)
5098                 return (0);
5099
5100         error = ixl_set_advertised_speeds(pf, requested_ls);
5101         if (error)
5102                 return (error);
5103
5104         pf->advertised_speed = requested_ls;
5105         ixl_update_link_status(pf);
5106         return (0);
5107 }
5108
5109 /*
5110 ** Get the width and transaction speed of
5111 ** the bus this adapter is plugged into.
5112 */
5113 static u16
5114 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
5115 {
5116         u16                     link;
5117         u32                     offset;
5118                 
5119         /* Get the PCI Express Capabilities offset */
5120         pci_find_cap(dev, PCIY_EXPRESS, &offset);
5121
5122         /* ...and read the Link Status Register */
5123         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
5124
5125         switch (link & I40E_PCI_LINK_WIDTH) {
5126         case I40E_PCI_LINK_WIDTH_1:
5127                 hw->bus.width = i40e_bus_width_pcie_x1;
5128                 break;
5129         case I40E_PCI_LINK_WIDTH_2:
5130                 hw->bus.width = i40e_bus_width_pcie_x2;
5131                 break;
5132         case I40E_PCI_LINK_WIDTH_4:
5133                 hw->bus.width = i40e_bus_width_pcie_x4;
5134                 break;
5135         case I40E_PCI_LINK_WIDTH_8:
5136                 hw->bus.width = i40e_bus_width_pcie_x8;
5137                 break;
5138         default:
5139                 hw->bus.width = i40e_bus_width_unknown;
5140                 break;
5141         }
5142
5143         switch (link & I40E_PCI_LINK_SPEED) {
5144         case I40E_PCI_LINK_SPEED_2500:
5145                 hw->bus.speed = i40e_bus_speed_2500;
5146                 break;
5147         case I40E_PCI_LINK_SPEED_5000:
5148                 hw->bus.speed = i40e_bus_speed_5000;
5149                 break;
5150         case I40E_PCI_LINK_SPEED_8000:
5151                 hw->bus.speed = i40e_bus_speed_8000;
5152                 break;
5153         default:
5154                 hw->bus.speed = i40e_bus_speed_unknown;
5155                 break;
5156         }
5157
5158
5159         device_printf(dev,"PCI Express Bus: Speed %s %s\n",
5160             ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
5161             (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
5162             (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
5163             (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
5164             (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
5165             (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
5166             ("Unknown"));
5167
5168         if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
5169             (hw->bus.speed < i40e_bus_speed_8000)) {
5170                 device_printf(dev, "PCI-Express bandwidth available"
5171                     " for this device\n     may be insufficient for"
5172                     " optimal performance.\n");
5173                 device_printf(dev, "For expected performance a x8 "
5174                     "PCIE Gen3 slot is required.\n");
5175         }
5176
5177         return (link);
5178 }
5179
5180 static int
5181 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
5182 {
5183         struct ixl_pf   *pf = (struct ixl_pf *)arg1;
5184         struct i40e_hw  *hw = &pf->hw;
5185         struct sbuf     *sbuf;
5186
5187         sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5188         ixl_nvm_version_str(hw, sbuf);
5189         sbuf_finish(sbuf);
5190         sbuf_delete(sbuf);
5191
5192         return 0;
5193 }
5194
/*
 * Handle an NVM update ioctl: validate the request, wait out any
 * in-progress EMP reset, then hand the buffer to the shared-code
 * NVM update state machine.  Returns 0 or a (possibly negated)
 * errno from the shared code.
 */
static int
ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
{
        struct i40e_hw *hw = &pf->hw;
        struct i40e_nvm_access *nvma;
        device_t dev = pf->dev;
        enum i40e_status_code status = 0;
        int perrno;

        DEBUGFUNC("ixl_handle_nvmupd_cmd");

        /* Validate the ioctl argument before touching its payload. */
        if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
            ifd->ifd_data == NULL) {
                device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", __func__);
                device_printf(dev, "%s: ifdrv length: %lu, sizeof(struct i40e_nvm_access): %lu\n", __func__,
                    ifd->ifd_len, sizeof(struct i40e_nvm_access));
                device_printf(dev, "%s: data pointer: %p\n", __func__, ifd->ifd_data);
                return (EINVAL);
        }

        nvma = (struct i40e_nvm_access *)ifd->ifd_data;

        /*
         * If an EMP reset is in flight, poll for up to 100 x 100ms for it
         * to complete before issuing the NVM update command.
         */
        if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
                int count = 0;
                while (count++ < 100) {
                        i40e_msec_delay(100);
                        if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING))
                                break;
                }
                // device_printf(dev, "ioctl EMPR reset wait count %d\n", count);
        }

        /* Only issue the command if the reset cleared within the timeout. */
        if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) {
                IXL_PF_LOCK(pf);
                status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
                IXL_PF_UNLOCK(pf);
        } else {
                perrno = -EBUSY;
        }

        if (status)
                device_printf(dev, "i40e_nvmupd_command status %d, perrno %d\n",
                    status, perrno);

        /*
         * -EPERM is actually ERESTART, which the kernel interprets as it needing
         * to run this ioctl again. So use -EACCES for -EPERM instead.
         */
        if (perrno == -EPERM)
                return (-EACCES);
        else
                return (perrno);
}
5248
5249 #ifdef IXL_DEBUG_SYSCTL
5250 static int
5251 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
5252 {
5253         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5254         struct i40e_hw *hw = &pf->hw;
5255         struct i40e_link_status link_status;
5256         char buf[512];
5257
5258         enum i40e_status_code aq_error = 0;
5259
5260         aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
5261         if (aq_error) {
5262                 printf("i40e_aq_get_link_info() error %d\n", aq_error);
5263                 return (EPERM);
5264         }
5265
5266         sprintf(buf, "\n"
5267             "PHY Type : %#04x\n"
5268             "Speed    : %#04x\n" 
5269             "Link info: %#04x\n" 
5270             "AN info  : %#04x\n" 
5271             "Ext info : %#04x\n"
5272             "Max Frame: %d\n"
5273             "Pacing   : %#04x\n"
5274             "CRC En?  : %d",
5275             link_status.phy_type, link_status.link_speed, 
5276             link_status.link_info, link_status.an_info,
5277             link_status.ext_info, link_status.max_frame_size,
5278             link_status.pacing, link_status.crc_enable);
5279
5280         return (sysctl_handle_string(oidp, buf, strlen(buf), req));
5281 }
5282
5283 static int
5284 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
5285 {
5286         struct ixl_pf           *pf = (struct ixl_pf *)arg1;
5287         struct i40e_hw          *hw = &pf->hw;
5288         char                    buf[512];
5289         enum i40e_status_code   aq_error = 0;
5290
5291         struct i40e_aq_get_phy_abilities_resp abilities;
5292
5293         aq_error = i40e_aq_get_phy_capabilities(hw,
5294             TRUE, FALSE, &abilities, NULL);
5295         if (aq_error) {
5296                 printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
5297                 return (EPERM);
5298         }
5299
5300         sprintf(buf, "\n"
5301             "PHY Type : %#010x\n"
5302             "Speed    : %#04x\n" 
5303             "Abilities: %#04x\n" 
5304             "EEE cap  : %#06x\n" 
5305             "EEER reg : %#010x\n" 
5306             "D3 Lpan  : %#04x",
5307             abilities.phy_type, abilities.link_speed, 
5308             abilities.abilities, abilities.eee_capability,
5309             abilities.eeer_val, abilities.d3_lpan);
5310
5311         return (sysctl_handle_string(oidp, buf, strlen(buf), req));
5312 }
5313
5314 static int
5315 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5316 {
5317         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5318         struct ixl_vsi *vsi = &pf->vsi;
5319         struct ixl_mac_filter *f;
5320         char *buf, *buf_i;
5321
5322         int error = 0;
5323         int ftl_len = 0;
5324         int ftl_counter = 0;
5325         int buf_len = 0;
5326         int entry_len = 42;
5327
5328         SLIST_FOREACH(f, &vsi->ftl, next) {
5329                 ftl_len++;
5330         }
5331
5332         if (ftl_len < 1) {
5333                 sysctl_handle_string(oidp, "(none)", 6, req);
5334                 return (0);
5335         }
5336
5337         buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5338         buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
5339
5340         sprintf(buf_i++, "\n");
5341         SLIST_FOREACH(f, &vsi->ftl, next) {
5342                 sprintf(buf_i,
5343                     MAC_FORMAT ", vlan %4d, flags %#06x",
5344                     MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5345                 buf_i += entry_len;
5346                 /* don't print '\n' for last entry */
5347                 if (++ftl_counter != ftl_len) {
5348                         sprintf(buf_i, "\n");
5349                         buf_i++;
5350                 }
5351         }
5352
5353         error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5354         if (error)
5355                 printf("sysctl error: %d\n", error);
5356         free(buf, M_DEVBUF);
5357         return error;
5358 }
5359
5360 #define IXL_SW_RES_SIZE 0x14
5361 static int
5362 ixl_res_alloc_cmp(const void *a, const void *b)
5363 {
5364         const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5365         one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5366         two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5367
5368         return ((int)one->resource_type - (int)two->resource_type);
5369 }
5370
5371 /*
5372  * Longest string length: 25 
5373  */
5374 static char *
5375 ixl_switch_res_type_string(u8 type)
5376 {
5377         static char * ixl_switch_res_type_strings[0x14] = {
5378                 "VEB",
5379                 "VSI",
5380                 "Perfect Match MAC address",
5381                 "S-tag",
5382                 "(Reserved)",
5383                 "Multicast hash entry",
5384                 "Unicast hash entry",
5385                 "VLAN",
5386                 "VSI List entry",
5387                 "(Reserved)",
5388                 "VLAN Statistic Pool",
5389                 "Mirror Rule",
5390                 "Queue Set",
5391                 "Inner VLAN Forward filter",
5392                 "(Reserved)",
5393                 "Inner MAC",
5394                 "IP",
5395                 "GRE/VN1 Key",
5396                 "VN2 Key",
5397                 "Tunneling Port"
5398         };
5399
5400         if (type < 0x14)
5401                 return ixl_switch_res_type_strings[type];
5402         else
5403                 return "(Reserved)";
5404 }
5405
5406 static int
5407 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5408 {
5409         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5410         struct i40e_hw *hw = &pf->hw;
5411         device_t dev = pf->dev;
5412         struct sbuf *buf;
5413         int error = 0;
5414
5415         u8 num_entries;
5416         struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5417
5418         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5419         if (!buf) {
5420                 device_printf(dev, "Could not allocate sbuf for output.\n");
5421                 return (ENOMEM);
5422         }
5423
5424         bzero(resp, sizeof(resp));
5425         error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5426                                 resp,
5427                                 IXL_SW_RES_SIZE,
5428                                 NULL);
5429         if (error) {
5430                 device_printf(dev,
5431                     "%s: get_switch_resource_alloc() error %d, aq error %d\n",
5432                     __func__, error, hw->aq.asq_last_status);
5433                 sbuf_delete(buf);
5434                 return error;
5435         }
5436
5437         /* Sort entries by type for display */
5438         qsort(resp, num_entries,
5439             sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5440             &ixl_res_alloc_cmp);
5441
5442         sbuf_cat(buf, "\n");
5443         sbuf_printf(buf, "# of entries: %d\n", num_entries);
5444         sbuf_printf(buf,
5445 #if 0
5446             "Type | Guaranteed | Total | Used   | Un-allocated\n"
5447             "     | (this)     | (all) | (this) | (all)       \n");
5448 #endif
5449             "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
5450             "                          | (this)     | (all) | (this) | (all)       \n");
5451         for (int i = 0; i < num_entries; i++) {
5452                 sbuf_printf(buf,
5453 #if 0
5454                     "%#4x | %10d   %5d   %6d   %12d",
5455                     resp[i].resource_type,
5456 #endif
5457                     "%25s | %10d   %5d   %6d   %12d",
5458                     ixl_switch_res_type_string(resp[i].resource_type),
5459                     resp[i].guaranteed,
5460                     resp[i].total,
5461                     resp[i].used,
5462                     resp[i].total_unalloced);
5463                 if (i < num_entries - 1)
5464                         sbuf_cat(buf, "\n");
5465         }
5466
5467         error = sbuf_finish(buf);
5468         if (error)
5469                 device_printf(dev, "Error finishing sbuf: %d\n", error);
5470
5471         sbuf_delete(buf);
5472         return error;
5473 }
5474
5475 /*
5476 ** Caller must init and delete sbuf; this function will clear and
5477 ** finish it for caller.
5478 **
5479 ** XXX: Cannot use the SEID for this, since there is no longer a 
5480 ** fixed mapping between SEID and element type.
5481 */
5482 static char *
5483 ixl_switch_element_string(struct sbuf *s,
5484     struct i40e_aqc_switch_config_element_resp *element)
5485 {
5486         sbuf_clear(s);
5487
5488         switch (element->element_type) {
5489         case I40E_AQ_SW_ELEM_TYPE_MAC:
5490                 sbuf_printf(s, "MAC %3d", element->element_info);
5491                 break;
5492         case I40E_AQ_SW_ELEM_TYPE_PF:
5493                 sbuf_printf(s, "PF  %3d", element->element_info);
5494                 break;
5495         case I40E_AQ_SW_ELEM_TYPE_VF:
5496                 sbuf_printf(s, "VF  %3d", element->element_info);
5497                 break;
5498         case I40E_AQ_SW_ELEM_TYPE_EMP:
5499                 sbuf_cat(s, "EMP");
5500                 break;
5501         case I40E_AQ_SW_ELEM_TYPE_BMC:
5502                 sbuf_cat(s, "BMC");
5503                 break;
5504         case I40E_AQ_SW_ELEM_TYPE_PV:
5505                 sbuf_cat(s, "PV");
5506                 break;
5507         case I40E_AQ_SW_ELEM_TYPE_VEB:
5508                 sbuf_cat(s, "VEB");
5509                 break;
5510         case I40E_AQ_SW_ELEM_TYPE_PA:
5511                 sbuf_cat(s, "PA");
5512                 break;
5513         case I40E_AQ_SW_ELEM_TYPE_VSI:
5514                 sbuf_printf(s, "VSI %3d", element->element_info);
5515                 break;
5516         default:
5517                 sbuf_cat(s, "?");
5518                 break;
5519         }
5520
5521         sbuf_finish(s);
5522         return sbuf_data(s);
5523 }
5524
/*
** Sysctl handler that dumps the device's switch configuration (one
** row per switch element: SEID, name, uplink/downlink SEIDs, and
** connection type) as a human-readable table.
**
** Returns 0 on success, ENOMEM if an sbuf cannot be allocated, or
** the admin-queue / sbuf error code on failure.
*/
static int
ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	struct sbuf *nmbuf;
	int error = 0;
	u16 next = 0;
	u8 aq_buf[I40E_AQ_LARGE_BUF];

	struct i40e_aqc_get_switch_config_resp *sw_config;
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
		return (ENOMEM);
	}

	/* Ask firmware for the switch topology; 'next' is nonzero if the
	 * response did not fit in one buffer. */
	error = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (error) {
		device_printf(dev,
		    "%s: aq_get_switch_config() error %d, aq error %d\n",
		    __func__, error, hw->aq.asq_last_status);
		sbuf_delete(buf);
		return error;
	}
	if (next)
		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
		    __func__, next);

	/* Scratch sbuf used by ixl_switch_element_string() per element. */
	nmbuf = sbuf_new_auto();
	if (!nmbuf) {
		device_printf(dev, "Could not allocate sbuf for name output.\n");
		sbuf_delete(buf);
		return (ENOMEM);
	}

	sbuf_cat(buf, "\n");
	// Assuming <= 255 elements in switch
	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
	/* Exclude:
	** Revision -- all elements are revision 1 for now
	*/
	sbuf_printf(buf,
	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
	    "                |          |          | (uplink)\n");
	for (int i = 0; i < sw_config->header.num_reported; i++) {
		// "%4d (%8s) | %8s   %8s   %#8x",
		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
		sbuf_cat(buf, " ");
		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
		    &sw_config->element[i]));
		sbuf_cat(buf, " | ");
		sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
		sbuf_cat(buf, "   ");
		sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
		sbuf_cat(buf, "   ");
		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
		/* No trailing newline after the final row. */
		if (i < sw_config->header.num_reported - 1)
			sbuf_cat(buf, "\n");
	}
	sbuf_delete(nmbuf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (error);
}
5601
5602 static int
5603 ixl_debug_info(SYSCTL_HANDLER_ARGS)
5604 {
5605         struct ixl_pf   *pf;
5606         int             error, input = 0;
5607
5608         error = sysctl_handle_int(oidp, &input, 0, req);
5609
5610         if (error || !req->newptr)
5611                 return (error);
5612
5613         if (input == 1) {
5614                 pf = (struct ixl_pf *)arg1;
5615                 ixl_print_debug_info(pf);
5616         }
5617
5618         return (error);
5619 }
5620
5621 static void
5622 ixl_print_debug_info(struct ixl_pf *pf)
5623 {
5624         struct i40e_hw          *hw = &pf->hw;
5625         struct ixl_vsi          *vsi = &pf->vsi;
5626         struct ixl_queue        *que = vsi->queues;
5627         struct rx_ring          *rxr = &que->rxr;
5628         struct tx_ring          *txr = &que->txr;
5629         u32                     reg;    
5630
5631
5632         printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
5633         printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
5634         printf("RX next check = %x\n", rxr->next_check);
5635         printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
5636         printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
5637         printf("TX desc avail = %x\n", txr->avail);
5638
5639         reg = rd32(hw, I40E_GLV_GORCL(0xc));
5640          printf("RX Bytes = %x\n", reg);
5641         reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
5642          printf("Port RX Bytes = %x\n", reg);
5643         reg = rd32(hw, I40E_GLV_RDPC(0xc));
5644          printf("RX discard = %x\n", reg);
5645         reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
5646          printf("Port RX discard = %x\n", reg);
5647
5648         reg = rd32(hw, I40E_GLV_TEPC(0xc));
5649          printf("TX errors = %x\n", reg);
5650         reg = rd32(hw, I40E_GLV_GOTCL(0xc));
5651          printf("TX Bytes = %x\n", reg);
5652
5653         reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
5654          printf("RX undersize = %x\n", reg);
5655         reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
5656          printf("RX fragments = %x\n", reg);
5657         reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
5658          printf("RX oversize = %x\n", reg);
5659         reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
5660          printf("RX length error = %x\n", reg);
5661         reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
5662          printf("mac remote fault = %x\n", reg);
5663         reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
5664          printf("mac local fault = %x\n", reg);
5665 }
5666
5667 #endif /* IXL_DEBUG_SYSCTL */
5668
5669 #ifdef PCI_IOV
/*
** Create and configure the VSI backing a single VF: attach it to the
** PF's VEB, set security/VLAN properties, and map it to a contiguous
** range of the PF's queues.
**
** Returns 0 on success or an errno translated from the admin-queue
** status on failure.
*/
static int
ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	struct i40e_vsi_context vsi_ctx;
	int i;
	uint16_t first_queue;
	enum i40e_status_code code;

	hw = &pf->hw;
	vsi = &pf->vsi;

	/* The new VSI hangs off the PF's VEB and is typed as a VF VSI. */
	vsi_ctx.pf_num = hw->pf_id;
	vsi_ctx.uplink_seid = pf->veb_seid;
	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;

	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));

	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	vsi_ctx.info.switch_id = htole16(0);

	/* Optionally enforce that the VF transmits only from its own MAC. */
	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	vsi_ctx.info.sec_flags = 0;
	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;

	/* TODO: If a port VLAN is set, then this needs to be changed */
	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	/*
	 * Map IXLV_MAX_QUEUES queues to this VSI, starting past the PF's
	 * own queues plus those of lower-numbered VFs; mark the remaining
	 * map slots unused.
	 */
	vsi_ctx.info.valid_sections |=
	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
	first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
	for (i = 0; i < IXLV_MAX_QUEUES; i++)
		vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);

	/* Place the queues in TC 0 (offset 0). */
	vsi_ctx.info.tc_mapping[0] = htole16(
	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
	    (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));

	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
	if (code != I40E_SUCCESS)
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
	vf->vsi.seid = vsi_ctx.seid;
	vf->vsi.vsi_num = vsi_ctx.vsi_number;
	vf->vsi.first_queue = first_queue;
	vf->vsi.num_queues = IXLV_MAX_QUEUES;

	/* Read the context back so vsi.info reflects what HW stored. */
	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
	if (code != I40E_SUCCESS)
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));

	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
	if (code != I40E_SUCCESS) {
		device_printf(pf->dev, "Failed to disable BW limit: %d\n",
		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
	}

	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
	return (0);
}
5739
5740 static int
5741 ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5742 {
5743         struct i40e_hw *hw;
5744         int error;
5745
5746         hw = &pf->hw;
5747
5748         error = ixl_vf_alloc_vsi(pf, vf);
5749         if (error != 0)
5750                 return (error);
5751
5752         vf->vsi.hw_filters_add = 0;
5753         vf->vsi.hw_filters_del = 0;
5754         ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
5755         ixl_reconfigure_filters(&vf->vsi);
5756
5757         return (0);
5758 }
5759
/*
** Write one entry of the VSILAN_QTABLE for this VF's VSI, mapping the
** VSI-relative queue 'qnum' to the global queue index (or unused
** sentinel) passed in 'val'.
*/
static void
ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
    uint32_t val)
{
	uint32_t qtable;
	int index, shift;

	/*
	 * Two queues are mapped in a single register, so we have to do some
	 * gymnastics to convert the queue number into a register index and
	 * shift.
	 */
	index = qnum / 2;
	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;

	/* Read-modify-write only this queue's half of the register. */
	qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
	qtable |= val << shift;
	wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
}
5780
/*
** Program the hardware queue mappings for a VF: point the VF's
** VPLAN_QTABLE entries at the global queues backing its VSI, and hook
** the same queues into the VSI's queue-index table.
*/
static void
ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t qtable;
	int i;

	hw = &pf->hw;

	/*
	 * Contiguous mappings aren't actually supported by the hardware,
	 * so we have to use non-contiguous mappings.
	 */
	wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* Enable TX/RX queue mapping for this VF (TXRX_ENA). */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);

	/* Map VF-relative queue numbers to device-global queue indices. */
	for (i = 0; i < vf->vsi.num_queues; i++) {
		qtable = (vf->vsi.first_queue + i) <<
		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;

		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
	}

	/* Map queues allocated to VF to its VSI. */
	for (i = 0; i < vf->vsi.num_queues; i++)
		ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);

	/* Set rest of VSI queues as unused. */
	for (; i < IXL_MAX_VSI_QUEUES; i++)
		ixl_vf_map_vsi_queue(hw, vf, i,
		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);

	ixl_flush(hw);
}
5818
5819 static void
5820 ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
5821 {
5822         struct i40e_hw *hw;
5823
5824         hw = &pf->hw;
5825
5826         if (vsi->seid == 0)
5827                 return;
5828
5829         i40e_aq_delete_element(hw, vsi->seid, NULL);
5830 }
5831
/*
** Disable the queue interrupt behind 'vfint_reg' (a VFINT_DYN_CTL0/N
** register) by writing it with only the CLEARPBA bit set, then flush
** the write to hardware.
*/
static void
ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
{

	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
	ixl_flush(hw);
}
5839
/*
** Reset the interrupt linked-list head register 'vpint_reg' (a
** VPINT_LNKLST0/N register) by writing the full FIRSTQ type and index
** masks, effectively marking the list as empty, then flush.
*/
static void
ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
{

	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
	ixl_flush(hw);
}
5848
5849 static void
5850 ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
5851 {
5852         struct i40e_hw *hw;
5853         uint32_t vfint_reg, vpint_reg;
5854         int i;
5855
5856         hw = &pf->hw;
5857
5858         ixl_vf_vsi_release(pf, &vf->vsi);
5859
5860         /* Index 0 has a special register. */
5861         ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
5862
5863         for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5864                 vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i , vf->vf_num);
5865                 ixl_vf_disable_queue_intr(hw, vfint_reg);
5866         }
5867
5868         /* Index 0 has a special register. */
5869         ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
5870
5871         for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5872                 vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
5873                 ixl_vf_unregister_intr(hw, vpint_reg);
5874         }
5875
5876         vf->vsi.num_queues = 0;
5877 }
5878
5879 static int
5880 ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
5881 {
5882         struct i40e_hw *hw;
5883         int i;
5884         uint16_t global_vf_num;
5885         uint32_t ciad;
5886
5887         hw = &pf->hw;
5888         global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5889
5890         wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
5891              (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
5892         for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5893                 ciad = rd32(hw, I40E_PF_PCI_CIAD);
5894                 if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
5895                         return (0);
5896                 DELAY(1);
5897         }
5898
5899         return (ETIMEDOUT);
5900 }
5901
5902 static void
5903 ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5904 {
5905         struct i40e_hw *hw;
5906         uint32_t vfrtrig;
5907
5908         hw = &pf->hw;
5909
5910         vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5911         vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5912         wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5913         ixl_flush(hw);
5914
5915         ixl_reinit_vf(pf, vf);
5916 }
5917
/*
** Complete a VF reset: wait for pending PCIe activity and the
** hardware reset to finish, deassert the reset trigger, then tear
** down and rebuild the VF's VSI and queue mappings.
*/
static void
ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t vfrstat, vfrtrig;
	int i, error;

	hw = &pf->hw;

	error = ixl_flush_pcie(pf, vf);
	if (error != 0)
		device_printf(pf->dev,
		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
		    vf->vf_num);

	/* Poll VPGEN_VFRSTAT until the hardware reports reset done. */
	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
		DELAY(10);

		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
			break;
	}

	if (i == IXL_VF_RESET_TIMEOUT)
		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);

	/* Report reset completion to the VF via VFGEN_RSTAT1. */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);

	/* Deassert the software reset bit set by ixl_reset_vf(). */
	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);

	if (vf->vsi.seid != 0)
		ixl_disable_rings(&vf->vsi);

	/* Rebuild the VF's VSI and queue mappings from scratch. */
	ixl_vf_release_resources(pf, vf);
	ixl_vf_setup_vsi(pf, vf);
	ixl_vf_map_queues(pf, vf);

	/* Advertise the VFACTIVE state in VFGEN_RSTAT1. */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
	ixl_flush(hw);
}
5960
5961 static const char *
5962 ixl_vc_opcode_str(uint16_t op)
5963 {
5964
5965         switch (op) {
5966         case I40E_VIRTCHNL_OP_VERSION:
5967                 return ("VERSION");
5968         case I40E_VIRTCHNL_OP_RESET_VF:
5969                 return ("RESET_VF");
5970         case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
5971                 return ("GET_VF_RESOURCES");
5972         case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
5973                 return ("CONFIG_TX_QUEUE");
5974         case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
5975                 return ("CONFIG_RX_QUEUE");
5976         case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
5977                 return ("CONFIG_VSI_QUEUES");
5978         case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
5979                 return ("CONFIG_IRQ_MAP");
5980         case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
5981                 return ("ENABLE_QUEUES");
5982         case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
5983                 return ("DISABLE_QUEUES");
5984         case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
5985                 return ("ADD_ETHER_ADDRESS");
5986         case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
5987                 return ("DEL_ETHER_ADDRESS");
5988         case I40E_VIRTCHNL_OP_ADD_VLAN:
5989                 return ("ADD_VLAN");
5990         case I40E_VIRTCHNL_OP_DEL_VLAN:
5991                 return ("DEL_VLAN");
5992         case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
5993                 return ("CONFIG_PROMISCUOUS_MODE");
5994         case I40E_VIRTCHNL_OP_GET_STATS:
5995                 return ("GET_STATS");
5996         case I40E_VIRTCHNL_OP_FCOE:
5997                 return ("FCOE");
5998         case I40E_VIRTCHNL_OP_EVENT:
5999                 return ("EVENT");
6000         default:
6001                 return ("UNKNOWN");
6002         }
6003 }
6004
6005 static int
6006 ixl_vc_opcode_level(uint16_t opcode)
6007 {
6008         switch (opcode) {
6009         case I40E_VIRTCHNL_OP_GET_STATS:
6010                 return (10);
6011         default:
6012                 return (5);
6013         }
6014 }
6015
6016 static void
6017 ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
6018     enum i40e_status_code status, void *msg, uint16_t len)
6019 {
6020         struct i40e_hw *hw;
6021         int global_vf_id;
6022
6023         hw = &pf->hw;
6024         global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
6025
6026         I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
6027             "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
6028             ixl_vc_opcode_str(op), op, status, vf->vf_num);
6029
6030         i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
6031 }
6032
/*
** Acknowledge a VF request: send the opcode back with a success
** status and no payload.
*/
static void
ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
{

	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
}
6039
/*
** Reject a VF request: log the failing opcode and status with the PF
** call site ('file'/'line' -- presumably filled in by the
** i40e_send_vf_nack wrapper, whose callers pass neither), then send
** the error status back with no payload.
*/
static void
ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
    enum i40e_status_code status, const char *file, int line)
{

	I40E_VC_DEBUG(pf, 1,
	    "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
	    ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
}
6050
6051 static void
6052 ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6053     uint16_t msg_size)
6054 {
6055         struct i40e_virtchnl_version_info reply;
6056
6057         if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
6058                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
6059                     I40E_ERR_PARAM);
6060                 return;
6061         }
6062
6063         vf->version = ((struct i40e_virtchnl_version_info *)msg)->minor;
6064
6065         reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
6066         reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
6067         ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
6068             sizeof(reply));
6069 }
6070
/*
** Handle a VIRTCHNL RESET_VF request.  The message must carry no
** payload; anything else is NACKed.  A valid request triggers a full
** VF reset.
*/
static void
ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{

	if (msg_size != 0) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
		    I40E_ERR_PARAM);
		return;
	}

	ixl_reset_vf(pf, vf);

	/* No response to a reset message. */
}
6086
/*
** Handle a VIRTCHNL GET_VF_RESOURCES request: validate the message
** size against the negotiated API version, then reply with the VF's
** VSI, queue, vector, and offload-capability information.
**
** Version 0 VFs send no payload; version 1 VFs send a 4-byte
** capability-request word that is echoed back as the offload flags.
*/
static void
ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_virtchnl_vf_resource reply;

	if ((vf->version == 0 && msg_size != 0) ||
	    (vf->version == 1 && msg_size != 4)) {
		/* NOTE(review): this prints the driver's major version with
		 * the VF's stored minor (vf->version) -- confirm intended. */
		device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size,"
		    " for VF version %d.%d\n", I40E_VIRTCHNL_VERSION_MAJOR,
		    vf->version);
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
		    I40E_ERR_PARAM);
		return;
	}

	bzero(&reply, sizeof(reply));

	/* Pre-capability VFs receive a fixed default offload set. */
	if (vf->version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
		reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
					 I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
					 I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
	else
		reply.vf_offload_flags = *(u32 *)msg;

	reply.num_vsis = 1;
	reply.num_queue_pairs = vf->vsi.num_queues;
	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
	reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);

	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
	    I40E_SUCCESS, &reply, sizeof(reply));
}
6123
/*
** Program the HMC TX queue context for one of the VF's queues from a
** virtchnl CONFIG_TX_QUEUE request, then tag the queue as VF-owned
** in QTX_CTL.  Returns 0 on success or EINVAL on any HMC context
** failure.
*/
static int
ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
    struct i40e_virtchnl_txq_info *info)
{
	struct i40e_hw *hw;
	struct i40e_hmc_obj_txq txq;
	uint16_t global_queue_num, global_vf_num;
	enum i40e_status_code status;
	uint32_t qtx_ctl;

	hw = &pf->hw;
	/* Translate the VF-relative queue id to the device-global one. */
	global_queue_num = vf->vsi.first_queue + info->queue_id;
	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
	bzero(&txq, sizeof(txq));

	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	/* Ring base is stored in IXL_TX_CTX_BASE_UNITS-sized units. */
	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;

	txq.head_wb_ena = info->headwb_enabled;
	txq.head_wb_addr = info->dma_headwb_addr;
	txq.qlen = info->ring_len;
	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
	txq.rdylist_act = 0;

	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	/* Associate the queue with its owning PF and VF in QTX_CTL. */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
	ixl_flush(hw);

	return (0);
}
6163
/*
** Program the HMC RX queue context for one of the VF's queues from a
** virtchnl CONFIG_RX_QUEUE request.  Buffer and frame sizes are
** validated against the driver's VF limits first.  Returns 0 on
** success or EINVAL on a bad parameter or HMC context failure.
*/
static int
ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
    struct i40e_virtchnl_rxq_info *info)
{
	struct i40e_hw *hw;
	struct i40e_hmc_obj_rxq rxq;
	uint16_t global_queue_num;
	enum i40e_status_code status;

	hw = &pf->hw;
	/* Translate the VF-relative queue id to the device-global one. */
	global_queue_num = vf->vsi.first_queue + info->queue_id;
	bzero(&rxq, sizeof(rxq));

	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
		return (EINVAL);

	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
	    info->max_pkt_size < ETHER_MIN_LEN)
		return (EINVAL);

	/* Optional header split: validate and program the header buffer,
	 * limiting the split positions to the supported set. */
	if (info->splithdr_enabled) {
		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
			return (EINVAL);

		rxq.hsplit_0 = info->rx_split_pos &
		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* NOTE(review): dtype 2 presumably selects the header-split
		 * descriptor format -- confirm against the HMC context spec. */
		rxq.dtype = 2;
	}

	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	/* Ring base is stored in IXL_RX_CTX_BASE_UNITS-sized units. */
	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
	rxq.qlen = info->ring_len;

	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	rxq.dsize = 1;
	rxq.crcstrip = 1;
	rxq.l2tsel = 1;

	rxq.rxmax = info->max_pkt_size;
	rxq.tphrdesc_ena = 1;
	rxq.tphwdesc_ena = 1;
	rxq.tphdata_ena = 1;
	rxq.tphhead_ena = 1;
	rxq.lrxqthresh = 2;
	rxq.prefena = 1;

	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	return (0);
}
6225
/*
** Handle a VIRTCHNL CONFIG_VSI_QUEUES request: validate the overall
** message layout and each queue pair, then program the TX and RX HMC
** contexts for every pair.  Any validation or programming failure
** NACKs the request with I40E_ERR_PARAM; success is ACKed.
*/
static void
ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_virtchnl_vsi_queue_config_info *info;
	struct i40e_virtchnl_queue_pair_info *pair;
	int i;

	/* Message must at least hold the fixed-size header... */
	if (msg_size < sizeof(*info)) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	info = msg;
	if (info->num_queue_pairs == 0) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	/* ...and exactly the advertised number of queue-pair entries. */
	if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	/* The VF may only configure its own VSI. */
	if (info->vsi_id != vf->vsi.vsi_num) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < info->num_queue_pairs; i++) {
		pair = &info->qpair[i];

		/* TX and RX halves must target the same, in-range queue on
		 * the VF's own VSI. */
		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
		    pair->txq.queue_id != pair->rxq.queue_id ||
		    pair->txq.queue_id >= vf->vsi.num_queues) {

			i40e_send_vf_nack(pf, vf,
			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
			return;
		}

		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
			i40e_send_vf_nack(pf, vf,
			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
			return;
		}

		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
			i40e_send_vf_nack(pf, vf,
			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
			return;
		}
	}

	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
}
6287
/*
** Add one queue to the interrupt linked list being built for a
** vector: program this queue's QINT_{R,T}QCTL register so its NEXTQ
** fields point at the previously added queue, then record this queue
** as the new list head in *last_type/*last_queue.
**
** NOTE(review): the register value is passed through htole32() before
** wr32(), while other register writes in this file are not swapped --
** confirm this is intentional.
*/
static void
ixl_vf_set_qctl(struct ixl_pf *pf,
    const struct i40e_virtchnl_vector_map *vector,
    enum i40e_queue_type cur_type, uint16_t cur_queue,
    enum i40e_queue_type *last_type, uint16_t *last_queue)
{
	uint32_t offset, qctl;
	uint16_t itr_indx;

	/* RX and TX queues use different cause-control registers and take
	 * their ITR index from different fields of the vector map. */
	if (cur_type == I40E_QUEUE_TYPE_RX) {
		offset = I40E_QINT_RQCTL(cur_queue);
		itr_indx = vector->rxitr_idx;
	} else {
		offset = I40E_QINT_TQCTL(cur_queue);
		itr_indx = vector->txitr_idx;
	}

	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));

	wr32(&pf->hw, offset, qctl);

	*last_type = cur_type;
	*last_queue = cur_queue;
}
6316
/*
** Build the interrupt linked list for one vector of a VF from the
** RX/TX queue bitmaps in its virtchnl vector map, then point the
** vector's VPINT_LNKLST register at the head of the list.
*/
static void
ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
    const struct i40e_virtchnl_vector_map *vector)
{
	struct i40e_hw *hw;
	u_int qindex;
	enum i40e_queue_type type, last_type;
	uint32_t lnklst_reg;
	uint16_t rxq_map, txq_map, cur_queue, last_queue;

	hw = &pf->hw;

	rxq_map = vector->rxq_map;
	txq_map = vector->txq_map;

	/* Start from the "end of list" sentinel. */
	last_queue = IXL_END_OF_INTR_LNKLST;
	last_type = I40E_QUEUE_TYPE_RX;

	/*
	 * The datasheet says to optimize performance, RX queues and TX queues
	 * should be interleaved in the interrupt linked list, so we process
	 * both at once here.
	 */
	while ((rxq_map != 0) || (txq_map != 0)) {
		if (txq_map != 0) {
			/* Lowest set bit = next VF-relative TX queue. */
			qindex = ffs(txq_map) - 1;
			type = I40E_QUEUE_TYPE_TX;
			cur_queue = vf->vsi.first_queue + qindex;
			ixl_vf_set_qctl(pf, vector, type, cur_queue,
			    &last_type, &last_queue);
			txq_map &= ~(1 << qindex);
		}

		if (rxq_map != 0) {
			qindex = ffs(rxq_map) - 1;
			type = I40E_QUEUE_TYPE_RX;
			cur_queue = vf->vsi.first_queue + qindex;
			ixl_vf_set_qctl(pf, vector, type, cur_queue,
			    &last_type, &last_queue);
			rxq_map &= ~(1 << qindex);
		}
	}

	/* Vector 0 has a dedicated register; others are computed. */
	if (vector->vector_id == 0)
		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
	else
		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
		    vf->vf_num);
	/* Point the vector at the last queue added (the list head). */
	wr32(hw, lnklst_reg,
	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));

	ixl_flush(hw);
}
6371
/*
 * Handle a CONFIG_IRQ_MAP request from a VF: validate every vector map in
 * the message, then program the interrupt linked list for each vector.
 */
static void
ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
        struct i40e_virtchnl_irq_map_info *map;
        struct i40e_virtchnl_vector_map *vector;
        struct i40e_hw *hw;
        int i, largest_txq, largest_rxq;

        hw = &pf->hw;

        /* The fixed header must be present before num_vectors is read. */
        if (msg_size < sizeof(*map)) {
                i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
                    I40E_ERR_PARAM);
                return;
        }

        map = msg;
        if (map->num_vectors == 0) {
                i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
                    I40E_ERR_PARAM);
                return;
        }

        /* The message must contain exactly num_vectors vector maps. */
        if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
                i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
                    I40E_ERR_PARAM);
                return;
        }

        for (i = 0; i < map->num_vectors; i++) {
                vector = &map->vecmap[i];

                /* The vector must exist and may only target the VF's VSI. */
                if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
                    vector->vsi_id != vf->vsi.vsi_num) {
                        i40e_send_vf_nack(pf, vf,
                            I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
                        return;
                }

                /* Every RX queue bit must refer to a queue the VSI owns. */
                if (vector->rxq_map != 0) {
                        largest_rxq = fls(vector->rxq_map) - 1;
                        if (largest_rxq >= vf->vsi.num_queues) {
                                i40e_send_vf_nack(pf, vf,
                                    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
                                    I40E_ERR_PARAM);
                                return;
                        }
                }

                /* Likewise for the TX queue bitmap. */
                if (vector->txq_map != 0) {
                        largest_txq = fls(vector->txq_map) - 1;
                        if (largest_txq >= vf->vsi.num_queues) {
                                i40e_send_vf_nack(pf, vf,
                                    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
                                    I40E_ERR_PARAM);
                                return;
                        }
                }

                /* ITR indices beyond IXL_MAX_ITR_IDX are invalid. */
                if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
                    vector->txitr_idx > IXL_MAX_ITR_IDX) {
                        i40e_send_vf_nack(pf, vf,
                            I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
                            I40E_ERR_PARAM);
                        return;
                }

                ixl_vf_config_vector(pf, vf, vector);
        }

        ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
}
6445
6446 static void
6447 ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6448     uint16_t msg_size)
6449 {
6450         struct i40e_virtchnl_queue_select *select;
6451         int error;
6452
6453         if (msg_size != sizeof(*select)) {
6454                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6455                     I40E_ERR_PARAM);
6456                 return;
6457         }
6458
6459         select = msg;
6460         if (select->vsi_id != vf->vsi.vsi_num ||
6461             select->rx_queues == 0 || select->tx_queues == 0) {
6462                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6463                     I40E_ERR_PARAM);
6464                 return;
6465         }
6466
6467         error = ixl_enable_rings(&vf->vsi);
6468         if (error) {
6469                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6470                     I40E_ERR_TIMEOUT);
6471                 return;
6472         }
6473
6474         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
6475 }
6476
6477 static void
6478 ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6479     void *msg, uint16_t msg_size)
6480 {
6481         struct i40e_virtchnl_queue_select *select;
6482         int error;
6483
6484         if (msg_size != sizeof(*select)) {
6485                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6486                     I40E_ERR_PARAM);
6487                 return;
6488         }
6489
6490         select = msg;
6491         if (select->vsi_id != vf->vsi.vsi_num ||
6492             select->rx_queues == 0 || select->tx_queues == 0) {
6493                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6494                     I40E_ERR_PARAM);
6495                 return;
6496         }
6497
6498         error = ixl_disable_rings(&vf->vsi);
6499         if (error) {
6500                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6501                     I40E_ERR_TIMEOUT);
6502                 return;
6503         }
6504
6505         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
6506 }
6507
6508 static boolean_t
6509 ixl_zero_mac(const uint8_t *addr)
6510 {
6511         uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
6512
6513         return (cmp_etheraddr(addr, zero));
6514 }
6515
6516 static boolean_t
6517 ixl_bcast_mac(const uint8_t *addr)
6518 {
6519
6520         return (cmp_etheraddr(addr, ixl_bcast_addr));
6521 }
6522
6523 static int
6524 ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
6525 {
6526
6527         if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
6528                 return (EINVAL);
6529
6530         /*
6531          * If the VF is not allowed to change its MAC address, don't let it
6532          * set a MAC filter for an address that is not a multicast address and
6533          * is not its assigned MAC.
6534          */
6535         if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
6536             !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
6537                 return (EPERM);
6538
6539         return (0);
6540 }
6541
6542 static void
6543 ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6544     uint16_t msg_size)
6545 {
6546         struct i40e_virtchnl_ether_addr_list *addr_list;
6547         struct i40e_virtchnl_ether_addr *addr;
6548         struct ixl_vsi *vsi;
6549         int i;
6550         size_t expected_size;
6551
6552         vsi = &vf->vsi;
6553
6554         if (msg_size < sizeof(*addr_list)) {
6555                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6556                     I40E_ERR_PARAM);
6557                 return;
6558         }
6559
6560         addr_list = msg;
6561         expected_size = sizeof(*addr_list) +
6562             addr_list->num_elements * sizeof(*addr);
6563
6564         if (addr_list->num_elements == 0 ||
6565             addr_list->vsi_id != vsi->vsi_num ||
6566             msg_size != expected_size) {
6567                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6568                     I40E_ERR_PARAM);
6569                 return;
6570         }
6571
6572         for (i = 0; i < addr_list->num_elements; i++) {
6573                 if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
6574                         i40e_send_vf_nack(pf, vf,
6575                             I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6576                         return;
6577                 }
6578         }
6579
6580         for (i = 0; i < addr_list->num_elements; i++) {
6581                 addr = &addr_list->list[i];
6582                 ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
6583         }
6584
6585         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
6586 }
6587
6588 static void
6589 ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6590     uint16_t msg_size)
6591 {
6592         struct i40e_virtchnl_ether_addr_list *addr_list;
6593         struct i40e_virtchnl_ether_addr *addr;
6594         size_t expected_size;
6595         int i;
6596
6597         if (msg_size < sizeof(*addr_list)) {
6598                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6599                     I40E_ERR_PARAM);
6600                 return;
6601         }
6602
6603         addr_list = msg;
6604         expected_size = sizeof(*addr_list) +
6605             addr_list->num_elements * sizeof(*addr);
6606
6607         if (addr_list->num_elements == 0 ||
6608             addr_list->vsi_id != vf->vsi.vsi_num ||
6609             msg_size != expected_size) {
6610                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6611                     I40E_ERR_PARAM);
6612                 return;
6613         }
6614
6615         for (i = 0; i < addr_list->num_elements; i++) {
6616                 addr = &addr_list->list[i];
6617                 if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
6618                         i40e_send_vf_nack(pf, vf,
6619                             I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6620                         return;
6621                 }
6622         }
6623
6624         for (i = 0; i < addr_list->num_elements; i++) {
6625                 addr = &addr_list->list[i];
6626                 ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
6627         }
6628
6629         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
6630 }
6631
6632 static enum i40e_status_code
6633 ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
6634 {
6635         struct i40e_vsi_context vsi_ctx;
6636
6637         vsi_ctx.seid = vf->vsi.seid;
6638
6639         bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
6640         vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
6641         vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
6642             I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6643         return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
6644 }
6645
6646 static void
6647 ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6648     uint16_t msg_size)
6649 {
6650         struct i40e_virtchnl_vlan_filter_list *filter_list;
6651         enum i40e_status_code code;
6652         size_t expected_size;
6653         int i;
6654
6655         if (msg_size < sizeof(*filter_list)) {
6656                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6657                     I40E_ERR_PARAM);
6658                 return;
6659         }
6660
6661         filter_list = msg;
6662         expected_size = sizeof(*filter_list) +
6663             filter_list->num_elements * sizeof(uint16_t);
6664         if (filter_list->num_elements == 0 ||
6665             filter_list->vsi_id != vf->vsi.vsi_num ||
6666             msg_size != expected_size) {
6667                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6668                     I40E_ERR_PARAM);
6669                 return;
6670         }
6671
6672         if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6673                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6674                     I40E_ERR_PARAM);
6675                 return;
6676         }
6677
6678         for (i = 0; i < filter_list->num_elements; i++) {
6679                 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6680                         i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6681                             I40E_ERR_PARAM);
6682                         return;
6683                 }
6684         }
6685
6686         code = ixl_vf_enable_vlan_strip(pf, vf);
6687         if (code != I40E_SUCCESS) {
6688                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6689                     I40E_ERR_PARAM);
6690         }
6691
6692         for (i = 0; i < filter_list->num_elements; i++)
6693                 ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6694
6695         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
6696 }
6697
6698 static void
6699 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6700     uint16_t msg_size)
6701 {
6702         struct i40e_virtchnl_vlan_filter_list *filter_list;
6703         int i;
6704         size_t expected_size;
6705
6706         if (msg_size < sizeof(*filter_list)) {
6707                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6708                     I40E_ERR_PARAM);
6709                 return;
6710         }
6711
6712         filter_list = msg;
6713         expected_size = sizeof(*filter_list) +
6714             filter_list->num_elements * sizeof(uint16_t);
6715         if (filter_list->num_elements == 0 ||
6716             filter_list->vsi_id != vf->vsi.vsi_num ||
6717             msg_size != expected_size) {
6718                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6719                     I40E_ERR_PARAM);
6720                 return;
6721         }
6722
6723         for (i = 0; i < filter_list->num_elements; i++) {
6724                 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6725                         i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6726                             I40E_ERR_PARAM);
6727                         return;
6728                 }
6729         }
6730
6731         if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6732                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6733                     I40E_ERR_PARAM);
6734                 return;
6735         }
6736
6737         for (i = 0; i < filter_list->num_elements; i++)
6738                 ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6739
6740         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
6741 }
6742
6743 static void
6744 ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6745     void *msg, uint16_t msg_size)
6746 {
6747         struct i40e_virtchnl_promisc_info *info;
6748         enum i40e_status_code code;
6749
6750         if (msg_size != sizeof(*info)) {
6751                 i40e_send_vf_nack(pf, vf,
6752                     I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6753                 return;
6754         }
6755
6756         if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
6757                 i40e_send_vf_nack(pf, vf,
6758                     I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6759                 return;
6760         }
6761
6762         info = msg;
6763         if (info->vsi_id != vf->vsi.vsi_num) {
6764                 i40e_send_vf_nack(pf, vf,
6765                     I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6766                 return;
6767         }
6768
6769         code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
6770             info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
6771         if (code != I40E_SUCCESS) {
6772                 i40e_send_vf_nack(pf, vf,
6773                     I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6774                 return;
6775         }
6776
6777         code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
6778             info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
6779         if (code != I40E_SUCCESS) {
6780                 i40e_send_vf_nack(pf, vf,
6781                     I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6782                 return;
6783         }
6784
6785         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
6786 }
6787
6788 static void
6789 ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6790     uint16_t msg_size)
6791 {
6792         struct i40e_virtchnl_queue_select *queue;
6793
6794         if (msg_size != sizeof(*queue)) {
6795                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6796                     I40E_ERR_PARAM);
6797                 return;
6798         }
6799
6800         queue = msg;
6801         if (queue->vsi_id != vf->vsi.vsi_num) {
6802                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6803                     I40E_ERR_PARAM);
6804                 return;
6805         }
6806
6807         ixl_update_eth_stats(&vf->vsi);
6808
6809         ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6810             I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
6811 }
6812
/*
 * Dispatch an admin-queue event carrying a virtchnl message from a VF to
 * the handler for its opcode.  Messages from out-of-range VF numbers are
 * logged and dropped; unknown opcodes are NACKed as not implemented.
 */
static void
ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
{
        struct ixl_vf *vf;
        void *msg;
        uint16_t vf_num, msg_size;
        uint32_t opcode;

        /* The descriptor's retval field carries the global VF number. */
        vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
        opcode = le32toh(event->desc.cookie_high);

        if (vf_num >= pf->num_vfs) {
                device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
                return;
        }

        vf = &pf->vfs[vf_num];
        msg = event->msg_buf;
        msg_size = event->msg_len;

        I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
            "Got msg %s(%d) from VF-%d of size %d\n",
            ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);

        /* Each handler validates msg_size itself before touching msg. */
        switch (opcode) {
        case I40E_VIRTCHNL_OP_VERSION:
                ixl_vf_version_msg(pf, vf, msg, msg_size);
                break;
        case I40E_VIRTCHNL_OP_RESET_VF:
                ixl_vf_reset_msg(pf, vf, msg, msg_size);
                break;
        case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
                ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
                break;
        case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
                ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
                break;
        case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
                ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
                break;
        case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
                ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
                break;
        case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
                ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
                break;
        case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
                ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
                break;
        case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
                ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
                break;
        case I40E_VIRTCHNL_OP_ADD_VLAN:
                ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
                break;
        case I40E_VIRTCHNL_OP_DEL_VLAN:
                ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
                break;
        case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
                ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
                break;
        case I40E_VIRTCHNL_OP_GET_STATS:
                ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
                break;

        /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
        case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
        case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
        default:
                i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
                break;
        }
}
6886
/* Handle any VFs that have reset themselves via a Function Level Reset(FLR). */
static void
ixl_handle_vflr(void *arg, int pending)
{
        struct ixl_pf *pf;
        struct i40e_hw *hw;
        uint16_t global_vf_num;
        uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
        int i;

        pf = arg;
        hw = &pf->hw;

        IXL_PF_LOCK(pf);
        for (i = 0; i < pf->num_vfs; i++) {
                global_vf_num = hw->func_caps.vf_base_id + i;

                /* Each VF owns one bit in the GLGEN_VFLRSTAT register array. */
                vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
                vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
                vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
                if (vflrstat & vflrstat_mask) {
                        /*
                         * Acknowledge the FLR by writing the bit back
                         * (presumably write-1-to-clear per the datasheet),
                         * then reinitialize the VF's resources.
                         */
                        wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
                            vflrstat_mask);

                        ixl_reinit_vf(pf, &pf->vfs[i]);
                }
        }

        /* Re-arm the VFLR interrupt cause now that it has been serviced. */
        icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
        icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
        wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
        ixl_flush(hw);

        IXL_PF_UNLOCK(pf);
}
6922
6923 static int
6924 ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
6925 {
6926
6927         switch (err) {
6928         case I40E_AQ_RC_EPERM:
6929                 return (EPERM);
6930         case I40E_AQ_RC_ENOENT:
6931                 return (ENOENT);
6932         case I40E_AQ_RC_ESRCH:
6933                 return (ESRCH);
6934         case I40E_AQ_RC_EINTR:
6935                 return (EINTR);
6936         case I40E_AQ_RC_EIO:
6937                 return (EIO);
6938         case I40E_AQ_RC_ENXIO:
6939                 return (ENXIO);
6940         case I40E_AQ_RC_E2BIG:
6941                 return (E2BIG);
6942         case I40E_AQ_RC_EAGAIN:
6943                 return (EAGAIN);
6944         case I40E_AQ_RC_ENOMEM:
6945                 return (ENOMEM);
6946         case I40E_AQ_RC_EACCES:
6947                 return (EACCES);
6948         case I40E_AQ_RC_EFAULT:
6949                 return (EFAULT);
6950         case I40E_AQ_RC_EBUSY:
6951                 return (EBUSY);
6952         case I40E_AQ_RC_EEXIST:
6953                 return (EEXIST);
6954         case I40E_AQ_RC_EINVAL:
6955                 return (EINVAL);
6956         case I40E_AQ_RC_ENOTTY:
6957                 return (ENOTTY);
6958         case I40E_AQ_RC_ENOSPC:
6959                 return (ENOSPC);
6960         case I40E_AQ_RC_ENOSYS:
6961                 return (ENOSYS);
6962         case I40E_AQ_RC_ERANGE:
6963                 return (ERANGE);
6964         case I40E_AQ_RC_EFLUSHED:
6965                 return (EINVAL);        /* No exact equivalent in errno.h */
6966         case I40E_AQ_RC_BAD_ADDR:
6967                 return (EFAULT);
6968         case I40E_AQ_RC_EMODE:
6969                 return (EPERM);
6970         case I40E_AQ_RC_EFBIG:
6971                 return (EFBIG);
6972         default:
6973                 return (EINVAL);
6974         }
6975 }
6976
/*
 * iov_init method: allocate per-VF state and create the VEB that switches
 * traffic between the PF and its VFs.  Returns 0 or an errno value.
 */
static int
ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
{
        struct ixl_pf *pf;
        struct i40e_hw *hw;
        struct ixl_vsi *pf_vsi;
        enum i40e_status_code ret;
        int i, error;

        pf = device_get_softc(dev);
        hw = &pf->hw;
        pf_vsi = &pf->vsi;

        IXL_PF_LOCK(pf);
        /* M_NOWAIT because the PF lock is held across the allocation. */
        pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
            M_ZERO);

        if (pf->vfs == NULL) {
                error = ENOMEM;
                goto fail;
        }

        for (i = 0; i < num_vfs; i++)
                sysctl_ctx_init(&pf->vfs[i].ctx);

        /* A single VEB under the PF VSI's uplink connects PF and VF VSIs. */
        ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
            1, FALSE, &pf->veb_seid, FALSE, NULL);
        if (ret != I40E_SUCCESS) {
                error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
                device_printf(dev, "add_veb failed; code=%d error=%d", ret,
                    error);
                goto fail;
        }

        ixl_configure_msix(pf);
        ixl_enable_adminq(hw);

        /* Publishing num_vfs makes the VFs visible to the msg handlers. */
        pf->num_vfs = num_vfs;
        IXL_PF_UNLOCK(pf);
        return (0);

fail:
        free(pf->vfs, M_IXL);
        pf->vfs = NULL;
        IXL_PF_UNLOCK(pf);
        return (error);
}
7024
/* iov_uninit method: tear down all VF VSIs, the VEB, and per-VF state. */
static void
ixl_iov_uninit(device_t dev)
{
        struct ixl_pf *pf;
        struct i40e_hw *hw;
        struct ixl_vsi *vsi;
        struct ifnet *ifp;
        struct ixl_vf *vfs;
        int i, num_vfs;

        pf = device_get_softc(dev);
        hw = &pf->hw;
        vsi = &pf->vsi;
        ifp = vsi->ifp;

        IXL_PF_LOCK(pf);
        /* Remove every VF VSI that was actually created (seid != 0). */
        for (i = 0; i < pf->num_vfs; i++) {
                if (pf->vfs[i].vsi.seid != 0)
                        i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
        }

        if (pf->veb_seid != 0) {
                i40e_aq_delete_element(hw, pf->veb_seid, NULL);
                pf->veb_seid = 0;
        }

        /*
         * NOTE(review): interrupts are disabled only when the interface is
         * NOT running — confirm this condition is not inverted.
         */
        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
                ixl_disable_intr(vsi);

        /* Detach the VF array from the softc before dropping the lock. */
        vfs = pf->vfs;
        num_vfs = pf->num_vfs;

        pf->vfs = NULL;
        pf->num_vfs = 0;
        IXL_PF_UNLOCK(pf);

        /* Do this after the unlock as sysctl_ctx_free might sleep. */
        for (i = 0; i < num_vfs; i++)
                sysctl_ctx_free(&vfs[i].ctx);
        free(vfs, M_IXL);
}
7066
/*
 * iov_add_vf method: initialize one VF's flags, MAC address, and VSI from
 * the administrator-supplied nvlist parameters.  Returns 0 or an errno.
 */
static int
ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
        char sysctl_name[QUEUE_NAME_LEN];
        struct ixl_pf *pf;
        struct ixl_vf *vf;
        const void *mac;
        size_t size;
        int error;

        pf = device_get_softc(dev);
        vf = &pf->vfs[vfnum];

        IXL_PF_LOCK(pf);
        vf->vf_num = vfnum;

        vf->vsi.back = pf;
        vf->vf_flags = VF_FLAG_ENABLED;
        SLIST_INIT(&vf->vsi.ftl);

        error = ixl_vf_setup_vsi(pf, vf);
        if (error != 0)
                goto out;

        if (nvlist_exists_binary(params, "mac-addr")) {
                mac = nvlist_get_binary(params, "mac-addr", &size);
                bcopy(mac, vf->mac, ETHER_ADDR_LEN);

                if (nvlist_get_bool(params, "allow-set-mac"))
                        vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
        } else
                /*
                 * If the administrator has not specified a MAC address then
                 * we must allow the VF to choose one.
                 */
                vf->vf_flags |= VF_FLAG_SET_MAC_CAP;

        if (nvlist_get_bool(params, "mac-anti-spoof"))
                vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;

        if (nvlist_get_bool(params, "allow-promisc"))
                vf->vf_flags |= VF_FLAG_PROMISC_CAP;

        /* TODO: Get VLAN that PF has set for the VF */

        vf->vf_flags |= VF_FLAG_VLAN_CAP;

        ixl_reset_vf(pf, vf);
out:
        IXL_PF_UNLOCK(pf);
        /* Sysctl registration happens unlocked, presumably because it may
         * sleep — same reasoning as sysctl_ctx_free in ixl_iov_uninit. */
        if (error == 0) {
                snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
                ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
        }

        return (error);
}
7124 #endif /* PCI_IOV */