/*
 * Source: FreeBSD releng/10.3 (copy of stable/10@296371), sys/dev/ixl/if_ixl.c.
 * (Repository-viewer navigation header converted to a comment so the file
 * remains valid C.)
 */
1 /******************************************************************************
2
3   Copyright (c) 2013-2015, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 #ifndef IXL_STANDALONE_BUILD
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #endif
39
40 #include "ixl.h"
41 #include "ixl_pf.h"
42
43 #ifdef RSS
44 #include <net/rss_config.h>
45 #endif
46
47 /*********************************************************************
48  *  Driver version
49  *********************************************************************/
/* Human-readable driver version; embedded in the device description built in ixl_probe() */
char ixl_driver_version[] = "1.4.3";
51
52 /*********************************************************************
53  *  PCI Device ID Table
54  *
55  *  Used by probe to select devices to load on
56  *  Last field stores an index into ixl_strings
57  *  Last entry must be all 0s
58  *
59  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
60  *********************************************************************/
61
/*
 * PCI IDs this driver attaches to; walked by ixl_probe().  A zero
 * subvendor/subdevice field acts as a wildcard there, and the all-zero
 * final entry terminates the walk.
 */
static ixl_vendor_info_t ixl_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A, 0, 0, 0},
#ifdef X722_SUPPORT
	/* X722 family devices, only when built with X722 support */
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, 0, 0, 0},
#endif
	/* required last entry */
	{0, 0, 0, 0, 0}
};
83
84 /*********************************************************************
85  *  Table of branding strings
86  *********************************************************************/
87
/* Branding strings, selected by the index field of ixl_vendor_info_array in ixl_probe() */
static char    *ixl_strings[] = {
	"Intel(R) Ethernet Connection XL710 Driver"
};
91
92
93 /*********************************************************************
94  *  Function prototypes
95  *********************************************************************/
96 static int      ixl_probe(device_t);
97 static int      ixl_attach(device_t);
98 static int      ixl_detach(device_t);
99 static int      ixl_shutdown(device_t);
100 static int      ixl_get_hw_capabilities(struct ixl_pf *);
101 static void     ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
102 static int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
103 static void     ixl_init(void *);
104 static void     ixl_init_locked(struct ixl_pf *);
105 static void     ixl_stop(struct ixl_pf *);
106 static void     ixl_media_status(struct ifnet *, struct ifmediareq *);
107 static int      ixl_media_change(struct ifnet *);
108 static void     ixl_update_link_status(struct ixl_pf *);
109 static int      ixl_allocate_pci_resources(struct ixl_pf *);
110 static u16      ixl_get_bus_info(struct i40e_hw *, device_t);
111 static int      ixl_setup_stations(struct ixl_pf *);
112 static int      ixl_switch_config(struct ixl_pf *);
113 static int      ixl_initialize_vsi(struct ixl_vsi *);
114 static int      ixl_assign_vsi_msix(struct ixl_pf *);
115 static int      ixl_assign_vsi_legacy(struct ixl_pf *);
116 static int      ixl_init_msix(struct ixl_pf *);
117 static void     ixl_configure_msix(struct ixl_pf *);
118 static void     ixl_configure_itr(struct ixl_pf *);
119 static void     ixl_configure_legacy(struct ixl_pf *);
120 static void     ixl_free_pci_resources(struct ixl_pf *);
121 static void     ixl_local_timer(void *);
122 static int      ixl_setup_interface(device_t, struct ixl_vsi *);
123 static void     ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
124 static void     ixl_config_rss(struct ixl_vsi *);
125 static void     ixl_set_queue_rx_itr(struct ixl_queue *);
126 static void     ixl_set_queue_tx_itr(struct ixl_queue *);
127 static int      ixl_set_advertised_speeds(struct ixl_pf *, int);
128
129 static int      ixl_enable_rings(struct ixl_vsi *);
130 static int      ixl_disable_rings(struct ixl_vsi *);
131 static void     ixl_enable_intr(struct ixl_vsi *);
132 static void     ixl_disable_intr(struct ixl_vsi *);
133 static void     ixl_disable_rings_intr(struct ixl_vsi *);
134
135 static void     ixl_enable_adminq(struct i40e_hw *);
136 static void     ixl_disable_adminq(struct i40e_hw *);
137 static void     ixl_enable_queue(struct i40e_hw *, int);
138 static void     ixl_disable_queue(struct i40e_hw *, int);
139 static void     ixl_enable_legacy(struct i40e_hw *);
140 static void     ixl_disable_legacy(struct i40e_hw *);
141
142 static void     ixl_set_promisc(struct ixl_vsi *);
143 static void     ixl_add_multi(struct ixl_vsi *);
144 static void     ixl_del_multi(struct ixl_vsi *);
145 static void     ixl_register_vlan(void *, struct ifnet *, u16);
146 static void     ixl_unregister_vlan(void *, struct ifnet *, u16);
147 static void     ixl_setup_vlan_filters(struct ixl_vsi *);
148
149 static void     ixl_init_filters(struct ixl_vsi *);
150 static void     ixl_reconfigure_filters(struct ixl_vsi *vsi);
151 static void     ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
152 static void     ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
153 static void     ixl_add_hw_filters(struct ixl_vsi *, int, int);
154 static void     ixl_del_hw_filters(struct ixl_vsi *, int);
155 static struct ixl_mac_filter *
156                 ixl_find_filter(struct ixl_vsi *, u8 *, s16);
157 static void     ixl_add_mc_filter(struct ixl_vsi *, u8 *);
158 static void     ixl_free_mac_filters(struct ixl_vsi *vsi);
159
160
161 /* Sysctl debug interface */
162 #ifdef IXL_DEBUG_SYSCTL
163 static int      ixl_debug_info(SYSCTL_HANDLER_ARGS);
164 static void     ixl_print_debug_info(struct ixl_pf *);
165 #endif
166
167 /* The MSI/X Interrupt handlers */
168 static void     ixl_intr(void *);
169 static void     ixl_msix_que(void *);
170 static void     ixl_msix_adminq(void *);
171 static void     ixl_handle_mdd_event(struct ixl_pf *);
172
173 /* Deferred interrupt tasklets */
174 static void     ixl_do_adminq(void *, int);
175
176 /* Sysctl handlers */
177 static int      ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
178 static int      ixl_set_advertise(SYSCTL_HANDLER_ARGS);
179 static int      ixl_current_speed(SYSCTL_HANDLER_ARGS);
180 static int      ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
181
182 /* Statistics */
183 static void     ixl_add_hw_stats(struct ixl_pf *);
184 static void     ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
185                     struct sysctl_oid_list *, struct i40e_hw_port_stats *);
186 static void     ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
187                     struct sysctl_oid_list *,
188                     struct i40e_eth_stats *);
189 static void     ixl_update_stats_counters(struct ixl_pf *);
190 static void     ixl_update_eth_stats(struct ixl_vsi *);
191 static void     ixl_update_vsi_stats(struct ixl_vsi *);
192 static void     ixl_pf_reset_stats(struct ixl_pf *);
193 static void     ixl_vsi_reset_stats(struct ixl_vsi *);
194 static void     ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
195                     u64 *, u64 *);
196 static void     ixl_stat_update32(struct i40e_hw *, u32, bool,
197                     u64 *, u64 *);
198
199 #ifdef IXL_DEBUG_SYSCTL
200 static int      ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
201 static int      ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
202 static int      ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
203 static int      ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
204 static int      ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
205 #endif
206
207 #ifdef PCI_IOV
208 static int      ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
209
210 static int      ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t*);
211 static void     ixl_uninit_iov(device_t dev);
212 static int      ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);
213
214 static void     ixl_handle_vf_msg(struct ixl_pf *,
215                     struct i40e_arq_event_info *);
216 static void     ixl_handle_vflr(void *arg, int pending);
217
218 static void     ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
219 static void     ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
220 #endif
221
222 /*********************************************************************
223  *  FreeBSD Device Interface Entry Points
224  *********************************************************************/
225
/* Newbus method table: entry points dispatched by the kernel device framework */
static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixl_probe),
	DEVMETHOD(device_attach, ixl_attach),
	DEVMETHOD(device_detach, ixl_detach),
	DEVMETHOD(device_shutdown, ixl_shutdown),
#ifdef PCI_IOV
	/* SR-IOV entry points, only when built with PCI_IOV */
	DEVMETHOD(pci_init_iov, ixl_init_iov),
	DEVMETHOD(pci_uninit_iov, ixl_uninit_iov),
	DEVMETHOD(pci_add_vf, ixl_add_vf),
#endif
	{0, 0}	/* required terminator */
};

/* Driver description: name, methods, and softc size (one ixl_pf per device) */
static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
/* Register this driver on the PCI bus */
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);

/* Module load-order dependencies */
MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ixl, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
252
/*
** Global reset mutex
** (one-shot initialization happens in ixl_probe)
*/
static struct mtx ixl_reset_mtx;

/*
** TUNEABLE PARAMETERS:
** Each tunable below is both a loader tunable (TUNABLE_INT) and a
** read-only-tunable sysctl under hw.ixl (SYSCTL_INT with CTLFLAG_RDTUN).
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
                   "IXL driver parameters");

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixl_enable_msix = 1;
TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixl_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixl_ringsz, 0, "Descriptor Ring Size");

/* 
** This can be set manually, if left as 0 the
** number of queues will be calculated based
** on cpus and msix vectors available.
*/
int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixl_max_queues, 0, "Number of Queues");

/*
** Controls for Interrupt Throttling 
**      - true/false for dynamic adjustment
**      - default values for static ITR
** (also exposed read-write per device in ixl_attach)
*/
int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

#ifdef IXL_FDIR
/* Flow Director tunables, only when built with IXL_FDIR */
static int ixl_enable_fdir = 1;
TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
/* Rate at which we sample */
int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif

#ifdef DEV_NETMAP
#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */

/*
 * Flow-control mode names; presumably indexed by the hw flow-control
 * mode in the "fc" sysctl handler -- confirm against ixl_set_flowcntl.
 */
static char *ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

/* malloc(9) type tag for this driver's allocations */
static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

/* Ethernet broadcast address (use not visible in this chunk; likely filter setup) */
static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
344
345 /*********************************************************************
346  *  Device identification routine
347  *
348  *  ixl_probe determines if the driver should be loaded on
349  *  the hardware based on PCI vendor/device id of the device.
350  *
351  *  return BUS_PROBE_DEFAULT on success, positive on failure
352  *********************************************************************/
353
354 static int
355 ixl_probe(device_t dev)
356 {
357         ixl_vendor_info_t *ent;
358
359         u16     pci_vendor_id, pci_device_id;
360         u16     pci_subvendor_id, pci_subdevice_id;
361         char    device_name[256];
362         static bool lock_init = FALSE;
363
364         INIT_DEBUGOUT("ixl_probe: begin");
365
366         pci_vendor_id = pci_get_vendor(dev);
367         if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
368                 return (ENXIO);
369
370         pci_device_id = pci_get_device(dev);
371         pci_subvendor_id = pci_get_subvendor(dev);
372         pci_subdevice_id = pci_get_subdevice(dev);
373
374         ent = ixl_vendor_info_array;
375         while (ent->vendor_id != 0) {
376                 if ((pci_vendor_id == ent->vendor_id) &&
377                     (pci_device_id == ent->device_id) &&
378
379                     ((pci_subvendor_id == ent->subvendor_id) ||
380                      (ent->subvendor_id == 0)) &&
381
382                     ((pci_subdevice_id == ent->subdevice_id) ||
383                      (ent->subdevice_id == 0))) {
384                         sprintf(device_name, "%s, Version - %s",
385                                 ixl_strings[ent->index],
386                                 ixl_driver_version);
387                         device_set_desc_copy(dev, device_name);
388                         /* One shot mutex init */
389                         if (lock_init == FALSE) {
390                                 lock_init = TRUE;
391                                 mtx_init(&ixl_reset_mtx,
392                                     "ixl_reset",
393                                     "IXL RESET Lock", MTX_DEF);
394                         }
395                         return (BUS_PROBE_DEFAULT);
396                 }
397                 ent++;
398         }
399         return (ENXIO);
400 }
401
402 /*********************************************************************
403  *  Device initialization routine
404  *
405  *  The attach entry point is called when the driver is being loaded.
406  *  This routine identifies the type of hardware, allocates all resources
407  *  and initializes the hardware.
408  *
409  *  return 0 on success, positive on failure
410  *********************************************************************/
411
412 static int
413 ixl_attach(device_t dev)
414 {
415         struct ixl_pf   *pf;
416         struct i40e_hw  *hw;
417         struct ixl_vsi *vsi;
418         u16             bus;
419         int             error = 0;
420 #ifdef PCI_IOV
421         nvlist_t        *pf_schema, *vf_schema;
422         int             iov_error;
423 #endif
424
425         INIT_DEBUGOUT("ixl_attach: begin");
426
427         /* Allocate, clear, and link in our primary soft structure */
428         pf = device_get_softc(dev);
429         pf->dev = pf->osdep.dev = dev;
430         hw = &pf->hw;
431
432         /*
433         ** Note this assumes we have a single embedded VSI,
434         ** this could be enhanced later to allocate multiple
435         */
436         vsi = &pf->vsi;
437         vsi->dev = pf->dev;
438
439         /* Core Lock Init*/
440         IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
441
442         /* Set up the timer callout */
443         callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
444
445         /* Set up sysctls */
446         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
447             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
448             OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
449             pf, 0, ixl_set_flowcntl, "I", "Flow Control");
450
451         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
452             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
453             OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
454             pf, 0, ixl_set_advertise, "I", "Advertised Speed");
455
456         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
457             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
458             OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
459             pf, 0, ixl_current_speed, "A", "Current Port Speed");
460
461         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
462             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
463             OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
464             pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
465
466         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
467             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
468             OID_AUTO, "rx_itr", CTLFLAG_RW,
469             &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
470
471         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
472             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
473             OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
474             &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
475
476         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
477             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
478             OID_AUTO, "tx_itr", CTLFLAG_RW,
479             &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
480
481         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
482             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
483             OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
484             &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
485
486 #ifdef IXL_DEBUG_SYSCTL
487         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
488             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
489             OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
490             ixl_debug_info, "I", "Debug Information");
491
492         /* Debug shared-code message level */
493         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
494             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
495             OID_AUTO, "debug_mask", CTLFLAG_RW,
496             &pf->hw.debug_mask, 0, "Debug Message Level");
497
498         SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
499             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
500             OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
501             0, "PF/VF Virtual Channel debug level");
502
503         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
504             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
505             OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
506             pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");
507
508         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
509             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
510             OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
511             pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
512
513         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
514             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
515             OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
516             pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
517
518         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
519             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
520             OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
521             pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
522
523         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
524             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
525             OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
526             pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
527 #endif
528
529         /* Save off the PCI information */
530         hw->vendor_id = pci_get_vendor(dev);
531         hw->device_id = pci_get_device(dev);
532         hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
533         hw->subsystem_vendor_id =
534             pci_read_config(dev, PCIR_SUBVEND_0, 2);
535         hw->subsystem_device_id =
536             pci_read_config(dev, PCIR_SUBDEV_0, 2);
537
538         hw->bus.device = pci_get_slot(dev);
539         hw->bus.func = pci_get_function(dev);
540
541         pf->vc_debug_lvl = 1;
542
543         /* Do PCI setup - map BAR0, etc */
544         if (ixl_allocate_pci_resources(pf)) {
545                 device_printf(dev, "Allocation of PCI resources failed\n");
546                 error = ENXIO;
547                 goto err_out;
548         }
549
550         /* Establish a clean starting point */
551         i40e_clear_hw(hw);
552         error = i40e_pf_reset(hw);
553         if (error) {
554                 device_printf(dev,"PF reset failure %x\n", error);
555                 error = EIO;
556                 goto err_out;
557         }
558
559         /* Set admin queue parameters */
560         hw->aq.num_arq_entries = IXL_AQ_LEN;
561         hw->aq.num_asq_entries = IXL_AQ_LEN;
562         hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
563         hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
564
565         /* Initialize the shared code */
566         error = i40e_init_shared_code(hw);
567         if (error) {
568                 device_printf(dev,"Unable to initialize the shared code\n");
569                 error = EIO;
570                 goto err_out;
571         }
572
573         /* Set up the admin queue */
574         error = i40e_init_adminq(hw);
575         if (error) {
576                 device_printf(dev, "The driver for the device stopped "
577                     "because the NVM image is newer than expected.\n"
578                     "You must install the most recent version of "
579                     " the network driver.\n");
580                 goto err_out;
581         }
582         device_printf(dev, "%s\n", ixl_fw_version_str(hw));
583
584         if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
585             hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
586                 device_printf(dev, "The driver for the device detected "
587                     "a newer version of the NVM image than expected.\n"
588                     "Please install the most recent version of the network driver.\n");
589         else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
590             hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
591                 device_printf(dev, "The driver for the device detected "
592                     "an older version of the NVM image than expected.\n"
593                     "Please update the NVM image.\n");
594
595         /* Clear PXE mode */
596         i40e_clear_pxe_mode(hw);
597
598         /* Get capabilities from the device */
599         error = ixl_get_hw_capabilities(pf);
600         if (error) {
601                 device_printf(dev, "HW capabilities failure!\n");
602                 goto err_get_cap;
603         }
604
605         /* Set up host memory cache */
606         error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
607             hw->func_caps.num_rx_qp, 0, 0);
608         if (error) {
609                 device_printf(dev, "init_lan_hmc failed: %d\n", error);
610                 goto err_get_cap;
611         }
612
613         error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
614         if (error) {
615                 device_printf(dev, "configure_lan_hmc failed: %d\n", error);
616                 goto err_mac_hmc;
617         }
618
619         /* Disable LLDP from the firmware */
620         i40e_aq_stop_lldp(hw, TRUE, NULL);
621
622         i40e_get_mac_addr(hw, hw->mac.addr);
623         error = i40e_validate_mac_addr(hw->mac.addr);
624         if (error) {
625                 device_printf(dev, "validate_mac_addr failed: %d\n", error);
626                 goto err_mac_hmc;
627         }
628         bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
629         i40e_get_port_mac_addr(hw, hw->mac.port_addr);
630
631         /* Set up VSI and queues */
632         if (ixl_setup_stations(pf) != 0) { 
633                 device_printf(dev, "setup stations failed!\n");
634                 error = ENOMEM;
635                 goto err_mac_hmc;
636         }
637
638         /* Initialize mac filter list for VSI */
639         SLIST_INIT(&vsi->ftl);
640
641         /* Set up interrupt routing here */
642         if (pf->msix > 1)
643                 error = ixl_assign_vsi_msix(pf);
644         else
645                 error = ixl_assign_vsi_legacy(pf);
646         if (error) 
647                 goto err_late;
648
649         if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
650             (hw->aq.fw_maj_ver < 4)) {
651                 i40e_msec_delay(75);
652                 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
653                 if (error)
654                         device_printf(dev, "link restart failed, aq_err=%d\n",
655                             pf->hw.aq.asq_last_status);
656         }
657
658         /* Determine link state */
659         i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
660         i40e_get_link_status(hw, &pf->link_up);
661
662         /* Setup OS specific network interface */
663         if (ixl_setup_interface(dev, vsi) != 0) {
664                 device_printf(dev, "interface setup failed!\n");
665                 error = EIO;
666                 goto err_late;
667         }
668
669         error = ixl_switch_config(pf);
670         if (error) {
671                 device_printf(dev, "Initial switch config failed: %d\n", error);
672                 goto err_mac_hmc;
673         }
674
675         /* Limit phy interrupts to link and modules failure */
676         error = i40e_aq_set_phy_int_mask(hw, ~(I40E_AQ_EVENT_LINK_UPDOWN |
677                 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
678         if (error)
679                 device_printf(dev, "set phy mask failed: %d\n", error);
680
681         /* Get the bus configuration and set the shared code */
682         bus = ixl_get_bus_info(hw, dev);
683         i40e_set_pci_config_data(hw, bus);
684
685         /* Initialize statistics */
686         ixl_pf_reset_stats(pf);
687         ixl_update_stats_counters(pf);
688         ixl_add_hw_stats(pf);
689
690         /* Register for VLAN events */
691         vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
692             ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
693         vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
694             ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
695
696 #ifdef PCI_IOV
697         /* SR-IOV is only supported when MSI-X is in use. */
698         if (pf->msix > 1) {
699                 pf_schema = pci_iov_schema_alloc_node();
700                 vf_schema = pci_iov_schema_alloc_node();
701                 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
702                 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
703                     IOV_SCHEMA_HASDEFAULT, TRUE);
704                 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
705                     IOV_SCHEMA_HASDEFAULT, FALSE);
706                 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
707                     IOV_SCHEMA_HASDEFAULT, FALSE);
708
709                 iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
710                 if (iov_error != 0)
711                         device_printf(dev,
712                             "Failed to initialize SR-IOV (error=%d)\n",
713                             iov_error);
714         }
715 #endif
716
717 #ifdef DEV_NETMAP
718         ixl_netmap_attach(vsi);
719 #endif /* DEV_NETMAP */
720         INIT_DEBUGOUT("ixl_attach: end");
721         return (0);
722
723 err_late:
724         if (vsi->ifp != NULL)
725                 if_free(vsi->ifp);
726 err_mac_hmc:
727         i40e_shutdown_lan_hmc(hw);
728 err_get_cap:
729         i40e_shutdown_adminq(hw);
730 err_out:
731         ixl_free_pci_resources(pf);
732         ixl_free_vsi(vsi);
733         IXL_PF_LOCK_DESTROY(pf);
734         return (error);
735 }
736
737 /*********************************************************************
738  *  Device removal routine
739  *
740  *  The detach entry point is called when the driver is being removed.
741  *  This routine stops the adapter and deallocates all the resources
742  *  that were allocated for driver operation.
743  *
744  *  return 0 on success, positive on failure
745  *********************************************************************/
746
/*
 * ixl_detach - tear down one XL710 physical function.
 *
 * Refuses to detach while VLANs (or, with PCI_IOV, VFs) still reference
 * the interface.  Otherwise stops the adapter, drains and frees the
 * per-queue taskqueues, shuts down the LAN HMC and admin queue,
 * deregisters VLAN event handlers, and releases all remaining resources.
 *
 * Returns 0 on success, EBUSY/positive errno when the device is in use.
 */
static int
ixl_detach(device_t dev)
{
	struct ixl_pf		*pf = device_get_softc(dev);
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	i40e_status		status;
#ifdef PCI_IOV
	int			error;
#endif

	INIT_DEBUGOUT("ixl_detach: begin");

	/* Make sure VLANS are not using driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef PCI_IOV
	/* Likewise refuse to detach while VFs are attached */
	error = pci_iov_detach(dev);
	if (error != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (error);
	}
#endif

	/* Detach from the network stack, then stop the hardware if running */
	ether_ifdetach(vsi->ifp);
	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		IXL_PF_LOCK(pf);
		ixl_stop(pf);
		IXL_PF_UNLOCK(pf);
	}

	/* Drain and free the per-queue deferred-work taskqueues */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_drain(que->tq, &que->task);
			taskqueue_drain(que->tq, &que->tx_task);
			taskqueue_free(que->tq);
		}
	}

	/* Shutdown LAN HMC */
	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(dev,
		    "Shutdown LAN HMC failed with code %d\n", status);

	/* Shutdown admin queue */
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", status);

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	/* Wait out the local timer, then release everything else */
	callout_drain(&pf->timer);
#ifdef DEV_NETMAP
	netmap_detach(vsi->ifp);
#endif /* DEV_NETMAP */
	ixl_free_pci_resources(pf);
	bus_generic_detach(dev);
	if_free(vsi->ifp);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (0);
}
819
820 /*********************************************************************
821  *
822  *  Shutdown entry point
823  *
824  **********************************************************************/
825
826 static int
827 ixl_shutdown(device_t dev)
828 {
829         struct ixl_pf *pf = device_get_softc(dev);
830         IXL_PF_LOCK(pf);
831         ixl_stop(pf);
832         IXL_PF_UNLOCK(pf);
833         return (0);
834 }
835
836
837 /*********************************************************************
838  *
839  *  Get the hardware capabilities
840  *
841  **********************************************************************/
842
/*
 * Discover this PF's capabilities via the firmware admin queue.
 * i40e_aq_discover_capabilities() parses the response into
 * hw->func_caps as a side effect; on success the PF's base queue
 * index is captured into pf->qbase.
 *
 * Returns ENOMEM if the scratch buffer cannot be allocated, ENODEV
 * if the AQ command fails, otherwise the shared-code status of the
 * last discover call (0 on success).
 */
static int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	int		error, len;
	u16		needed;
	bool		again = TRUE;

	/* Initial guess: room for 40 capability records. */
	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	error = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_DEVBUF);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		/* Firmware reported the size it actually needs. */
		len = needed;
		goto retry;
	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
		device_printf(dev, "capability discovery failed: %d\n",
		    pf->hw.aq.asq_last_status);
		return (ENODEV);
	}

	/* Capture this PF's starting queue pair */
	pf->qbase = hw->func_caps.base_queue;

#ifdef IXL_DEBUG
	device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
	    hw->pf_id, hw->func_caps.num_vfs,
	    hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf,
	    hw->func_caps.fd_filters_guaranteed,
	    hw->func_caps.fd_filters_best_effort,
	    hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp,
	    hw->func_caps.base_queue);
#endif
	return (error);
}
894
/*
 * Apply a SIOCSIFCAP toggle mask to the TX checksum and TSO
 * capability bits, enforcing the dependency that TSO requires TX
 * checksum offload (separately for the IPv4 and IPv6 pairs).
 * The IXL_FLAGS_KEEP_TSO4/6 vsi flags remember that TSO was only
 * switched off because txcsum was disabled, so re-enabling txcsum
 * later restores TSO as well.
 */
static void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	device_t	dev = vsi->dev;

	/* Enable/disable TXCSUM/TSO4 */
	/* State 1: neither TXCSUM nor TSO4 currently enabled. */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			/* TSO4 alone requested: drag TXCSUM along. */
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			device_printf(dev,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	/* State 2: TXCSUM on, TSO4 off — either toggle is safe alone. */
	} else if((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	/* State 3: both on — disabling TXCSUM must also disable TSO4. */
	} else if((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			/* Remember TSO4 was on so it can be restored later. */
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			device_printf(dev, 
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6: same state machine as above. */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			device_printf(dev,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			device_printf(dev,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}
966
967 /*********************************************************************
968  *  Ioctl entry point
969  *
970  *  ixl_ioctl is called when the user wants to configure the
971  *  interface.
972  *
973  *  return 0 on success, positive on failure
974  **********************************************************************/
975
static int
ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = vsi->back;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int		error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixl_init(pf);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		/*
		 * NOTE(review): only the upper bound is enforced here;
		 * there is no lower-bound check on ifr_mtu — confirm the
		 * stack rejects sub-minimum values before this point.
		 */
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			error = EINVAL;
		} else {
			IXL_PF_LOCK(pf);
			ifp->if_mtu = ifr->ifr_mtu;
			/* Re-init so the rings pick up the new frame size. */
			vsi->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			    + ETHER_VLAN_ENCAP_LEN;
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXL_PF_LOCK(pf);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Already running: only reprogram promisc/
				 * allmulti if those bits actually changed.
				 */
				if ((ifp->if_flags ^ pf->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixl_set_promisc(vsi);
				}
			} else
				ixl_init_locked(pf);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixl_stop(pf);
		/* Remember the flags so the next delta can be computed. */
		pf->if_flags = ifp->if_flags;
		IXL_PF_UNLOCK(pf);
		break;
	case SIOCADDMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* Quiesce interrupts while the filter list changes. */
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_add_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_del_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
#ifdef IFM_ETH_XTYPE
	case SIOCGIFXMEDIA:
#endif
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
		break;
	case SIOCSIFCAP:
	{
		/* mask holds only the capability bits being toggled. */
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

		/* TXCSUM/TSO have interdependencies; handled separately. */
		ixl_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		/* Re-init so the hardware picks up the new offload set. */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		VLAN_CAPABILITIES(ifp);

		break;
	}

	default:
		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1113
1114
1115 /*********************************************************************
1116  *  Init entry point
1117  *
1118  *  This routine is used in two ways. It is used by the stack as
1119  *  init entry point in network interface structure. It is also used
1120  *  by the driver as a hw/sw initialization routine to get to a
1121  *  consistent state.
1122  *
 *  No return value; initialization errors are reported via device_printf().
1124  **********************************************************************/
1125
1126 static void
1127 ixl_init_locked(struct ixl_pf *pf)
1128 {
1129         struct i40e_hw  *hw = &pf->hw;
1130         struct ixl_vsi  *vsi = &pf->vsi;
1131         struct ifnet    *ifp = vsi->ifp;
1132         device_t        dev = pf->dev;
1133         struct i40e_filter_control_settings     filter;
1134         u8              tmpaddr[ETHER_ADDR_LEN];
1135         int             ret;
1136
1137         mtx_assert(&pf->pf_mtx, MA_OWNED);
1138         INIT_DEBUGOUT("ixl_init: begin");
1139         ixl_stop(pf);
1140
1141         /* Get the latest mac address... User might use a LAA */
1142         bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
1143               I40E_ETH_LENGTH_OF_ADDRESS);
1144         if (!cmp_etheraddr(hw->mac.addr, tmpaddr) && 
1145             (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
1146                 ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1147                 bcopy(tmpaddr, hw->mac.addr,
1148                     I40E_ETH_LENGTH_OF_ADDRESS);
1149                 ret = i40e_aq_mac_address_write(hw,
1150                     I40E_AQC_WRITE_TYPE_LAA_ONLY,
1151                     hw->mac.addr, NULL);
1152                 if (ret) {
1153                         device_printf(dev, "LLA address"
1154                          "change failed!!\n");
1155                         return;
1156                 } else {
1157                         ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1158                 }
1159         }
1160
1161         /* Set the various hardware offload abilities */
1162         ifp->if_hwassist = 0;
1163         if (ifp->if_capenable & IFCAP_TSO)
1164                 ifp->if_hwassist |= CSUM_TSO;
1165         if (ifp->if_capenable & IFCAP_TXCSUM)
1166                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1167         if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1168                 ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
1169
1170         /* Set up the device filtering */
1171         bzero(&filter, sizeof(filter));
1172         filter.enable_ethtype = TRUE;
1173         filter.enable_macvlan = TRUE;
1174 #ifdef IXL_FDIR
1175         filter.enable_fdir = TRUE;
1176 #endif
1177         if (i40e_set_filter_control(hw, &filter))
1178                 device_printf(dev, "set_filter_control() failed\n");
1179
1180         /* Set up RSS */
1181         ixl_config_rss(vsi);
1182
1183         /*
1184         ** Prepare the VSI: rings, hmc contexts, etc...
1185         */
1186         if (ixl_initialize_vsi(vsi)) {
1187                 device_printf(dev, "initialize vsi failed!!\n");
1188                 return;
1189         }
1190
1191         /* Add protocol filters to list */
1192         ixl_init_filters(vsi);
1193
1194         /* Setup vlan's if needed */
1195         ixl_setup_vlan_filters(vsi);
1196
1197         /* Start the local timer */
1198         callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1199
1200         /* Set up MSI/X routing and the ITR settings */
1201         if (ixl_enable_msix) {
1202                 ixl_configure_msix(pf);
1203                 ixl_configure_itr(pf);
1204         } else
1205                 ixl_configure_legacy(pf);
1206
1207         ixl_enable_rings(vsi);
1208
1209         i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
1210
1211         ixl_reconfigure_filters(vsi);
1212
1213         /* Set MTU in hardware*/
1214         int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
1215             TRUE, 0, NULL);
1216         if (aq_error)
1217                 device_printf(vsi->dev,
1218                         "aq_set_mac_config in init error, code %d\n",
1219                     aq_error);
1220
1221         /* And now turn on interrupts */
1222         ixl_enable_intr(vsi);
1223
1224         /* Now inform the stack we're ready */
1225         ifp->if_drv_flags |= IFF_DRV_RUNNING;
1226         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1227
1228         return;
1229 }
1230
/*
 * Stack-facing init entry point: take the PF lock and run the
 * locked initialization path.
 */
static void
ixl_init(void *arg)
{
	struct ixl_pf	*pf = (struct ixl_pf *)arg;

	IXL_PF_LOCK(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);
}
1241
1242 /*
1243 **
1244 ** MSIX Interrupt Handlers and Tasklets
1245 **
1246 */
/*
 * Deferred (taskqueue) per-queue service routine.  Does bounded RX
 * and TX cleanup outside interrupt context and re-schedules itself
 * while RX work remains; the queue interrupt is re-enabled only once
 * the queue is drained (or the interface is down).
 */
static void
ixl_handle_que(void *context, int pending)
{
	struct ixl_queue *que = context;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	struct ifnet	*ifp = vsi->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* Bounded RX pass; "more" means the limit was hit. */
		more = ixl_rxeof(que, IXL_RX_LIMIT);
		IXL_TX_LOCK(txr);
		ixl_txeof(que);
		/* Push out anything the stack queued meanwhile. */
		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		IXL_TX_UNLOCK(txr);
		if (more) {
			/* Still RX pending: run again before re-enabling. */
			taskqueue_enqueue(que->tq, &que->task);
			return;
		}
	}

	/* Reenable this interrupt - hmmm */
	ixl_enable_queue(hw, que->me);
	return;
}
1274
1275
1276 /*********************************************************************
1277  *
1278  *  Legacy Interrupt Service routine
1279  *
1280  **********************************************************************/
void
ixl_intr(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw =  &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	struct tx_ring		*txr = &que->txr;
	u32			reg, icr0, mask;
	bool			more_tx, more_rx;

	++que->irqs;

	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Snapshot the interrupt causes. */
	icr0 = rd32(hw, I40E_PFINT_ICR0);

	/* Ack the interrupt so the device can raise a new one. */
	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

#ifdef PCI_IOV
	/* VF reset events are serviced from the PF taskqueue. */
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
		taskqueue_enqueue(pf->tq, &pf->vflr_task);
#endif

	/* Admin-queue events defer to the taskqueue and end servicing. */
	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		taskqueue_enqueue(pf->tq, &pf->adminq);
		return;
	}

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);
	/* (more_tx/more_rx are not consulted further in the legacy path.) */

	/* re-enable other interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, mask);

	/* And now the queues */
	reg = rd32(hw, I40E_QINT_RQCTL(0));
	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = rd32(hw, I40E_QINT_TQCTL(0));
	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
	/*
	 * NOTE(review): this clears a PFINT_ICR0 bit in a QINT_TQCTL
	 * register value, which looks like the wrong mask for this
	 * register — confirm against the controller datasheet.
	 */
	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
	wr32(hw, I40E_QINT_TQCTL(0), reg);

	ixl_enable_legacy(hw);

	return;
}
1342
1343
1344 /*********************************************************************
1345  *
1346  *  MSIX VSI Interrupt Service routine
1347  *
1348  **********************************************************************/
void
ixl_msix_que(void *arg)
{
	struct ixl_queue	*que = arg;
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	bool		more_tx, more_rx;

	/* Protect against spurious interrupts */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	++que->irqs;

	/* Bounded RX pass; TRUE means the limit was hit. */
	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack 
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	/* Adapt the interrupt moderation to the observed traffic. */
	ixl_set_queue_rx_itr(que);
	ixl_set_queue_tx_itr(que);

	/* Leftover work goes to the taskqueue; otherwise re-arm. */
	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
	else
		ixl_enable_queue(hw, que->me);

	return;
}
1387
1388
1389 /*********************************************************************
1390  *
1391  *  MSIX Admin Queue Interrupt Service routine
1392  *
1393  **********************************************************************/
static void
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	u32		reg, mask;

	++pf->admin_irq;

	reg = rd32(hw, I40E_PFINT_ICR0);
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	/* Keep the AQ cause masked until the taskqueue has serviced it. */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	/* Malicious-driver detection is handled inline. */
	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ixl_handle_mdd_event(pf);
		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	}

#ifdef PCI_IOV
	/* VF reset events defer to the PF taskqueue. */
	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		taskqueue_enqueue(pf->tq, &pf->vflr_task);
	}
#endif

	/* Ack the interrupt so new causes can be asserted. */
	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	taskqueue_enqueue(pf->tq, &pf->adminq);
	return;
}
1429
1430 /*********************************************************************
1431  *
1432  *  Media Ioctl callback
1433  *
1434  *  This routine is called whenever the user queries the status of
1435  *  the interface using ifconfig.
1436  *
1437  **********************************************************************/
static void
ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = vsi->back;
	struct i40e_hw	*hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");
	IXL_PF_LOCK(pf);

	/* Force a fresh link query from firmware before reporting. */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* Link down: report valid-but-inactive and stop here. */
	if (!pf->link_up) {
		IXL_PF_UNLOCK(pf);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	/* Map the firmware-reported PHY type to an ifmedia subtype. */
	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
#ifndef IFM_ETH_XTYPE
		/* Older ifmedia: approximate backplane types. */
		case I40E_PHY_TYPE_1000BASE_KX:
			ifmr->ifm_active |= IFM_1000_CX;
			break;
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_CR1:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_KX4:
			ifmr->ifm_active |= IFM_10G_CX4;
			break;
		case I40E_PHY_TYPE_10GBASE_KR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_40GBASE_KR4:
		case I40E_PHY_TYPE_XLPPI:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
#else
		case I40E_PHY_TYPE_1000BASE_KX:
			ifmr->ifm_active |= IFM_1000_KX;
			break;
		/* ERJ: What's the difference between these? */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_CR1:
			ifmr->ifm_active |= IFM_10G_CR1;
			break;
		case I40E_PHY_TYPE_10GBASE_KX4:
			ifmr->ifm_active |= IFM_10G_KX4;
			break;
		case I40E_PHY_TYPE_10GBASE_KR:
			ifmr->ifm_active |= IFM_10G_KR;
			break;
		case I40E_PHY_TYPE_20GBASE_KR2:
			ifmr->ifm_active |= IFM_20G_KR2;
			break;
		case I40E_PHY_TYPE_40GBASE_KR4:
			ifmr->ifm_active |= IFM_40G_KR4;
			break;
		case I40E_PHY_TYPE_XLPPI:
			ifmr->ifm_active |= IFM_40G_XLPPI;
			break;
#endif
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;

	IXL_PF_UNLOCK(pf);

	return;
}
1560
1561 /*********************************************************************
1562  *
1563  *  Media Ioctl callback
1564  *
1565  *  This routine is called when the user changes speed/duplex using
1566  *  media/mediopt option with ifconfig.
1567  *
1568  **********************************************************************/
1569 static int
1570 ixl_media_change(struct ifnet * ifp)
1571 {
1572         struct ixl_vsi *vsi = ifp->if_softc;
1573         struct ifmedia *ifm = &vsi->media;
1574
1575         INIT_DEBUGOUT("ixl_media_change: begin");
1576
1577         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1578                 return (EINVAL);
1579
1580         if_printf(ifp, "Media change is currently not supported.\n");
1581
1582         return (ENODEV);
1583 }
1584
1585
1586 #ifdef IXL_FDIR
1587 /*
1588 ** ATR: Application Targetted Receive - creates a filter
1589 **      based on TX flow info that will keep the receive
1590 **      portion of the flow on the same queue. Based on the
1591 **      implementation this is only available for TCP connections
1592 */
void
ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
{
        struct ixl_vsi                  *vsi = que->vsi;
        struct tx_ring                  *txr = &que->txr;
        struct i40e_filter_program_desc *FDIR;
        u32                             ptype, dtype;
        int                             idx;

        /* check if ATR is enabled and sample rate */
        if ((!ixl_enable_fdir) || (!txr->atr_rate))
                return;
        /*
        ** We sample all TCP SYN/FIN packets,
        ** or at the selected sample rate 
        */
        txr->atr_count++;
        if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
            (txr->atr_count < txr->atr_rate))
                return;
        txr->atr_count = 0;

        /*
        ** Get a descriptor to use: the filter-program descriptor is
        ** written into the TX ring itself, so it consumes one ring slot.
        */
        idx = txr->next_avail;
        FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
        if (++idx == que->num_desc)
                idx = 0;
        txr->avail--;
        txr->next_avail = idx;

        /* QW0: receive queue index that should get the return traffic */
        ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
            I40E_TXD_FLTR_QW0_QINDEX_MASK;

        /* Packet classification type: TCP over IPv4 or IPv6 */
        ptype |= (etype == ETHERTYPE_IP) ?
            (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
            I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
            (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
            I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

        ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

        dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;

        /*
        ** We use the TCP TH_FIN as a trigger to remove
        ** the filter, otherwise its an update.
        */
        dtype |= (th->th_flags & TH_FIN) ?
            (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
            I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
            (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
            I40E_TXD_FLTR_QW1_PCMD_SHIFT);

        /* Matching packets are steered directly to the queue in QW0 */
        dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
            I40E_TXD_FLTR_QW1_DEST_SHIFT;

        dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
            I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

        FDIR->qindex_flex_ptype_vsi = htole32(ptype);
        FDIR->dtype_cmd_cntindex = htole32(dtype);
        return;
}
1656 #endif
1657
1658
1659 static void
1660 ixl_set_promisc(struct ixl_vsi *vsi)
1661 {
1662         struct ifnet    *ifp = vsi->ifp;
1663         struct i40e_hw  *hw = vsi->hw;
1664         int             err, mcnt = 0;
1665         bool            uni = FALSE, multi = FALSE;
1666
1667         if (ifp->if_flags & IFF_ALLMULTI)
1668                 multi = TRUE;
1669         else { /* Need to count the multicast addresses */
1670                 struct  ifmultiaddr *ifma;
1671                 if_maddr_rlock(ifp);
1672                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1673                         if (ifma->ifma_addr->sa_family != AF_LINK)
1674                                 continue;
1675                         if (mcnt == MAX_MULTICAST_ADDR)
1676                                 break;
1677                         mcnt++;
1678                 }
1679                 if_maddr_runlock(ifp);
1680         }
1681
1682         if (mcnt >= MAX_MULTICAST_ADDR)
1683                 multi = TRUE;
1684         if (ifp->if_flags & IFF_PROMISC)
1685                 uni = TRUE;
1686
1687         err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1688             vsi->seid, uni, NULL);
1689         err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1690             vsi->seid, multi, NULL);
1691         return;
1692 }
1693
1694 /*********************************************************************
1695  *      Filter Routines
1696  *
1697  *      Routines for multicast and vlan filter management.
1698  *
1699  *********************************************************************/
/*
 * Sync the interface's multicast membership into the hardware.
 *
 * If the group count reaches MAX_MULTICAST_ADDR the individual filters
 * are dropped and multicast promiscuous mode is enabled instead;
 * otherwise one MAC filter is staged per group and pushed to hardware.
 */
static void
ixl_add_multi(struct ixl_vsi *vsi)
{
        struct  ifmultiaddr     *ifma;
        struct ifnet            *ifp = vsi->ifp;
        struct i40e_hw          *hw = vsi->hw;
        int                     mcnt = 0, flags;

        IOCTL_DEBUGOUT("ixl_add_multi: begin");

        if_maddr_rlock(ifp);
        /*
        ** First just get a count, to decide if we
        ** we simply use multicast promiscuous.
        */
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                mcnt++;
        }
        if_maddr_runlock(ifp);

        if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
                /* delete existing MC filters */
                ixl_del_hw_filters(vsi, mcnt);
                i40e_aq_set_vsi_multicast_promiscuous(hw,
                    vsi->seid, TRUE, NULL);
                return;
        }

        /* Second pass: stage a software filter entry per group address */
        mcnt = 0;
        if_maddr_rlock(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                ixl_add_mc_filter(vsi,
                    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
                mcnt++;
        }
        if_maddr_runlock(ifp);
        if (mcnt > 0) {
                /* Program the staged entries into the hardware */
                flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
                ixl_add_hw_filters(vsi, flags, mcnt);
        }

        IOCTL_DEBUGOUT("ixl_add_multi: end");
        return;
}
1748
/*
 * Remove hardware multicast filters for addresses the stack no longer
 * has membership in.  Each in-use MC filter entry is checked against
 * the interface's current multicast list; unmatched entries are marked
 * IXL_FILTER_DEL and then purged from hardware in one batch.
 */
static void
ixl_del_multi(struct ixl_vsi *vsi)
{
        struct ifnet            *ifp = vsi->ifp;
        struct ifmultiaddr      *ifma;
        struct ixl_mac_filter   *f;
        int                     mcnt = 0;
        bool            match = FALSE;

        IOCTL_DEBUGOUT("ixl_del_multi: begin");

        /* Search for removed multicast addresses */
        if_maddr_rlock(ifp);
        SLIST_FOREACH(f, &vsi->ftl, next) {
                /* Only in-use multicast entries are candidates */
                if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
                        match = FALSE;
                        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                                if (ifma->ifma_addr->sa_family != AF_LINK)
                                        continue;
                                u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
                                if (cmp_etheraddr(f->macaddr, mc_addr)) {
                                        match = TRUE;
                                        break;
                                }
                        }
                        /* No longer a member: flag the entry for deletion */
                        if (match == FALSE) {
                                f->flags |= IXL_FILTER_DEL;
                                mcnt++;
                        }
                }
        }
        if_maddr_runlock(ifp);

        if (mcnt > 0)
                ixl_del_hw_filters(vsi, mcnt);
}
1785
1786
1787 /*********************************************************************
1788  *  Timer routine
1789  *
1790  *  This routine checks for link status,updates statistics,
1791  *  and runs the watchdog check.
1792  *
1793  **********************************************************************/
1794
static void
ixl_local_timer(void *arg)
{
        struct ixl_pf           *pf = arg;
        struct i40e_hw          *hw = &pf->hw;
        struct ixl_vsi          *vsi = &pf->vsi;
        struct ixl_queue        *que = vsi->queues;
        device_t                dev = pf->dev;
        int                     hung = 0;
        u32                     mask;

        /* Caller (callout or init path) must hold the PF lock */
        mtx_assert(&pf->pf_mtx, MA_OWNED);

        /* Fire off the adminq task */
        taskqueue_enqueue(pf->tq, &pf->adminq);

        /* Update stats */
        ixl_update_stats_counters(pf);

        /*
        ** Check status of the queues
        */
        mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
                I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);

        for (int i = 0; i < vsi->num_queues; i++,que++) {
                /* Any queues with outstanding work get a sw irq */
                if (que->busy)
                        wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
                /*
                ** Each time txeof runs without cleaning, but there
                ** are uncleaned descriptors it increments busy. If
                ** we get to 5 we declare it hung.
                */
                if (que->busy == IXL_QUEUE_HUNG) {
                        ++hung;
                        /* Mark the queue as inactive */
                        vsi->active_queues &= ~((u64)1 << que->me);
                        continue;
                } else {
                        /* Check if we've come back from hung */
                        if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
                                vsi->active_queues |= ((u64)1 << que->me);
                }
                if (que->busy >= IXL_MAX_TX_BUSY) {
#ifdef IXL_DEBUG
                        device_printf(dev,"Warning queue %d "
                            "appears to be hung!\n", i);
#endif
                        que->busy = IXL_QUEUE_HUNG;
                        ++hung;
                }
        }
        /* Only reinit if all queues show hung */
        if (hung == vsi->num_queues)
                goto hung;

        /* Re-arm for the next one-second tick */
        callout_reset(&pf->timer, hz, ixl_local_timer, pf);
        return;

hung:
        device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
        ixl_init_locked(pf);
}
1859
1860 /*
1861 ** Note: this routine updates the OS on the link state
1862 **      the real check of the hardware only happens with
1863 **      a link interrupt.
1864 */
1865 static void
1866 ixl_update_link_status(struct ixl_pf *pf)
1867 {
1868         struct ixl_vsi          *vsi = &pf->vsi;
1869         struct i40e_hw          *hw = &pf->hw;
1870         struct ifnet            *ifp = vsi->ifp;
1871         device_t                dev = pf->dev;
1872
1873         if (pf->link_up){ 
1874                 if (vsi->link_active == FALSE) {
1875                         pf->fc = hw->fc.current_mode;
1876                         if (bootverbose) {
1877                                 device_printf(dev,"Link is up %d Gbps %s,"
1878                                     " Flow Control: %s\n",
1879                                     ((pf->link_speed ==
1880                                     I40E_LINK_SPEED_40GB)? 40:10),
1881                                     "Full Duplex", ixl_fc_string[pf->fc]);
1882                         }
1883                         vsi->link_active = TRUE;
1884                         /*
1885                         ** Warn user if link speed on NPAR enabled
1886                         ** partition is not at least 10GB
1887                         */
1888                         if (hw->func_caps.npar_enable &&
1889                            (hw->phy.link_info.link_speed ==
1890                            I40E_LINK_SPEED_1GB ||
1891                            hw->phy.link_info.link_speed ==
1892                            I40E_LINK_SPEED_100MB))
1893                                 device_printf(dev, "The partition detected"
1894                                     "link speed that is less than 10Gbps\n");
1895                         if_link_state_change(ifp, LINK_STATE_UP);
1896                 }
1897         } else { /* Link down */
1898                 if (vsi->link_active == TRUE) {
1899                         if (bootverbose)
1900                                 device_printf(dev,"Link is Down\n");
1901                         if_link_state_change(ifp, LINK_STATE_DOWN);
1902                         vsi->link_active = FALSE;
1903                 }
1904         }
1905
1906         return;
1907 }
1908
1909 /*********************************************************************
1910  *
1911  *  This routine disables all traffic on the adapter by issuing a
1912  *  global reset on the MAC and deallocates TX/RX buffers.
1913  *
1914  **********************************************************************/
1915
1916 static void
1917 ixl_stop(struct ixl_pf *pf)
1918 {
1919         struct ixl_vsi  *vsi = &pf->vsi;
1920         struct ifnet    *ifp = vsi->ifp;
1921
1922         mtx_assert(&pf->pf_mtx, MA_OWNED);
1923
1924         INIT_DEBUGOUT("ixl_stop: begin\n");
1925         if (pf->num_vfs == 0)
1926                 ixl_disable_intr(vsi);
1927         else
1928                 ixl_disable_rings_intr(vsi);
1929         ixl_disable_rings(vsi);
1930
1931         /* Tell the stack that the interface is no longer active */
1932         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1933
1934         /* Stop the local timer */
1935         callout_stop(&pf->timer);
1936
1937         return;
1938 }
1939
1940
/*********************************************************************
 *
 *  Setup legacy INTx / MSI interrupt resources and handlers for the
 *  VSI: a single shared vector services both the admin queue and
 *  queue 0, with deferred work running on fast taskqueues.
 *
 **********************************************************************/
static int
ixl_assign_vsi_legacy(struct ixl_pf *pf)
{
        device_t        dev = pf->dev;
        struct          ixl_vsi *vsi = &pf->vsi;
        struct          ixl_queue *que = vsi->queues;
        int             error, rid = 0;

        /* MSI uses IRQ rid 1; legacy INTx uses rid 0 */
        if (pf->msix == 1)
                rid = 1;
        pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &rid, RF_SHAREABLE | RF_ACTIVE);
        if (pf->res == NULL) {
                device_printf(dev,"Unable to allocate"
                    " bus resource: vsi legacy/msi interrupt\n");
                return (ENXIO);
        }

        /* Set the handler function */
        error = bus_setup_intr(dev, pf->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixl_intr, pf, &pf->tag);
        if (error) {
                pf->res = NULL;
                device_printf(dev, "Failed to register legacy/msi handler");
                return (error);
        }
        bus_describe_intr(dev, pf->res, pf->tag, "irq0");
        /* Deferred TX start and per-queue work for queue 0 */
        TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
        TASK_INIT(&que->task, 0, ixl_handle_que, que);
        que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
            taskqueue_thread_enqueue, &que->tq);
        taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
            device_get_nameunit(dev));
        TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);

#ifdef PCI_IOV
        TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
#endif

        /* Separate taskqueue for admin queue processing */
        pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
            taskqueue_thread_enqueue, &pf->tq);
        taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
            device_get_nameunit(dev));

        return (0);
}
1993
1994
1995 /*********************************************************************
1996  *
1997  *  Setup MSIX Interrupt resources and handlers for the VSI
1998  *
1999  **********************************************************************/
2000 static int
2001 ixl_assign_vsi_msix(struct ixl_pf *pf)
2002 {
2003         device_t        dev = pf->dev;
2004         struct          ixl_vsi *vsi = &pf->vsi;
2005         struct          ixl_queue *que = vsi->queues;
2006         struct          tx_ring  *txr;
2007         int             error, rid, vector = 0;
2008
2009         /* Admin Que is vector 0*/
2010         rid = vector + 1;
2011         pf->res = bus_alloc_resource_any(dev,
2012             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2013         if (!pf->res) {
2014                 device_printf(dev,"Unable to allocate"
2015             " bus resource: Adminq interrupt [%d]\n", rid);
2016                 return (ENXIO);
2017         }
2018         /* Set the adminq vector and handler */
2019         error = bus_setup_intr(dev, pf->res,
2020             INTR_TYPE_NET | INTR_MPSAFE, NULL,
2021             ixl_msix_adminq, pf, &pf->tag);
2022         if (error) {
2023                 pf->res = NULL;
2024                 device_printf(dev, "Failed to register Admin que handler");
2025                 return (error);
2026         }
2027         bus_describe_intr(dev, pf->res, pf->tag, "aq");
2028         pf->admvec = vector;
2029         /* Tasklet for Admin Queue */
2030         TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
2031
2032 #ifdef PCI_IOV
2033         TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
2034 #endif
2035
2036         pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
2037             taskqueue_thread_enqueue, &pf->tq);
2038         taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
2039             device_get_nameunit(pf->dev));
2040         ++vector;
2041
2042         /* Now set up the stations */
2043         for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
2044                 int cpu_id = i;
2045                 rid = vector + 1;
2046                 txr = &que->txr;
2047                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2048                     RF_SHAREABLE | RF_ACTIVE);
2049                 if (que->res == NULL) {
2050                         device_printf(dev,"Unable to allocate"
2051                             " bus resource: que interrupt [%d]\n", vector);
2052                         return (ENXIO);
2053                 }
2054                 /* Set the handler function */
2055                 error = bus_setup_intr(dev, que->res,
2056                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
2057                     ixl_msix_que, que, &que->tag);
2058                 if (error) {
2059                         que->res = NULL;
2060                         device_printf(dev, "Failed to register que handler");
2061                         return (error);
2062                 }
2063                 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
2064                 /* Bind the vector to a CPU */
2065 #ifdef RSS
2066                 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2067 #endif
2068                 bus_bind_intr(dev, que->res, cpu_id);
2069                 que->msix = vector;
2070                 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
2071                 TASK_INIT(&que->task, 0, ixl_handle_que, que);
2072                 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
2073                     taskqueue_thread_enqueue, &que->tq);
2074 #ifdef RSS
2075                 taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
2076                     cpu_id, "%s (bucket %d)",
2077                     device_get_nameunit(dev), cpu_id);
2078 #else
2079                 taskqueue_start_threads(&que->tq, 1, PI_NET,
2080                     "%s que", device_get_nameunit(dev));
2081 #endif
2082         }
2083
2084         return (0);
2085 }
2086
2087
2088 /*
2089  * Allocate MSI/X vectors
2090  */
/*
 * Allocate MSI/X vectors
 *
 * Returns the number of vectors obtained (queues + 1 for the admin
 * queue) and sets pf->vsi.num_queues accordingly.  Falls back to a
 * single MSI or legacy interrupt when MSI-X is disabled, unavailable,
 * or insufficient; the fallback also rewrites the ixl_max_queues and
 * ixl_enable_msix tunables so later code sees a consistent config.
 */
static int
ixl_init_msix(struct ixl_pf *pf)
{
        device_t dev = pf->dev;
        int rid, want, vectors, queues, available;

        /* Override by tuneable */
        if (ixl_enable_msix == 0)
                goto msi;

        /*
        ** When used in a virtualized environment 
        ** PCI BUSMASTER capability may not be set
        ** so explicity set it here and rewrite
        ** the ENABLE in the MSIX control register
        ** at this point to cause the host to
        ** successfully initialize us.
        */
        {
                u16 pci_cmd_word;
                int msix_ctrl;
                pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
                pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
                pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
                pci_find_cap(dev, PCIY_MSIX, &rid);
                rid += PCIR_MSIX_CTRL;
                msix_ctrl = pci_read_config(dev, rid, 2);
                msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
                pci_write_config(dev, rid, msix_ctrl, 2);
        }

        /* First try MSI/X */
        rid = PCIR_BAR(IXL_BAR);
        pf->msix_mem = bus_alloc_resource_any(dev,
            SYS_RES_MEMORY, &rid, RF_ACTIVE);
        if (!pf->msix_mem) {
                /* May not be enabled */
                device_printf(pf->dev,
                    "Unable to map MSIX table \n");
                goto msi;
        }

        available = pci_msix_count(dev);
        if (available == 0) { /* system has msix disabled */
                bus_release_resource(dev, SYS_RES_MEMORY,
                    rid, pf->msix_mem);
                pf->msix_mem = NULL;
                goto msi;
        }

        /* Figure out a reasonable auto config value */
        queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;

        /* Override with hardcoded value if sane */
        if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
                queues = ixl_max_queues;

#ifdef  RSS
        /* If we're doing RSS, clamp at the number of RSS buckets */
        if (queues > rss_getnumbuckets())
                queues = rss_getnumbuckets();
#endif

        /*
        ** Want one vector (RX/TX pair) per queue
        ** plus an additional for the admin queue.
        */
        want = queues + 1;
        if (want <= available)  /* Have enough */
                vectors = want;
        else {
                device_printf(pf->dev,
                    "MSIX Configuration Problem, "
                    "%d vectors available but %d wanted!\n",
                    available, want);
                return (0); /* Will go to Legacy setup */
        }

        if (pci_alloc_msix(dev, &vectors) == 0) {
                device_printf(pf->dev,
                    "Using MSIX interrupts with %d vectors\n", vectors);
                pf->msix = vectors;
                pf->vsi.num_queues = queues;
#ifdef RSS
                /*
                 * If we're doing RSS, the number of queues needs to
                 * match the number of RSS buckets that are configured.
                 *
                 * + If there's more queues than RSS buckets, we'll end
                 *   up with queues that get no traffic.
                 *
                 * + If there's more RSS buckets than queues, we'll end
                 *   up having multiple RSS buckets map to the same queue,
                 *   so there'll be some contention.
                 */
                if (queues != rss_getnumbuckets()) {
                        device_printf(dev,
                            "%s: queues (%d) != RSS buckets (%d)"
                            "; performance will be impacted.\n",
                            __func__, queues, rss_getnumbuckets());
                }
#endif
                return (vectors);
        }
msi:
        /* MSI / legacy fallback: single queue, clamp the tunables too */
        vectors = pci_msi_count(dev);
        pf->vsi.num_queues = 1;
        pf->msix = 1;
        ixl_max_queues = 1;
        ixl_enable_msix = 0;
        if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
                device_printf(pf->dev,"Using an MSI interrupt\n");
        else {
                pf->msix = 0;
                device_printf(pf->dev,"Using a Legacy interrupt\n");
        }
        return (vectors);
}
2209
2210
2211 /*
2212  * Plumb MSI/X vectors
2213  */
static void
ixl_configure_msix(struct ixl_pf *pf)
{
        struct i40e_hw  *hw = &pf->hw;
        struct ixl_vsi *vsi = &pf->vsi;
        u32             reg;
        u16             vector = 1;

        /* First set up the adminq - vector 0 */
        wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
        rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

        /*
        ** Enable the "other" interrupt causes routed to vector 0.
        ** NOTE(review): the HMC error bit uses I40E_PFINT_ICR0_HMC_ERR_MASK
        ** (no _ENA_) while the rest use _ENA_ masks -- verify the bit
        ** positions match between the ICR0 and ICR0_ENA registers.
        */
        reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
            I40E_PFINT_ICR0_ENA_GRST_MASK |
            I40E_PFINT_ICR0_HMC_ERR_MASK |
            I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
            I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
            I40E_PFINT_ICR0_ENA_VFLR_MASK |
            I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
        wr32(hw, I40E_PFINT_ICR0_ENA, reg);

        /* No queue list on vector 0; 0x7FF terminates the list */
        wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
        /* Initial ITR for vector 0 (raw register value -- TODO confirm units) */
        wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);

        wr32(hw, I40E_PFINT_DYN_CTL0,
            I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
            I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

        wr32(hw, I40E_PFINT_STAT_CTL0, 0);

        /* Next configure the queues */
        for (int i = 0; i < vsi->num_queues; i++, vector++) {
                wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
                wr32(hw, I40E_PFINT_LNKLSTN(i), i);

                /* RX cause: enable, and chain to the same-numbered TX queue */
                reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
                (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
                (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
                (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
                (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
                wr32(hw, I40E_QINT_RQCTL(i), reg);

                /* TX cause: chain to the next RX queue, or end the list */
                reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
                (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
                (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
                ((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
                (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
                if (i == (vsi->num_queues - 1))
                        reg |= (IXL_QUEUE_EOL
                            << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
                wr32(hw, I40E_QINT_TQCTL(i), reg);
        }
}
2267
2268 /*
2269  * Configure for MSI single vector operation 
2270  */
static void
ixl_configure_legacy(struct ixl_pf *pf)
{
        struct i40e_hw  *hw = &pf->hw;
        u32             reg;


        /* Zero both ITR indices for vector 0 (no throttling) */
        wr32(hw, I40E_PFINT_ITR0(0), 0);
        wr32(hw, I40E_PFINT_ITR0(1), 0);


        /* Setup "other" causes */
        reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
            | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
            | I40E_PFINT_ICR0_ENA_GRST_MASK
            | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
            | I40E_PFINT_ICR0_ENA_GPIO_MASK
            | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
            | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
            | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
            | I40E_PFINT_ICR0_ENA_VFLR_MASK
            | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
            ;
        wr32(hw, I40E_PFINT_ICR0_ENA, reg);

        /* SW_ITR_IDX = 0, but don't change INTENA */
        wr32(hw, I40E_PFINT_DYN_CTL0,
            I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
            I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
        /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
        wr32(hw, I40E_PFINT_STAT_CTL0, 0);

        /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
        wr32(hw, I40E_PFINT_LNKLST0, 0);

        /*
        ** Associate the queue pair to the vector and enable the q int.
        ** NOTE(review): the NEXTQ_TYPE shift below is the TQCTL constant
        ** used while composing an RQCTL value; the two registers likely
        ** share the field position, but verify against the register spec.
        */
        reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
            | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
            | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
        wr32(hw, I40E_QINT_RQCTL(0), reg);

        /* TX cause for queue 0; EOL terminates the queue list */
        reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
            | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
            | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
        wr32(hw, I40E_QINT_TQCTL(0), reg);

        /* Next enable the queue pair */
        reg = rd32(hw, I40E_QTX_ENA(0));
        reg |= I40E_QTX_ENA_QENA_REQ_MASK;
        wr32(hw, I40E_QTX_ENA(0), reg);

        reg = rd32(hw, I40E_QRX_ENA(0));
        reg |= I40E_QRX_ENA_QENA_REQ_MASK;
        wr32(hw, I40E_QRX_ENA(0), reg);
}
2326
2327
2328 /*
2329  * Set the Initial ITR state
2330  */
2331 static void
2332 ixl_configure_itr(struct ixl_pf *pf)
2333 {
2334         struct i40e_hw          *hw = &pf->hw;
2335         struct ixl_vsi          *vsi = &pf->vsi;
2336         struct ixl_queue        *que = vsi->queues;
2337
2338         vsi->rx_itr_setting = ixl_rx_itr;
2339         if (ixl_dynamic_rx_itr)
2340                 vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2341         vsi->tx_itr_setting = ixl_tx_itr;
2342         if (ixl_dynamic_tx_itr)
2343                 vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2344         
2345         for (int i = 0; i < vsi->num_queues; i++, que++) {
2346                 struct tx_ring  *txr = &que->txr;
2347                 struct rx_ring  *rxr = &que->rxr;
2348
2349                 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2350                     vsi->rx_itr_setting);
2351                 rxr->itr = vsi->rx_itr_setting;
2352                 rxr->latency = IXL_AVE_LATENCY;
2353                 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2354                     vsi->tx_itr_setting);
2355                 txr->itr = vsi->tx_itr_setting;
2356                 txr->latency = IXL_AVE_LATENCY;
2357         }
2358 }
2359
2360
2361 static int
2362 ixl_allocate_pci_resources(struct ixl_pf *pf)
2363 {
2364         int             rid;
2365         device_t        dev = pf->dev;
2366
2367         rid = PCIR_BAR(0);
2368         pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2369             &rid, RF_ACTIVE);
2370
2371         if (!(pf->pci_mem)) {
2372                 device_printf(dev,"Unable to allocate bus resource: memory\n");
2373                 return (ENXIO);
2374         }
2375
2376         pf->osdep.mem_bus_space_tag =
2377                 rman_get_bustag(pf->pci_mem);
2378         pf->osdep.mem_bus_space_handle =
2379                 rman_get_bushandle(pf->pci_mem);
2380         pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2381         pf->osdep.flush_reg = I40E_GLGEN_STAT;
2382         pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2383
2384         pf->hw.back = &pf->osdep;
2385
2386         /*
2387         ** Now setup MSI or MSI/X, should
2388         ** return us the number of supported
2389         ** vectors. (Will be 1 for MSI)
2390         */
2391         pf->msix = ixl_init_msix(pf);
2392         return (0);
2393 }
2394
/*
 * Release all PCI resources acquired during attach: per-queue MSI-X
 * interrupts, the AdminQ interrupt, the MSI-X table BAR, and the
 * register BAR.  Safe to call on a partially attached device.
 */
static void
ixl_free_pci_resources(struct ixl_pf * pf)
{
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = pf->dev;
	int			rid, memrid;

	/* BAR holding the MSI-X table (distinct from the register BAR 0) */
	memrid = PCIR_BAR(IXL_BAR);

	/* We may get here before stations are setup */
	if ((!ixl_enable_msix) || (que == NULL))
		goto early;

	/*
	**  Release all msix VSI resources:
	*/
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		/* IRQ rid is the MSI-X vector number offset by one */
		rid = que->msix + 1;
		if (que->tag != NULL) {
			/* Tear down the handler before releasing the IRQ */
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}

early:
	/* Clean the AdminQ interrupt last */
	if (pf->admvec) /* we are doing MSIX */
		rid = pf->admvec + 1;
	else
		/* MSI uses rid 1; legacy INTx uses rid 0 */
		(pf->msix != 0) ? (rid = 1):(rid = 0);

	if (pf->tag != NULL) {
		bus_teardown_intr(dev, pf->res, pf->tag);
		pf->tag = NULL;
	}
	if (pf->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);

	/* Give the MSI/MSI-X vectors back to the PCI layer */
	if (pf->msix)
		pci_release_msi(dev);

	if (pf->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, pf->msix_mem);

	if (pf->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), pf->pci_mem);

	return;
}
2449
/*
 * Register ifmedia entries for every media type the firmware reports.
 *
 * phy_type is a bitmask indexed by the I40E_PHY_TYPE_* enum values,
 * as returned by i40e_aq_get_phy_capabilities().  Each supported PHY
 * type is mapped onto the closest IFM_* media word.
 */
static void
ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
{
	/* Display supported media types */
	if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	/* Several 10G attachment flavors all present as twinax/DA */
	if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
	    phy_type & (1 << I40E_PHY_TYPE_XFI) ||
	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
	    phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

#ifndef IFM_ETH_XTYPE
	/*
	 * Older ifmedia headers lack the extended backplane/AOC media
	 * words, so map those PHY types onto the nearest legacy word.
	 */
	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1) ||
	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
	    phy_type & (1 << I40E_PHY_TYPE_SFI))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
#else
	/* Newer ifmedia: the extended media words exist, use them directly */
	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_SFI))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
#endif
}
2530
2531 /*********************************************************************
2532  *
2533  *  Setup networking device structure and register an interface.
2534  *
2535  **********************************************************************/
2536 static int
2537 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2538 {
2539         struct ifnet            *ifp;
2540         struct i40e_hw          *hw = vsi->hw;
2541         struct ixl_queue        *que = vsi->queues;
2542         struct i40e_aq_get_phy_abilities_resp abilities;
2543         enum i40e_status_code aq_error = 0;
2544
2545         INIT_DEBUGOUT("ixl_setup_interface: begin");
2546
2547         ifp = vsi->ifp = if_alloc(IFT_ETHER);
2548         if (ifp == NULL) {
2549                 device_printf(dev, "can not allocate ifnet structure\n");
2550                 return (-1);
2551         }
2552         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2553         ifp->if_mtu = ETHERMTU;
2554         if_initbaudrate(ifp, IF_Gbps(40));
2555         ifp->if_init = ixl_init;
2556         ifp->if_softc = vsi;
2557         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2558         ifp->if_ioctl = ixl_ioctl;
2559
2560 #if __FreeBSD_version >= 1100036
2561         if_setgetcounterfn(ifp, ixl_get_counter);
2562 #endif
2563
2564         ifp->if_transmit = ixl_mq_start;
2565
2566         ifp->if_qflush = ixl_qflush;
2567
2568         ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2569
2570         vsi->max_frame_size =
2571             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2572             + ETHER_VLAN_ENCAP_LEN;
2573
2574         /*
2575          * Tell the upper layer(s) we support long frames.
2576          */
2577         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2578
2579         ifp->if_capabilities |= IFCAP_HWCSUM;
2580         ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2581         ifp->if_capabilities |= IFCAP_TSO;
2582         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2583         ifp->if_capabilities |= IFCAP_LRO;
2584
2585         /* VLAN capabilties */
2586         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2587                              |  IFCAP_VLAN_HWTSO
2588                              |  IFCAP_VLAN_MTU
2589                              |  IFCAP_VLAN_HWCSUM;
2590         ifp->if_capenable = ifp->if_capabilities;
2591
2592         /*
2593         ** Don't turn this on by default, if vlans are
2594         ** created on another pseudo device (eg. lagg)
2595         ** then vlan events are not passed thru, breaking
2596         ** operation, but with HW FILTER off it works. If
2597         ** using vlans directly on the ixl driver you can
2598         ** enable this and get full hardware tag filtering.
2599         */
2600         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2601
2602         /*
2603          * Specify the media types supported by this adapter and register
2604          * callbacks to update media and link information
2605          */
2606         ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2607                      ixl_media_status);
2608
2609         aq_error = i40e_aq_get_phy_capabilities(hw,
2610             FALSE, TRUE, &abilities, NULL);
2611         /* May need delay to detect fiber correctly */
2612         if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2613                 i40e_msec_delay(200);
2614                 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2615                     TRUE, &abilities, NULL);
2616         }
2617         if (aq_error) {
2618                 if (aq_error == I40E_ERR_UNKNOWN_PHY)
2619                         device_printf(dev, "Unknown PHY type detected!\n");
2620                 else
2621                         device_printf(dev,
2622                             "Error getting supported media types, err %d,"
2623                             " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2624                 return (0);
2625         }
2626
2627         ixl_add_ifmedia(vsi, abilities.phy_type);
2628
2629         /* Use autoselect media by default */
2630         ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2631         ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2632
2633         ether_ifattach(ifp, hw->mac.addr);
2634
2635         return (0);
2636 }
2637
2638 /*
2639 ** Run when the Admin Queue gets a
2640 ** link transition interrupt.
2641 */
2642 static void
2643 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
2644 {
2645         struct i40e_hw  *hw = &pf->hw; 
2646         struct i40e_aqc_get_link_status *status =
2647             (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
2648         bool check;
2649
2650         hw->phy.get_link_info = TRUE;
2651         i40e_get_link_status(hw, &check);
2652         pf->link_up = check;
2653 #ifdef IXL_DEBUG
2654         printf("Link is %s\n", check ? "up":"down");
2655 #endif
2656         /* Report if Unqualified modules are found */
2657         if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2658             (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
2659             (!(status->link_info & I40E_AQ_LINK_UP)))
2660                 device_printf(pf->dev, "Link failed because "
2661                     "an unqualified module was detected\n");
2662
2663         return;
2664 }
2665
2666 /*********************************************************************
2667  *
2668  *  Get Firmware Switch configuration
2669  *      - this will need to be more robust when more complex
2670  *        switch configurations are enabled.
2671  *
2672  **********************************************************************/
2673 static int
2674 ixl_switch_config(struct ixl_pf *pf)
2675 {
2676         struct i40e_hw  *hw = &pf->hw; 
2677         struct ixl_vsi  *vsi = &pf->vsi;
2678         device_t        dev = vsi->dev;
2679         struct i40e_aqc_get_switch_config_resp *sw_config;
2680         u8      aq_buf[I40E_AQ_LARGE_BUF];
2681         int     ret;
2682         u16     next = 0;
2683
2684         memset(&aq_buf, 0, sizeof(aq_buf));
2685         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2686         ret = i40e_aq_get_switch_config(hw, sw_config,
2687             sizeof(aq_buf), &next, NULL);
2688         if (ret) {
2689                 device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n",
2690                     ret);
2691                 return (ret);
2692         }
2693 #ifdef IXL_DEBUG
2694         device_printf(dev,
2695             "Switch config: header reported: %d in structure, %d total\n",
2696             sw_config->header.num_reported, sw_config->header.num_total);
2697         for (int i = 0; i < sw_config->header.num_reported; i++) {
2698                 device_printf(dev,
2699                     "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2700                     sw_config->element[i].element_type,
2701                     sw_config->element[i].seid,
2702                     sw_config->element[i].uplink_seid,
2703                     sw_config->element[i].downlink_seid);
2704         }
2705 #endif
2706         /* Simplified due to a single VSI at the moment */
2707         vsi->uplink_seid = sw_config->element[0].uplink_seid;
2708         vsi->downlink_seid = sw_config->element[0].downlink_seid;
2709         vsi->seid = sw_config->element[0].seid;
2710         return (ret);
2711 }
2712
2713 /*********************************************************************
2714  *
2715  *  Initialize the VSI:  this handles contexts, which means things
2716  *                       like the number of descriptors, buffer size,
2717  *                       plus we init the rings thru this function.
2718  *
2719  **********************************************************************/
static int
ixl_initialize_vsi(struct ixl_vsi *vsi)
{
	struct ixl_pf		*pf = vsi->back;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = vsi->dev;
	struct i40e_hw		*hw = vsi->hw;
	struct i40e_vsi_context ctxt;
	int			err = 0;

	/* Read the current VSI context from firmware as our baseline */
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = vsi->seid;
	if (pf->veb_seid != 0)
		ctxt.uplink_seid = pf->veb_seid;
	ctxt.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev,"get vsi params failed %x!!\n", err);
		return (err);
	}
#ifdef IXL_DEBUG
	printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
	    ctxt.uplink_seid, ctxt.vsi_number,
	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
#endif
	/*
	** Set the queue and traffic class bits
	**  - when multiple traffic classes are supported
	**    this will need to be more robust.
	*/
	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
	ctxt.info.queue_mapping[0] = 0; 
	ctxt.info.tc_mapping[0] = 0x0800; 

	/* Set VLAN receive stripping mode */
	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
	else
	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	/* Keep copy of VSI info in VSI for statistic counters */
	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));

	/* Reset VSI statistics */
	ixl_vsi_reset_stats(vsi);
	vsi->hw_filters_add = 0;
	vsi->hw_filters_del = 0;

	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);

	/* Push the modified context back to firmware */
	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev,"update vsi params failed %x!!\n",
		   hw->aq.asq_last_status);
		return (err);
	}

	/* Program the HMC TX and RX contexts for every queue pair */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring		*txr = &que->txr;
		struct rx_ring		*rxr = &que->rxr;
		struct i40e_hmc_obj_txq tctx;
		struct i40e_hmc_obj_rxq rctx;
		u32			txctl;
		/* NOTE(review): `size` is computed but never used below */
		u16			size;


		/* Setup the HMC TX Context  */
		size = que->num_desc * sizeof(struct i40e_tx_desc);
		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
		tctx.new_context = 1;
		/* Hardware takes the descriptor base in 128-byte units */
		tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
		tctx.qlen = que->num_desc;
		tctx.fc_ena = 0;
		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
		/* Enable HEAD writeback */
		tctx.head_wb_ena = 1;
		/* Head writeback lands just past the descriptor ring */
		tctx.head_wb_addr = txr->dma.pa +
		    (que->num_desc * sizeof(struct i40e_tx_desc));
		tctx.rdylist_act = 0;
		/* Clear the old context before programming the new one */
		err = i40e_clear_lan_tx_queue_context(hw, i);
		if (err) {
			device_printf(dev, "Unable to clear TX context\n");
			break;
		}
		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
		if (err) {
			device_printf(dev, "Unable to set TX context\n");
			break;
		}
		/* Associate the ring with this PF */
		txctl = I40E_QTX_CTL_PF_QUEUE;
		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
		wr32(hw, I40E_QTX_CTL(i), txctl);
		ixl_flush(hw);

		/* Do ring (re)init */
		ixl_init_tx_ring(que);

		/* Next setup the HMC RX Context  */
		if (vsi->max_frame_size <= MCLBYTES)
			rxr->mbuf_sz = MCLBYTES;
		else
			rxr->mbuf_sz = MJUMPAGESIZE;

		/* Largest frame the chip can chain together from rx buffers */
		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;

		/* Set up an RX context for the HMC */
		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
		/* ignore header split for now */
		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
		    vsi->max_frame_size : max_rxmax;
		rctx.dtype = 0;
		rctx.dsize = 1; /* do 32byte descriptors */
		rctx.hsplit_0 = 0;  /* no HDR split initially */
		rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
		rctx.qlen = que->num_desc;
		rctx.tphrdesc_ena = 1;
		rctx.tphwdesc_ena = 1;
		rctx.tphdata_ena = 0;
		rctx.tphhead_ena = 0;
		rctx.lrxqthresh = 2;
		rctx.crcstrip = 1;
		rctx.l2tsel = 1;
		rctx.showiv = 1;
		rctx.fc_ena = 0;
		rctx.prefena = 1;

		err = i40e_clear_lan_rx_queue_context(hw, i);
		if (err) {
			device_printf(dev,
			    "Unable to clear RX context %d\n", i);
			break;
		}
		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
		if (err) {
			device_printf(dev, "Unable to set RX context %d\n", i);
			break;
		}
		err = ixl_init_rx_ring(que);
		if (err) {
			device_printf(dev, "Fail in init_rx_ring %d\n", i);
			break;
		}
		/* Reset the RX tail pointer before (re)filling the ring */
		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
#ifdef DEV_NETMAP
		/* preserve queue */
		if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(vsi->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
			wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
		} else
#endif /* DEV_NETMAP */
		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
	}
	return (err);
}
2887
2888
2889 /*********************************************************************
2890  *
2891  *  Free all VSI structs.
2892  *
2893  **********************************************************************/
void
ixl_free_vsi(struct ixl_vsi *vsi)
{
	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
	struct ixl_queue	*que = vsi->queues;

	/* Free station queues */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		struct rx_ring *rxr = &que->rxr;
	
		/*
		 * The mutexes double as "was this ring set up" flags;
		 * skip rings that attach never finished initializing.
		 */
		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
			continue;
		IXL_TX_LOCK(txr);
		ixl_free_que_tx(que);
		if (txr->base)
			i40e_free_dma_mem(&pf->hw, &txr->dma);
		IXL_TX_UNLOCK(txr);
		IXL_TX_LOCK_DESTROY(txr);

		/*
		 * Setup initializes the TX lock before the RX lock, so
		 * the RX side may be uninitialized even when TX is not.
		 */
		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
			continue;
		IXL_RX_LOCK(rxr);
		ixl_free_que_rx(que);
		if (rxr->base)
			i40e_free_dma_mem(&pf->hw, &rxr->dma);
		IXL_RX_UNLOCK(rxr);
		IXL_RX_LOCK_DESTROY(rxr);
		
	}
	free(vsi->queues, M_DEVBUF);

	/* Free VSI filter list */
	ixl_free_mac_filters(vsi);
}
2929
2930 static void
2931 ixl_free_mac_filters(struct ixl_vsi *vsi)
2932 {
2933         struct ixl_mac_filter *f;
2934
2935         while (!SLIST_EMPTY(&vsi->ftl)) {
2936                 f = SLIST_FIRST(&vsi->ftl);
2937                 SLIST_REMOVE_HEAD(&vsi->ftl, next);
2938                 free(f, M_DEVBUF);
2939         }
2940 }
2941
2942
2943 /*********************************************************************
2944  *
2945  *  Allocate memory for the VSI (virtual station interface) and their
2946  *  associated queues, rings and the descriptors associated with each,
2947  *  called only once at attach.
2948  *
2949  **********************************************************************/
static int
ixl_setup_stations(struct ixl_pf *pf)
{
	device_t		dev = pf->dev;
	struct ixl_vsi		*vsi;
	struct ixl_queue	*que;
	struct tx_ring		*txr;
	struct rx_ring		*rxr;
	int			rsize, tsize;
	int			error = I40E_SUCCESS;

	vsi = &pf->vsi;
	vsi->back = (void *)pf;
	vsi->hw = &pf->hw;
	vsi->id = 0;
	vsi->num_vlans = 0;
	/* NOTE(review): duplicate of the vsi->back assignment above */
	vsi->back = pf;

	/* Get memory for the station queues */
	if (!(vsi->queues =
	    (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		error = ENOMEM;
		goto early;
	}

	/* Per queue: TX descriptor ring + soft state, then RX likewise */
	for (int i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		que->num_desc = ixl_ringsz;
		que->me = i;
		que->vsi = vsi;
		/* mark the queue as active */
		vsi->active_queues |= (u64)1 << que->me;
		txr = &que->txr;
		txr->que = que;
		txr->tail = I40E_QTX_TAIL(que->me);

		/* Initialize the TX lock */
		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(dev), que->me);
		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
		/* Create the TX descriptor ring */
		/* Extra u32 beyond the ring holds the HEAD writeback value */
		tsize = roundup2((que->num_desc *
		    sizeof(struct i40e_tx_desc)) +
		    sizeof(u32), DBA_ALIGN);
		if (i40e_allocate_dma_mem(&pf->hw,
		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
			device_printf(dev,
			    "Unable to allocate TX Descriptor memory\n");
			error = ENOMEM;
			goto fail;
		}
		txr->base = (struct i40e_tx_desc *)txr->dma.va;
		bzero((void *)txr->base, tsize);
		/* Now allocate transmit soft structs for the ring */
		if (ixl_allocate_tx_data(que)) {
			device_printf(dev,
			    "Critical Failure setting up TX structures\n");
			error = ENOMEM;
			goto fail;
		}
		/* Allocate a buf ring */
		txr->br = buf_ring_alloc(4096, M_DEVBUF,
		    M_WAITOK, &txr->mtx);
		/*
		 * NOTE(review): with M_WAITOK this allocation sleeps
		 * rather than failing, so this NULL check appears to be
		 * dead code — confirm against buf_ring(9)/malloc(9).
		 */
		if (txr->br == NULL) {
			device_printf(dev,
			    "Critical Failure setting up TX buf ring\n");
			error = ENOMEM;
			goto fail;
		}

		/*
		 * Next the RX queues...
		 */ 
		rsize = roundup2(que->num_desc *
		    sizeof(union i40e_rx_desc), DBA_ALIGN);
		rxr = &que->rxr;
		rxr->que = que;
		rxr->tail = I40E_QRX_TAIL(que->me);

		/* Initialize the RX side lock */
		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(dev), que->me);
		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);

		if (i40e_allocate_dma_mem(&pf->hw,
		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
			device_printf(dev,
			    "Unable to allocate RX Descriptor memory\n");
			error = ENOMEM;
			goto fail;
		}
		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
		bzero((void *)rxr->base, rsize);

		/* Allocate receive soft structs for the ring*/
		if (ixl_allocate_rx_data(que)) {
			device_printf(dev,
			    "Critical Failure setting up receive structs\n");
			error = ENOMEM;
			goto fail;
		}
	}

	return (0);

fail:
	/*
	 * NOTE(review): this unwind releases only the DMA descriptor
	 * memory; TX/RX soft structs, the buf ring, and the mutexes
	 * initialized above are not released here — verify whether
	 * detach covers them or they leak on a mid-loop failure.
	 */
	for (int i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		rxr = &que->rxr;
		txr = &que->txr;
		if (rxr->base)
			i40e_free_dma_mem(&pf->hw, &rxr->dma);
		if (txr->base)
			i40e_free_dma_mem(&pf->hw, &txr->dma);
	}

early:
	return (error);
}
3071
3072 /*
3073 ** Provide a update to the queue RX
3074 ** interrupt moderation value.
3075 */
3076 static void
3077 ixl_set_queue_rx_itr(struct ixl_queue *que)
3078 {
3079         struct ixl_vsi  *vsi = que->vsi;
3080         struct i40e_hw  *hw = vsi->hw;
3081         struct rx_ring  *rxr = &que->rxr;
3082         u16             rx_itr;
3083         u16             rx_latency = 0;
3084         int             rx_bytes;
3085
3086
3087         /* Idle, do nothing */
3088         if (rxr->bytes == 0)
3089                 return;
3090
3091         if (ixl_dynamic_rx_itr) {
3092                 rx_bytes = rxr->bytes/rxr->itr;
3093                 rx_itr = rxr->itr;
3094
3095                 /* Adjust latency range */
3096                 switch (rxr->latency) {
3097                 case IXL_LOW_LATENCY:
3098                         if (rx_bytes > 10) {
3099                                 rx_latency = IXL_AVE_LATENCY;
3100                                 rx_itr = IXL_ITR_20K;
3101                         }
3102                         break;
3103                 case IXL_AVE_LATENCY:
3104                         if (rx_bytes > 20) {
3105                                 rx_latency = IXL_BULK_LATENCY;
3106                                 rx_itr = IXL_ITR_8K;
3107                         } else if (rx_bytes <= 10) {
3108                                 rx_latency = IXL_LOW_LATENCY;
3109                                 rx_itr = IXL_ITR_100K;
3110                         }
3111                         break;
3112                 case IXL_BULK_LATENCY:
3113                         if (rx_bytes <= 20) {
3114                                 rx_latency = IXL_AVE_LATENCY;
3115                                 rx_itr = IXL_ITR_20K;
3116                         }
3117                         break;
3118                  }
3119
3120                 rxr->latency = rx_latency;
3121
3122                 if (rx_itr != rxr->itr) {
3123                         /* do an exponential smoothing */
3124                         rx_itr = (10 * rx_itr * rxr->itr) /
3125                             ((9 * rx_itr) + rxr->itr);
3126                         rxr->itr = rx_itr & IXL_MAX_ITR;
3127                         wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3128                             que->me), rxr->itr);
3129                 }
3130         } else { /* We may have have toggled to non-dynamic */
3131                 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
3132                         vsi->rx_itr_setting = ixl_rx_itr;
3133                 /* Update the hardware if needed */
3134                 if (rxr->itr != vsi->rx_itr_setting) {
3135                         rxr->itr = vsi->rx_itr_setting;
3136                         wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3137                             que->me), rxr->itr);
3138                 }
3139         }
3140         rxr->bytes = 0;
3141         rxr->packets = 0;
3142         return;
3143 }
3144
3145
3146 /*
3147 ** Provide a update to the queue TX
3148 ** interrupt moderation value.
3149 */
3150 static void
3151 ixl_set_queue_tx_itr(struct ixl_queue *que)
3152 {
3153         struct ixl_vsi  *vsi = que->vsi;
3154         struct i40e_hw  *hw = vsi->hw;
3155         struct tx_ring  *txr = &que->txr;
3156         u16             tx_itr;
3157         u16             tx_latency = 0;
3158         int             tx_bytes;
3159
3160
3161         /* Idle, do nothing */
3162         if (txr->bytes == 0)
3163                 return;
3164
3165         if (ixl_dynamic_tx_itr) {
3166                 tx_bytes = txr->bytes/txr->itr;
3167                 tx_itr = txr->itr;
3168
3169                 switch (txr->latency) {
3170                 case IXL_LOW_LATENCY:
3171                         if (tx_bytes > 10) {
3172                                 tx_latency = IXL_AVE_LATENCY;
3173                                 tx_itr = IXL_ITR_20K;
3174                         }
3175                         break;
3176                 case IXL_AVE_LATENCY:
3177                         if (tx_bytes > 20) {
3178                                 tx_latency = IXL_BULK_LATENCY;
3179                                 tx_itr = IXL_ITR_8K;
3180                         } else if (tx_bytes <= 10) {
3181                                 tx_latency = IXL_LOW_LATENCY;
3182                                 tx_itr = IXL_ITR_100K;
3183                         }
3184                         break;
3185                 case IXL_BULK_LATENCY:
3186                         if (tx_bytes <= 20) {
3187                                 tx_latency = IXL_AVE_LATENCY;
3188                                 tx_itr = IXL_ITR_20K;
3189                         }
3190                         break;
3191                 }
3192
3193                 txr->latency = tx_latency;
3194
3195                 if (tx_itr != txr->itr) {
3196                  /* do an exponential smoothing */
3197                         tx_itr = (10 * tx_itr * txr->itr) /
3198                             ((9 * tx_itr) + txr->itr);
3199                         txr->itr = tx_itr & IXL_MAX_ITR;
3200                         wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3201                             que->me), txr->itr);
3202                 }
3203
3204         } else { /* We may have have toggled to non-dynamic */
3205                 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
3206                         vsi->tx_itr_setting = ixl_tx_itr;
3207                 /* Update the hardware if needed */
3208                 if (txr->itr != vsi->tx_itr_setting) {
3209                         txr->itr = vsi->tx_itr_setting;
3210                         wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3211                             que->me), txr->itr);
3212                 }
3213         }
3214         txr->bytes = 0;
3215         txr->packets = 0;
3216         return;
3217 }
3218
3219 #define QUEUE_NAME_LEN 32
3220
3221 static void
3222 ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
3223     struct sysctl_ctx_list *ctx, const char *sysctl_name)
3224 {
3225         struct sysctl_oid *tree;
3226         struct sysctl_oid_list *child;
3227         struct sysctl_oid_list *vsi_list;
3228
3229         tree = device_get_sysctl_tree(pf->dev);
3230         child = SYSCTL_CHILDREN(tree);
3231         vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
3232                                    CTLFLAG_RD, NULL, "VSI Number");
3233         vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
3234
3235         ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
3236 }
3237
3238 static void
3239 ixl_add_hw_stats(struct ixl_pf *pf)
3240 {
3241         device_t dev = pf->dev;
3242         struct ixl_vsi *vsi = &pf->vsi;
3243         struct ixl_queue *queues = vsi->queues;
3244         struct i40e_hw_port_stats *pf_stats = &pf->stats;
3245
3246         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3247         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3248         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3249         struct sysctl_oid_list *vsi_list;
3250
3251         struct sysctl_oid *queue_node;
3252         struct sysctl_oid_list *queue_list;
3253
3254         struct tx_ring *txr;
3255         struct rx_ring *rxr;
3256         char queue_namebuf[QUEUE_NAME_LEN];
3257
3258         /* Driver statistics */
3259         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3260                         CTLFLAG_RD, &pf->watchdog_events,
3261                         "Watchdog timeouts");
3262         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3263                         CTLFLAG_RD, &pf->admin_irq,
3264                         "Admin Queue IRQ Handled");
3265
3266         ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
3267         vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
3268
3269         /* Queue statistics */
3270         for (int q = 0; q < vsi->num_queues; q++) {
3271                 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3272                 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
3273                     OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
3274                 queue_list = SYSCTL_CHILDREN(queue_node);
3275
3276                 txr = &(queues[q].txr);
3277                 rxr = &(queues[q].rxr);
3278
3279                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3280                                 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3281                                 "m_defrag() failed");
3282                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3283                                 CTLFLAG_RD, &(queues[q].dropped_pkts),
3284                                 "Driver dropped packets");
3285                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3286                                 CTLFLAG_RD, &(queues[q].irqs),
3287                                 "irqs on this queue");
3288                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3289                                 CTLFLAG_RD, &(queues[q].tso),
3290                                 "TSO");
3291                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3292                                 CTLFLAG_RD, &(queues[q].tx_dma_setup),
3293                                 "Driver tx dma failure in xmit");
3294                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3295                                 CTLFLAG_RD, &(txr->no_desc),
3296                                 "Queue No Descriptor Available");
3297                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3298                                 CTLFLAG_RD, &(txr->total_packets),
3299                                 "Queue Packets Transmitted");
3300                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3301                                 CTLFLAG_RD, &(txr->tx_bytes),
3302                                 "Queue Bytes Transmitted");
3303                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3304                                 CTLFLAG_RD, &(rxr->rx_packets),
3305                                 "Queue Packets Received");
3306                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3307                                 CTLFLAG_RD, &(rxr->rx_bytes),
3308                                 "Queue Bytes Received");
3309         }
3310
3311         /* MAC stats */
3312         ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
3313 }
3314
3315 static void
3316 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3317         struct sysctl_oid_list *child,
3318         struct i40e_eth_stats *eth_stats)
3319 {
3320         struct ixl_sysctl_info ctls[] =
3321         {
3322                 {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3323                 {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3324                         "Unicast Packets Received"},
3325                 {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3326                         "Multicast Packets Received"},
3327                 {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3328                         "Broadcast Packets Received"},
3329                 {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3330                 {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3331                 {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3332                 {&eth_stats->tx_multicast, "mcast_pkts_txd",
3333                         "Multicast Packets Transmitted"},
3334                 {&eth_stats->tx_broadcast, "bcast_pkts_txd",
3335                         "Broadcast Packets Transmitted"},
3336                 // end
3337                 {0,0,0}
3338         };
3339
3340         struct ixl_sysctl_info *entry = ctls;
3341         while (entry->stat != 0)
3342         {
3343                 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3344                                 CTLFLAG_RD, entry->stat,
3345                                 entry->description);
3346                 entry++;
3347         }
3348 }
3349
3350 static void
3351 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3352         struct sysctl_oid_list *child,
3353         struct i40e_hw_port_stats *stats)
3354 {
3355         struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3356                                     CTLFLAG_RD, NULL, "Mac Statistics");
3357         struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3358
3359         struct i40e_eth_stats *eth_stats = &stats->eth;
3360         ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3361
3362         struct ixl_sysctl_info ctls[] = 
3363         {
3364                 {&stats->crc_errors, "crc_errors", "CRC Errors"},
3365                 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3366                 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3367                 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3368                 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3369                 /* Packet Reception Stats */
3370                 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3371                 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3372                 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3373                 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3374                 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3375                 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3376                 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3377                 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3378                 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3379                 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3380                 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3381                 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3382                 /* Packet Transmission Stats */
3383                 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3384                 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3385                 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3386                 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3387                 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3388                 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3389                 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3390                 /* Flow control */
3391                 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3392                 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3393                 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3394                 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3395                 /* End */
3396                 {0,0,0}
3397         };
3398
3399         struct ixl_sysctl_info *entry = ctls;
3400         while (entry->stat != 0)
3401         {
3402                 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3403                                 CTLFLAG_RD, entry->stat,
3404                                 entry->description);
3405                 entry++;
3406         }
3407 }
3408
3409
/*
** ixl_config_rss - setup RSS
**  - note this is done for the single vsi
**
** Programs the hash key (PFQF_HKEY), the enabled hash
** types (PFQF_HENA), and the indirection table (PFQF_HLUT).
** With the RSS kernel option the key and hash config come
** from the network stack; otherwise fixed defaults are used.
*/
static void ixl_config_rss(struct ixl_vsi *vsi)
{
        struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
        struct i40e_hw  *hw = vsi->hw;
        u32             lut = 0;
        u64             set_hena = 0, hena;
        int             i, j, que_id;
#ifdef RSS
        u32             rss_hash_config;
        u32             rss_seed[IXL_KEYSZ];
#else
        /* Fixed default seed used when the stack's RSS is not compiled in */
        u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
                            0x183cfd8c, 0xce880440, 0x580cbc3c,
                            0x35897377, 0x328b25e1, 0x4fa98922,
                            0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
#endif

#ifdef RSS
        /* Fetch the configured RSS key */
        rss_getkey((uint8_t *) &rss_seed);
#endif

        /* Fill out hash function seed */
        for (i = 0; i < IXL_KEYSZ; i++)
                wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);

        /* Enable PCTYPES for RSS: */
#ifdef RSS
        /* Translate the stack's hash types into i40e PCTYPE bits */
        rss_hash_config = rss_gethashconfig();
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
        /* NOTE(review): IPV6_EX mapped to the FRAG_IPV6 pctype here;
        ** looks questionable — confirm against the i40e datasheet. */
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
        /* Without stack RSS, enable hashing for all supported pctypes */
        set_hena =
                ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
                ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
                ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
                ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
                ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
                ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
                ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
                ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
                ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
                ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
                ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
#endif
        /* OR the new bits into whatever the 64-bit HENA already holds */
        hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
            ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
        hena |= set_hena;
        wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
        wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

        /* Populate the LUT with max no. of queues in round robin fashion */
        for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
                if (j == vsi->num_queues)
                        j = 0;
#ifdef RSS
                /*
                 * Fetch the RSS bucket id for the given indirection entry.
                 * Cap it at the number of configured buckets (which is
                 * num_queues.)
                 */
                que_id = rss_get_indirection_to_bucket(i);
                que_id = que_id % vsi->num_queues;
#else
                que_id = j;
#endif
                /* lut = 4-byte sliding window of 4 lut entries */
                lut = (lut << 8) | (que_id &
                    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
                /* On i = 3, we have 4 entries in lut; write to the register */
                if ((i & 3) == 3)
                        wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
        }
        ixl_flush(hw);
}
3501
3502
3503 /*
3504 ** This routine is run via an vlan config EVENT,
3505 ** it enables us to use the HW Filter table since
3506 ** we can get the vlan id. This just creates the
3507 ** entry in the soft version of the VFTA, init will
3508 ** repopulate the real table.
3509 */
3510 static void
3511 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3512 {
3513         struct ixl_vsi  *vsi = ifp->if_softc;
3514         struct i40e_hw  *hw = vsi->hw;
3515         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
3516
3517         if (ifp->if_softc !=  arg)   /* Not our event */
3518                 return;
3519
3520         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3521                 return;
3522
3523         IXL_PF_LOCK(pf);
3524         ++vsi->num_vlans;
3525         ixl_add_filter(vsi, hw->mac.addr, vtag);
3526         IXL_PF_UNLOCK(pf);
3527 }
3528
3529 /*
3530 ** This routine is run via an vlan
3531 ** unconfig EVENT, remove our entry
3532 ** in the soft vfta.
3533 */
3534 static void
3535 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3536 {
3537         struct ixl_vsi  *vsi = ifp->if_softc;
3538         struct i40e_hw  *hw = vsi->hw;
3539         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
3540
3541         if (ifp->if_softc !=  arg)
3542                 return;
3543
3544         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3545                 return;
3546
3547         IXL_PF_LOCK(pf);
3548         --vsi->num_vlans;
3549         ixl_del_filter(vsi, hw->mac.addr, vtag);
3550         IXL_PF_UNLOCK(pf);
3551 }
3552
3553 /*
3554 ** This routine updates vlan filters, called by init
3555 ** it scans the filter table and then updates the hw
3556 ** after a soft reset.
3557 */
3558 static void
3559 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3560 {
3561         struct ixl_mac_filter   *f;
3562         int                     cnt = 0, flags;
3563
3564         if (vsi->num_vlans == 0)
3565                 return;
3566         /*
3567         ** Scan the filter list for vlan entries,
3568         ** mark them for addition and then call
3569         ** for the AQ update.
3570         */
3571         SLIST_FOREACH(f, &vsi->ftl, next) {
3572                 if (f->flags & IXL_FILTER_VLAN) {
3573                         f->flags |=
3574                             (IXL_FILTER_ADD |
3575                             IXL_FILTER_USED);
3576                         cnt++;
3577                 }
3578         }
3579         if (cnt == 0) {
3580                 printf("setup vlan: no filters found!\n");
3581                 return;
3582         }
3583         flags = IXL_FILTER_VLAN;
3584         flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3585         ixl_add_hw_filters(vsi, flags, cnt);
3586         return;
3587 }
3588
3589 /*
3590 ** Initialize filter list and add filters that the hardware
3591 ** needs to know about.
3592 */
3593 static void
3594 ixl_init_filters(struct ixl_vsi *vsi)
3595 {
3596         /* Add broadcast address */
3597         ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3598 }
3599
3600 /*
3601 ** This routine adds mulicast filters
3602 */
3603 static void
3604 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3605 {
3606         struct ixl_mac_filter *f;
3607
3608         /* Does one already exist */
3609         f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3610         if (f != NULL)
3611                 return;
3612
3613         f = ixl_get_filter(vsi);
3614         if (f == NULL) {
3615                 printf("WARNING: no filter available!!\n");
3616                 return;
3617         }
3618         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3619         f->vlan = IXL_VLAN_ANY;
3620         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3621             | IXL_FILTER_MC);
3622
3623         return;
3624 }
3625
3626 static void
3627 ixl_reconfigure_filters(struct ixl_vsi *vsi)
3628 {
3629
3630         ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3631 }
3632
/*
** This routine adds macvlan filters
**
** Adds a soft-table entry for (macaddr, vlan), handles the
** transition from the "any VLAN" filter to per-VLAN filters
** when the first VLAN appears, and immediately pushes the
** new entry to hardware.
*/
static void
ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
{
        struct ixl_mac_filter   *f, *tmp;
        struct ixl_pf           *pf;
        device_t                dev;

        DEBUGOUT("ixl_add_filter: begin");

        pf = vsi->back;
        dev = pf->dev;

        /* Does one already exist */
        f = ixl_find_filter(vsi, macaddr, vlan);
        if (f != NULL)
                return;
        /*
        ** Is this the first vlan being registered, if so we
        ** need to remove the ANY filter that indicates we are
        ** not in a vlan, and replace that with a 0 filter.
        */
        if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
                tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
                if (tmp != NULL) {
                        ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
                        /* recursive call; vlan 0 means "untagged" */
                        ixl_add_filter(vsi, macaddr, 0);
                }
        }

        f = ixl_get_filter(vsi);
        if (f == NULL) {
                device_printf(dev, "WARNING: no filter available!!\n");
                return;
        }
        bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
        f->vlan = vlan;
        f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
        /* NOTE(review): num_macs is only incremented for non-VLAN
        ** entries here, but ixl_del_filter() decrements it for every
        ** removal — possible counter drift when VLAN filters are
        ** deleted; confirm against ixl_reconfigure_filters() usage. */
        if (f->vlan != IXL_VLAN_ANY)
                f->flags |= IXL_FILTER_VLAN;
        else
                vsi->num_macs++;

        /* Push the single new entry to hardware right away */
        ixl_add_hw_filters(vsi, f->flags, 1);
        return;
}
3681
/*
** Remove the soft-table entry for (macaddr, vlan) and delete
** the corresponding hardware filter. When the last VLAN goes
** away, restores the "any VLAN" filter for the MAC.
*/
static void
ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
{
        struct ixl_mac_filter *f;

        f = ixl_find_filter(vsi, macaddr, vlan);
        if (f == NULL)
                return;

        /* Mark and remove exactly this one entry from HW */
        f->flags |= IXL_FILTER_DEL;
        ixl_del_hw_filters(vsi, 1);
        /* NOTE(review): decremented unconditionally, while
        ** ixl_add_filter() only increments for non-VLAN entries —
        ** confirm the num_macs accounting is symmetric. */
        vsi->num_macs--;

        /* Check if this is the last vlan removal */
        if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
                /* Switch back to a non-vlan filter */
                ixl_del_filter(vsi, macaddr, 0);
                ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
        }
        return;
}
3703
3704 /*
3705 ** Find the filter with both matching mac addr and vlan id
3706 */
3707 static struct ixl_mac_filter *
3708 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3709 {
3710         struct ixl_mac_filter   *f;
3711         bool                    match = FALSE;
3712
3713         SLIST_FOREACH(f, &vsi->ftl, next) {
3714                 if (!cmp_etheraddr(f->macaddr, macaddr))
3715                         continue;
3716                 if (f->vlan == vlan) {
3717                         match = TRUE;
3718                         break;
3719                 }
3720         }       
3721
3722         if (!match)
3723                 f = NULL;
3724         return (f);
3725 }
3726
/*
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
**
** Packs up to 'cnt' soft-table entries whose flag word
** matches 'flags' into one add-macvlan AQ command.
** Caller must hold the PF lock.
*/
static void
ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
{
        struct i40e_aqc_add_macvlan_element_data *a, *b;
        struct ixl_mac_filter   *f;
        struct ixl_pf           *pf;
        struct i40e_hw          *hw;
        device_t                dev;
        int                     err, j = 0;

        pf = vsi->back;
        dev = pf->dev;
        hw = &pf->hw;
        IXL_PF_LOCK_ASSERT(pf);

        /* AQ element array; M_NOWAIT since we hold the PF lock */
        a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
            M_DEVBUF, M_NOWAIT | M_ZERO);
        if (a == NULL) {
                device_printf(dev, "add_hw_filters failed to get memory\n");
                return;
        }

        /*
        ** Scan the filter list, each time we find one
        ** we add it to the admin queue array and turn off
        ** the add bit.
        **
        ** NOTE(review): exact equality (==) on the whole flag word,
        ** not a mask test — entries carrying extra bits (e.g.
        ** IXL_FILTER_MC) match only when the caller passes that
        ** identical combination; confirm all callers agree.
        */
        SLIST_FOREACH(f, &vsi->ftl, next) {
                if (f->flags == flags) {
                        b = &a[j]; // a pox on fvl long names :)
                        bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
                        if (f->vlan == IXL_VLAN_ANY) {
                                /* Match any VLAN: tag 0 + ignore-vlan flag */
                                b->vlan_tag = 0;
                                b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
                        } else {
                                b->vlan_tag = f->vlan;
                                b->flags = 0;
                        }
                        b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
                        /* Clear ADD so this entry is not re-sent later */
                        f->flags &= ~IXL_FILTER_ADD;
                        j++;
                }
                if (j == cnt)
                        break;
        }
        if (j > 0) {
                err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
                if (err)
                        device_printf(dev, "aq_add_macvlan err %d, "
                            "aq_error %d\n", err, hw->aq.asq_last_status);
                else
                        vsi->hw_filters_add += j;
        }
        free(a, M_DEVBUF);
        return;
}
3788
/*
** This routine takes removals in the vsi filter
** table and creates an Admin Queue call to delete
** the filters in the hardware.
**
** Collects up to 'cnt' entries marked IXL_FILTER_DEL,
** removes them from the soft list, and issues one
** remove-macvlan AQ command.
*/
static void
ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
{
        struct i40e_aqc_remove_macvlan_element_data *d, *e;
        struct ixl_pf           *pf;
        struct i40e_hw          *hw;
        device_t                dev;
        struct ixl_mac_filter   *f, *f_temp;
        int                     err, j = 0;

        DEBUGOUT("ixl_del_hw_filters: begin\n");

        pf = vsi->back;
        hw = &pf->hw;
        dev = pf->dev;

        d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
            M_DEVBUF, M_NOWAIT | M_ZERO);
        if (d == NULL) {
                printf("del hw filter failed to get memory\n");
                return;
        }

        /* SAFE variant: entries are freed while iterating */
        SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
                if (f->flags & IXL_FILTER_DEL) {
                        e = &d[j]; // a pox on fvl long names :)
                        bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
                        e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
                        e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
                        /* delete entry from vsi list
                        ** NOTE(review): the soft entry is freed before the
                        ** AQ command confirms removal; a failed removal
                        ** leaves a HW filter with no soft-state twin. */
                        SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
                        free(f, M_DEVBUF);
                        j++;
                }
                if (j == cnt)
                        break;
        }
        if (j > 0) {
                err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
                /* NOTE: returns ENOENT every time but seems to work fine,
                   so we'll ignore that specific error. */
                // TODO: Does this still occur on current firmwares?
                if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
                        int sc = 0;
                        /* Count per-element successes to report partial failure */
                        for (int i = 0; i < j; i++)
                                sc += (!d[i].error_code);
                        vsi->hw_filters_del += sc;
                        device_printf(dev,
                            "Failed to remove %d/%d filters, aq error %d\n",
                            j - sc, j, hw->aq.asq_last_status);
                } else
                        vsi->hw_filters_del += j;
        }
        free(d, M_DEVBUF);

        DEBUGOUT("ixl_del_hw_filters: end\n");
        return;
}
3852
/*
** Enable every TX and RX ring of the VSI. For each queue the
** enable-request bit is set and the status bit is polled
** (up to ~100 ms); failures are reported but the loop keeps
** going, returning ETIMEDOUT if any queue failed.
*/
static int
ixl_enable_rings(struct ixl_vsi *vsi)
{
        struct ixl_pf   *pf = vsi->back;
        struct i40e_hw  *hw = &pf->hw;
        int             index, error;
        u32             reg;

        error = 0;
        for (int i = 0; i < vsi->num_queues; i++) {
                index = vsi->first_queue + i;
                /* Pre-config step required before toggling QTX_ENA */
                i40e_pre_tx_queue_cfg(hw, index, TRUE);

                reg = rd32(hw, I40E_QTX_ENA(index));
                reg |= I40E_QTX_ENA_QENA_REQ_MASK |
                    I40E_QTX_ENA_QENA_STAT_MASK;
                wr32(hw, I40E_QTX_ENA(index), reg);
                /* Verify the enable took */
                for (int j = 0; j < 10; j++) {
                        reg = rd32(hw, I40E_QTX_ENA(index));
                        if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
                                break;
                        i40e_msec_delay(10);
                }
                if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
                        device_printf(pf->dev, "TX queue %d disabled!\n",
                            index);
                        /* Keep trying the remaining queues */
                        error = ETIMEDOUT;
                }

                reg = rd32(hw, I40E_QRX_ENA(index));
                reg |= I40E_QRX_ENA_QENA_REQ_MASK |
                    I40E_QRX_ENA_QENA_STAT_MASK;
                wr32(hw, I40E_QRX_ENA(index), reg);
                /* Verify the enable took */
                for (int j = 0; j < 10; j++) {
                        reg = rd32(hw, I40E_QRX_ENA(index));
                        if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
                                break;
                        i40e_msec_delay(10);
                }
                if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
                        device_printf(pf->dev, "RX queue %d disabled!\n",
                            index);
                        /* Keep trying the remaining queues */
                        error = ETIMEDOUT;
                }
        }

        return (error);
}
3903
3904 static int
3905 ixl_disable_rings(struct ixl_vsi *vsi)
3906 {
3907         struct ixl_pf   *pf = vsi->back;
3908         struct i40e_hw  *hw = &pf->hw;
3909         int             index, error;
3910         u32             reg;
3911
3912         error = 0;
3913         for (int i = 0; i < vsi->num_queues; i++) {
3914                 index = vsi->first_queue + i;
3915
3916                 i40e_pre_tx_queue_cfg(hw, index, FALSE);
3917                 i40e_usec_delay(500);
3918
3919                 reg = rd32(hw, I40E_QTX_ENA(index));
3920                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3921                 wr32(hw, I40E_QTX_ENA(index), reg);
3922                 /* Verify the disable took */
3923                 for (int j = 0; j < 10; j++) {
3924                         reg = rd32(hw, I40E_QTX_ENA(index));
3925                         if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3926                                 break;
3927                         i40e_msec_delay(10);
3928                 }
3929                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3930                         device_printf(pf->dev, "TX queue %d still enabled!\n",
3931                             index);
3932                         error = ETIMEDOUT;
3933                 }
3934
3935                 reg = rd32(hw, I40E_QRX_ENA(index));
3936                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3937                 wr32(hw, I40E_QRX_ENA(index), reg);
3938                 /* Verify the disable took */
3939                 for (int j = 0; j < 10; j++) {
3940                         reg = rd32(hw, I40E_QRX_ENA(index));
3941                         if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3942                                 break;
3943                         i40e_msec_delay(10);
3944                 }
3945                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3946                         device_printf(pf->dev, "RX queue %d still enabled!\n",
3947                             index);
3948                         error = ETIMEDOUT;
3949                 }
3950         }
3951
3952         return (error);
3953 }
3954
3955 /**
3956  * ixl_handle_mdd_event
3957  *
3958  * Called from interrupt handler to identify possibly malicious vfs
3959  * (But also detects events from the PF, as well)
3960  **/
3961 static void ixl_handle_mdd_event(struct ixl_pf *pf)
3962 {
3963         struct i40e_hw *hw = &pf->hw;
3964         device_t dev = pf->dev;
3965         bool mdd_detected = false;
3966         bool pf_mdd_detected = false;
3967         u32 reg;
3968
3969         /* find what triggered the MDD event */
3970         reg = rd32(hw, I40E_GL_MDET_TX);
3971         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3972                 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3973                                 I40E_GL_MDET_TX_PF_NUM_SHIFT;
3974                 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3975                                 I40E_GL_MDET_TX_EVENT_SHIFT;
3976                 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3977                                 I40E_GL_MDET_TX_QUEUE_SHIFT;
3978                 device_printf(dev,
3979                          "Malicious Driver Detection event 0x%02x"
3980                          " on TX queue %d pf number 0x%02x\n",
3981                          event, queue, pf_num);
3982                 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3983                 mdd_detected = true;
3984         }
3985         reg = rd32(hw, I40E_GL_MDET_RX);
3986         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3987                 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3988                                 I40E_GL_MDET_RX_FUNCTION_SHIFT;
3989                 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3990                                 I40E_GL_MDET_RX_EVENT_SHIFT;
3991                 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3992                                 I40E_GL_MDET_RX_QUEUE_SHIFT;
3993                 device_printf(dev,
3994                          "Malicious Driver Detection event 0x%02x"
3995                          " on RX queue %d of function 0x%02x\n",
3996                          event, queue, func);
3997                 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3998                 mdd_detected = true;
3999         }
4000
4001         if (mdd_detected) {
4002                 reg = rd32(hw, I40E_PF_MDET_TX);
4003                 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
4004                         wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
4005                         device_printf(dev,
4006                                  "MDD TX event is for this function 0x%08x",
4007                                  reg);
4008                         pf_mdd_detected = true;
4009                 }
4010                 reg = rd32(hw, I40E_PF_MDET_RX);
4011                 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
4012                         wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
4013                         device_printf(dev,
4014                                  "MDD RX event is for this function 0x%08x",
4015                                  reg);
4016                         pf_mdd_detected = true;
4017                 }
4018         }
4019
4020         /* re-enable mdd interrupt cause */
4021         reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4022         reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4023         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4024         ixl_flush(hw);
4025 }
4026
4027 static void
4028 ixl_enable_intr(struct ixl_vsi *vsi)
4029 {
4030         struct i40e_hw          *hw = vsi->hw;
4031         struct ixl_queue        *que = vsi->queues;
4032
4033         if (ixl_enable_msix) {
4034                 ixl_enable_adminq(hw);
4035                 for (int i = 0; i < vsi->num_queues; i++, que++)
4036                         ixl_enable_queue(hw, que->me);
4037         } else
4038                 ixl_enable_legacy(hw);
4039 }
4040
4041 static void
4042 ixl_disable_rings_intr(struct ixl_vsi *vsi)
4043 {
4044         struct i40e_hw          *hw = vsi->hw;
4045         struct ixl_queue        *que = vsi->queues;
4046
4047         for (int i = 0; i < vsi->num_queues; i++, que++)
4048                 ixl_disable_queue(hw, que->me);
4049 }
4050
4051 static void
4052 ixl_disable_intr(struct ixl_vsi *vsi)
4053 {
4054         struct i40e_hw          *hw = vsi->hw;
4055
4056         if (ixl_enable_msix)
4057                 ixl_disable_adminq(hw);
4058         else
4059                 ixl_disable_legacy(hw);
4060 }
4061
4062 static void
4063 ixl_enable_adminq(struct i40e_hw *hw)
4064 {
4065         u32             reg;
4066
4067         reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4068             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4069             (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4070         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4071         ixl_flush(hw);
4072         return;
4073 }
4074
4075 static void
4076 ixl_disable_adminq(struct i40e_hw *hw)
4077 {
4078         u32             reg;
4079
4080         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4081         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4082
4083         return;
4084 }
4085
4086 static void
4087 ixl_enable_queue(struct i40e_hw *hw, int id)
4088 {
4089         u32             reg;
4090
4091         reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
4092             I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
4093             (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
4094         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4095 }
4096
4097 static void
4098 ixl_disable_queue(struct i40e_hw *hw, int id)
4099 {
4100         u32             reg;
4101
4102         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4103         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4104
4105         return;
4106 }
4107
4108 static void
4109 ixl_enable_legacy(struct i40e_hw *hw)
4110 {
4111         u32             reg;
4112         reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4113             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4114             (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4115         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4116 }
4117
4118 static void
4119 ixl_disable_legacy(struct i40e_hw *hw)
4120 {
4121         u32             reg;
4122
4123         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4124         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4125
4126         return;
4127 }
4128
/*
** ixl_update_stats_counters: refresh all port-level (MAC/PHY) hardware
** statistics for this PF, then the PF VSI's stats and the eth stats of
** every enabled VF VSI.  Each ixl_stat_update{32,48}() call subtracts
** the offset captured on the first read so counters report from zero
** (the hardware counters are not cleared by a PF reset).
*/
static void
ixl_update_stats_counters(struct ixl_pf *pf)
{
        struct i40e_hw  *hw = &pf->hw;
        struct ixl_vsi  *vsi = &pf->vsi;
        struct ixl_vf   *vf;

        /* nsd: live stats; osd: first-read baseline offsets */
        struct i40e_hw_port_stats *nsd = &pf->stats;
        struct i40e_hw_port_stats *osd = &pf->stats_offsets;

        /* Update hw stats */
        ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->crc_errors, &nsd->crc_errors);
        ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->illegal_bytes, &nsd->illegal_bytes);
        /* Byte and unicast/multicast/broadcast packet counters */
        ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
                           I40E_GLPRT_GORCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
        ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
                           I40E_GLPRT_GOTCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
        ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_discards,
                           &nsd->eth.rx_discards);
        ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
                           I40E_GLPRT_UPRCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_unicast,
                           &nsd->eth.rx_unicast);
        ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
                           I40E_GLPRT_UPTCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_unicast,
                           &nsd->eth.tx_unicast);
        ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
                           I40E_GLPRT_MPRCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_multicast,
                           &nsd->eth.rx_multicast);
        ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
                           I40E_GLPRT_MPTCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_multicast,
                           &nsd->eth.tx_multicast);
        ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
                           I40E_GLPRT_BPRCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_broadcast,
                           &nsd->eth.rx_broadcast);
        ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
                           I40E_GLPRT_BPTCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_broadcast,
                           &nsd->eth.tx_broadcast);

        /* Link-down drops and MAC fault counters */
        ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_dropped_link_down,
                           &nsd->tx_dropped_link_down);
        ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->mac_local_faults,
                           &nsd->mac_local_faults);
        ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->mac_remote_faults,
                           &nsd->mac_remote_faults);
        ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_length_errors,
                           &nsd->rx_length_errors);

        /* Flow control (LFC) stats */
        ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xon_rx, &nsd->link_xon_rx);
        ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xon_tx, &nsd->link_xon_tx);
        ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xoff_rx, &nsd->link_xoff_rx);
        ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xoff_tx, &nsd->link_xoff_tx);

        /* Packet size stats rx */
        ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
                           I40E_GLPRT_PRC64L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_64, &nsd->rx_size_64);
        ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
                           I40E_GLPRT_PRC127L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_127, &nsd->rx_size_127);
        ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
                           I40E_GLPRT_PRC255L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_255, &nsd->rx_size_255);
        ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
                           I40E_GLPRT_PRC511L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_511, &nsd->rx_size_511);
        ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
                           I40E_GLPRT_PRC1023L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_1023, &nsd->rx_size_1023);
        ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
                           I40E_GLPRT_PRC1522L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_1522, &nsd->rx_size_1522);
        ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
                           I40E_GLPRT_PRC9522L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_big, &nsd->rx_size_big);

        /* Packet size stats tx */
        ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
                           I40E_GLPRT_PTC64L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_64, &nsd->tx_size_64);
        ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
                           I40E_GLPRT_PTC127L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_127, &nsd->tx_size_127);
        ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
                           I40E_GLPRT_PTC255L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_255, &nsd->tx_size_255);
        ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
                           I40E_GLPRT_PTC511L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_511, &nsd->tx_size_511);
        ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
                           I40E_GLPRT_PTC1023L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_1023, &nsd->tx_size_1023);
        ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
                           I40E_GLPRT_PTC1522L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_1522, &nsd->tx_size_1522);
        ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
                           I40E_GLPRT_PTC9522L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_big, &nsd->tx_size_big);

        /* RX framing error counters */
        ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_undersize, &nsd->rx_undersize);
        ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_fragments, &nsd->rx_fragments);
        ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_oversize, &nsd->rx_oversize);
        ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_jabber, &nsd->rx_jabber);
        /* Offsets are captured now; later calls compute deltas */
        pf->stat_offsets_loaded = true;
        /* End hw stats */

        /* Update vsi stats */
        ixl_update_vsi_stats(vsi);

        /* Also refresh stats for every enabled VF's VSI */
        for (int i = 0; i < pf->num_vfs; i++) {
                vf = &pf->vfs[i];
                if (vf->vf_flags & VF_FLAG_ENABLED)
                        ixl_update_eth_stats(&pf->vfs[i].vsi);
        }
}
4304
4305 /*
4306 ** Tasklet handler for MSIX Adminq interrupts
4307 **  - do outside interrupt since it might sleep
4308 */
4309 static void
4310 ixl_do_adminq(void *context, int pending)
4311 {
4312         struct ixl_pf                   *pf = context;
4313         struct i40e_hw                  *hw = &pf->hw;
4314         struct ixl_vsi                  *vsi = &pf->vsi;
4315         struct i40e_arq_event_info      event;
4316         i40e_status                     ret;
4317         u32                             reg, loop = 0;
4318         u16                             opcode, result;
4319
4320         event.buf_len = IXL_AQ_BUF_SZ;
4321         event.msg_buf = malloc(event.buf_len,
4322             M_DEVBUF, M_NOWAIT | M_ZERO);
4323         if (!event.msg_buf) {
4324                 printf("Unable to allocate adminq memory\n");
4325                 return;
4326         }
4327
4328         IXL_PF_LOCK(pf);
4329         /* clean and process any events */
4330         do {
4331                 ret = i40e_clean_arq_element(hw, &event, &result);
4332                 if (ret)
4333                         break;
4334                 opcode = LE16_TO_CPU(event.desc.opcode);
4335                 switch (opcode) {
4336                 case i40e_aqc_opc_get_link_status:
4337                         ixl_link_event(pf, &event);
4338                         ixl_update_link_status(pf);
4339                         break;
4340                 case i40e_aqc_opc_send_msg_to_pf:
4341 #ifdef PCI_IOV
4342                         ixl_handle_vf_msg(pf, &event);
4343 #endif
4344                         break;
4345                 case i40e_aqc_opc_event_lan_overflow:
4346                         break;
4347                 default:
4348 #ifdef IXL_DEBUG
4349                         printf("AdminQ unknown event %x\n", opcode);
4350 #endif
4351                         break;
4352                 }
4353
4354         } while (result && (loop++ < IXL_ADM_LIMIT));
4355
4356         reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4357         reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4358         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4359         free(event.msg_buf, M_DEVBUF);
4360
4361         /*
4362          * If there are still messages to process, reschedule ourselves.
4363          * Otherwise, re-enable our interrupt and go to sleep.
4364          */
4365         if (result > 0)
4366                 taskqueue_enqueue(pf->tq, &pf->adminq);
4367         else
4368                 ixl_enable_intr(vsi);
4369
4370         IXL_PF_UNLOCK(pf);
4371 }
4372
4373 #ifdef IXL_DEBUG_SYSCTL
4374 static int
4375 ixl_debug_info(SYSCTL_HANDLER_ARGS)
4376 {
4377         struct ixl_pf   *pf;
4378         int             error, input = 0;
4379
4380         error = sysctl_handle_int(oidp, &input, 0, req);
4381
4382         if (error || !req->newptr)
4383                 return (error);
4384
4385         if (input == 1) {
4386                 pf = (struct ixl_pf *)arg1;
4387                 ixl_print_debug_info(pf);
4388         }
4389
4390         return (error);
4391 }
4392
4393 static void
4394 ixl_print_debug_info(struct ixl_pf *pf)
4395 {
4396         struct i40e_hw          *hw = &pf->hw;
4397         struct ixl_vsi          *vsi = &pf->vsi;
4398         struct ixl_queue        *que = vsi->queues;
4399         struct rx_ring          *rxr = &que->rxr;
4400         struct tx_ring          *txr = &que->txr;
4401         u32                     reg;    
4402
4403
4404         printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4405         printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4406         printf("RX next check = %x\n", rxr->next_check);
4407         printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4408         printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4409         printf("TX desc avail = %x\n", txr->avail);
4410
4411         reg = rd32(hw, I40E_GLV_GORCL(0xc));
4412          printf("RX Bytes = %x\n", reg);
4413         reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4414          printf("Port RX Bytes = %x\n", reg);
4415         reg = rd32(hw, I40E_GLV_RDPC(0xc));
4416          printf("RX discard = %x\n", reg);
4417         reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4418          printf("Port RX discard = %x\n", reg);
4419
4420         reg = rd32(hw, I40E_GLV_TEPC(0xc));
4421          printf("TX errors = %x\n", reg);
4422         reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4423          printf("TX Bytes = %x\n", reg);
4424
4425         reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4426          printf("RX undersize = %x\n", reg);
4427         reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4428          printf("RX fragments = %x\n", reg);
4429         reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4430          printf("RX oversize = %x\n", reg);
4431         reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4432          printf("RX length error = %x\n", reg);
4433         reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4434          printf("mac remote fault = %x\n", reg);
4435         reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4436          printf("mac local fault = %x\n", reg);
4437 }
4438 #endif
4439
4440 /**
4441  * Update VSI-specific ethernet statistics counters.
4442  **/
void ixl_update_eth_stats(struct ixl_vsi *vsi)
{
        struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_eth_stats *es;
        struct i40e_eth_stats *oes;
        struct i40e_hw_port_stats *nsd;
        /* Hardware stat-counter slot assigned to this VSI */
        u16 stat_idx = vsi->info.stat_counter_idx;

        es = &vsi->eth_stats;           /* live counters */
        oes = &vsi->eth_stats_offsets;  /* first-read baselines */
        nsd = &pf->stats;

        /* Gather up the stats that the hw collects */
        ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_errors, &es->tx_errors);
        ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_discards, &es->rx_discards);

        /* RX byte and per-cast-type packet counters (48-bit pairs) */
        ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
                           I40E_GLV_GORCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_bytes, &es->rx_bytes);
        ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
                           I40E_GLV_UPRCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_unicast, &es->rx_unicast);
        ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
                           I40E_GLV_MPRCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_multicast, &es->rx_multicast);
        ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
                           I40E_GLV_BPRCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_broadcast, &es->rx_broadcast);

        /* TX byte and per-cast-type packet counters (48-bit pairs) */
        ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
                           I40E_GLV_GOTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_bytes, &es->tx_bytes);
        ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
                           I40E_GLV_UPTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_unicast, &es->tx_unicast);
        ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
                           I40E_GLV_MPTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_multicast, &es->tx_multicast);
        ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
                           I40E_GLV_BPTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_broadcast, &es->tx_broadcast);
        /* Baselines captured; subsequent calls report deltas */
        vsi->stat_offsets_loaded = true;
}
4499
4500 static void
4501 ixl_update_vsi_stats(struct ixl_vsi *vsi)
4502 {
4503         struct ixl_pf           *pf;
4504         struct ifnet            *ifp;
4505         struct i40e_eth_stats   *es;
4506         u64                     tx_discards;
4507
4508         struct i40e_hw_port_stats *nsd;
4509
4510         pf = vsi->back;
4511         ifp = vsi->ifp;
4512         es = &vsi->eth_stats;
4513         nsd = &pf->stats;
4514
4515         ixl_update_eth_stats(vsi);
4516
4517         tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4518         for (int i = 0; i < vsi->num_queues; i++)
4519                 tx_discards += vsi->queues[i].txr.br->br_drops;
4520
4521         /* Update ifnet stats */
4522         IXL_SET_IPACKETS(vsi, es->rx_unicast +
4523                            es->rx_multicast +
4524                            es->rx_broadcast);
4525         IXL_SET_OPACKETS(vsi, es->tx_unicast +
4526                            es->tx_multicast +
4527                            es->tx_broadcast);
4528         IXL_SET_IBYTES(vsi, es->rx_bytes);
4529         IXL_SET_OBYTES(vsi, es->tx_bytes);
4530         IXL_SET_IMCASTS(vsi, es->rx_multicast);
4531         IXL_SET_OMCASTS(vsi, es->tx_multicast);
4532
4533         IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4534             nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4535             nsd->rx_jabber);
4536         IXL_SET_OERRORS(vsi, es->tx_errors);
4537         IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4538         IXL_SET_OQDROPS(vsi, tx_discards);
4539         IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4540         IXL_SET_COLLISIONS(vsi, 0);
4541 }
4542
4543 /**
4544  * Reset all of the stats for the given pf
4545  **/
4546 void ixl_pf_reset_stats(struct ixl_pf *pf)
4547 {
4548         bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4549         bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4550         pf->stat_offsets_loaded = false;
4551 }
4552
4553 /**
4554  * Resets all stats of the given vsi
4555  **/
4556 void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4557 {
4558         bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4559         bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4560         vsi->stat_offsets_loaded = false;
4561 }
4562
4563 /**
4564  * Read and update a 48 bit stat from the hw
4565  *
4566  * Since the device stats are not reset at PFReset, they likely will not
4567  * be zeroed when the driver starts.  We'll save the first values read
4568  * and use them as offsets to be subtracted from the raw values in order
4569  * to report stats that count from zero.
4570  **/
4571 static void
4572 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4573         bool offset_loaded, u64 *offset, u64 *stat)
4574 {
4575         u64 new_data;
4576
4577 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4578         new_data = rd64(hw, loreg);
4579 #else
4580         /*
4581          * Use two rd32's instead of one rd64; FreeBSD versions before
4582          * 10 don't support 8 byte bus reads/writes.
4583          */
4584         new_data = rd32(hw, loreg);
4585         new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4586 #endif
4587
4588         if (!offset_loaded)
4589                 *offset = new_data;
4590         if (new_data >= *offset)
4591                 *stat = new_data - *offset;
4592         else
4593                 *stat = (new_data + ((u64)1 << 48)) - *offset;
4594         *stat &= 0xFFFFFFFFFFFFULL;
4595 }
4596
4597 /**
4598  * Read and update a 32 bit stat from the hw
4599  **/
4600 static void
4601 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4602         bool offset_loaded, u64 *offset, u64 *stat)
4603 {
4604         u32 new_data;
4605
4606         new_data = rd32(hw, reg);
4607         if (!offset_loaded)
4608                 *offset = new_data;
4609         if (new_data >= *offset)
4610                 *stat = (u32)(new_data - *offset);
4611         else
4612                 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4613 }
4614
4615 /*
4616 ** Set flow control using sysctl:
4617 **      0 - off
4618 **      1 - rx pause
4619 **      2 - tx pause
4620 **      3 - full
4621 */
4622 static int
4623 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4624 {
4625         /*
4626          * TODO: ensure flow control is disabled if
4627          * priority flow control is enabled
4628          *
4629          * TODO: ensure tx CRC by hardware should be enabled
4630          * if tx flow control is enabled.
4631          */
4632         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4633         struct i40e_hw *hw = &pf->hw;
4634         device_t dev = pf->dev;
4635         int error = 0;
4636         enum i40e_status_code aq_error = 0;
4637         u8 fc_aq_err = 0;
4638
4639         /* Get request */
4640         error = sysctl_handle_int(oidp, &pf->fc, 0, req);
4641         if ((error) || (req->newptr == NULL))
4642                 return (error);
4643         if (pf->fc < 0 || pf->fc > 3) {
4644                 device_printf(dev,
4645                     "Invalid fc mode; valid modes are 0 through 3\n");
4646                 return (EINVAL);
4647         }
4648
4649         /*
4650         ** Changing flow control mode currently does not work on
4651         ** 40GBASE-CR4 PHYs
4652         */
4653         if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4654             || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4655                 device_printf(dev, "Changing flow control mode unsupported"
4656                     " on 40GBase-CR4 media.\n");
4657                 return (ENODEV);
4658         }
4659
4660         /* Set fc ability for port */
4661         hw->fc.requested_mode = pf->fc;
4662         aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4663         if (aq_error) {
4664                 device_printf(dev,
4665                     "%s: Error setting new fc mode %d; fc_err %#x\n",
4666                     __func__, aq_error, fc_aq_err);
4667                 return (EAGAIN);
4668         }
4669
4670         return (0);
4671 }
4672
4673 static int
4674 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4675 {
4676         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4677         struct i40e_hw *hw = &pf->hw;
4678         int error = 0, index = 0;
4679
4680         char *speeds[] = {
4681                 "Unknown",
4682                 "100M",
4683                 "1G",
4684                 "10G",
4685                 "40G",
4686                 "20G"
4687         };
4688
4689         ixl_update_link_status(pf);
4690
4691         switch (hw->phy.link_info.link_speed) {
4692         case I40E_LINK_SPEED_100MB:
4693                 index = 1;
4694                 break;
4695         case I40E_LINK_SPEED_1GB:
4696                 index = 2;
4697                 break;
4698         case I40E_LINK_SPEED_10GB:
4699                 index = 3;
4700                 break;
4701         case I40E_LINK_SPEED_40GB:
4702                 index = 4;
4703                 break;
4704         case I40E_LINK_SPEED_20GB:
4705                 index = 5;
4706                 break;
4707         case I40E_LINK_SPEED_UNKNOWN:
4708         default:
4709                 index = 0;
4710                 break;
4711         }
4712
4713         error = sysctl_handle_string(oidp, speeds[index],
4714             strlen(speeds[index]), req);
4715         return (error);
4716 }
4717
/*
** Apply a new set of advertised link speeds to the PHY.
**
** 'speeds' is a bitmask (0x1 = 100Mb, 0x2 = 1G, 0x4 = 10G, 0x8 = 20G);
** see ixl_set_advertise() for validation of the mask.
** Returns 0 on success or EAGAIN if either admin queue command fails.
*/
static int
ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_aq_set_phy_config config;
	enum i40e_status_code aq_error = 0;

	/* Get current capability information */
	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, FALSE, &abilities, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error getting phy capabilities %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return (EAGAIN);
	}

	/*
	 * Prepare new config: carry over the current abilities/EEE
	 * settings so only the advertised speeds change.
	 */
	bzero(&config, sizeof(config));
	config.phy_type = abilities.phy_type;
	config.abilities = abilities.abilities
	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	config.eee_capability = abilities.eee_capability;
	config.eeer = abilities.eeer_val;
	config.low_power_ctrl = abilities.d3_lpan;
	/* Translate into aq cmd link_speed */
	if (speeds & 0x8)
		config.link_speed |= I40E_LINK_SPEED_20GB;
	if (speeds & 0x4)
		config.link_speed |= I40E_LINK_SPEED_10GB;
	if (speeds & 0x2)
		config.link_speed |= I40E_LINK_SPEED_1GB;
	if (speeds & 0x1)
		config.link_speed |= I40E_LINK_SPEED_100MB;

	/* Do aq command & restart link */
	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error setting new phy config %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return (EAGAIN);
	}

	/*
	** This seems a bit heavy handed, but we
	** need to get a reinit on some devices
	*/
	IXL_PF_LOCK(pf);
	ixl_stop(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);

	return (0);
}
4777
4778 /*
4779 ** Control link advertise speed:
4780 **      Flags:
4781 **      0x1 - advertise 100 Mb
4782 **      0x2 - advertise 1G
4783 **      0x4 - advertise 10G
4784 **      0x8 - advertise 20G
4785 **
4786 ** Does not work on 40G devices.
4787 */
static int
ixl_set_advertise(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int requested_ls = 0;
	int error = 0;

	/*
	** FW doesn't support changing advertised speed
	** for 40G devices; speed is always 40G.
	*/
	if (i40e_is_40G_device(hw->device_id))
		return (ENODEV);

	/* Read in new mode (local copy, so a rejected value isn't kept) */
	requested_ls = pf->advertised_speed;
	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	/* Check for sane value: at least one bit, none above 20G */
	if (requested_ls < 0x1 || requested_ls > 0xE) {
		device_printf(dev, "Invalid advertised speed; "
		    "valid modes are 0x1 through 0xE\n");
		return (EINVAL);
	}
	/* Then check for validity based on adapter type */
	switch (hw->device_id) {
	case I40E_DEV_ID_10G_BASE_T:
		/* 10GBase-T parts cannot advertise 20G */
		if (requested_ls & 0x8) {
			device_printf(dev,
			    "20Gbs speed not supported on this device.\n");
			return (EINVAL);
		}
		break;
	case I40E_DEV_ID_20G_KR2:
		/* 20G KR2 parts cannot advertise 100M */
		if (requested_ls & 0x1) {
			device_printf(dev,
			    "100Mbs speed not supported on this device.\n");
			return (EINVAL);
		}
		break;
	default:
		/* All other supported parts advertise only 1G/10G */
		if (requested_ls & ~0x6) {
			device_printf(dev,
			    "Only 1/10Gbs speeds are supported on this device.\n");
			return (EINVAL);
		}
		break;
	}

	/* Exit if no change */
	if (pf->advertised_speed == requested_ls)
		return (0);

	error = ixl_set_advertised_speeds(pf, requested_ls);
	if (error)
		return (error);

	/* Record the new mode only after the PHY accepted it */
	pf->advertised_speed = requested_ls;
	ixl_update_link_status(pf);
	return (0);
}
4852
4853 /*
4854 ** Get the width and transaction speed of
4855 ** the bus this adapter is plugged into.
4856 */
4857 static u16
4858 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4859 {
4860         u16                     link;
4861         u32                     offset;
4862                 
4863                 
4864         /* Get the PCI Express Capabilities offset */
4865         pci_find_cap(dev, PCIY_EXPRESS, &offset);
4866
4867         /* ...and read the Link Status Register */
4868         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4869
4870         switch (link & I40E_PCI_LINK_WIDTH) {
4871         case I40E_PCI_LINK_WIDTH_1:
4872                 hw->bus.width = i40e_bus_width_pcie_x1;
4873                 break;
4874         case I40E_PCI_LINK_WIDTH_2:
4875                 hw->bus.width = i40e_bus_width_pcie_x2;
4876                 break;
4877         case I40E_PCI_LINK_WIDTH_4:
4878                 hw->bus.width = i40e_bus_width_pcie_x4;
4879                 break;
4880         case I40E_PCI_LINK_WIDTH_8:
4881                 hw->bus.width = i40e_bus_width_pcie_x8;
4882                 break;
4883         default:
4884                 hw->bus.width = i40e_bus_width_unknown;
4885                 break;
4886         }
4887
4888         switch (link & I40E_PCI_LINK_SPEED) {
4889         case I40E_PCI_LINK_SPEED_2500:
4890                 hw->bus.speed = i40e_bus_speed_2500;
4891                 break;
4892         case I40E_PCI_LINK_SPEED_5000:
4893                 hw->bus.speed = i40e_bus_speed_5000;
4894                 break;
4895         case I40E_PCI_LINK_SPEED_8000:
4896                 hw->bus.speed = i40e_bus_speed_8000;
4897                 break;
4898         default:
4899                 hw->bus.speed = i40e_bus_speed_unknown;
4900                 break;
4901         }
4902
4903
4904         device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4905             ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4906             (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4907             (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4908             (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4909             (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4910             (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4911             ("Unknown"));
4912
4913         if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4914             (hw->bus.speed < i40e_bus_speed_8000)) {
4915                 device_printf(dev, "PCI-Express bandwidth available"
4916                     " for this device\n     may be insufficient for"
4917                     " optimal performance.\n");
4918                 device_printf(dev, "For expected performance a x8 "
4919                     "PCIE Gen3 slot is required.\n");
4920         }
4921
4922         return (link);
4923 }
4924
4925 static int
4926 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4927 {
4928         struct ixl_pf   *pf = (struct ixl_pf *)arg1;
4929         struct i40e_hw  *hw = &pf->hw;
4930         char            buf[32];
4931
4932         snprintf(buf, sizeof(buf),
4933             "f%d.%d a%d.%d n%02x.%02x e%08x",
4934             hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
4935             hw->aq.api_maj_ver, hw->aq.api_min_ver,
4936             (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
4937             IXL_NVM_VERSION_HI_SHIFT,
4938             (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
4939             IXL_NVM_VERSION_LO_SHIFT,
4940             hw->nvm.eetrack);
4941         return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4942 }
4943
4944
4945 #ifdef IXL_DEBUG_SYSCTL
4946 static int
4947 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4948 {
4949         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4950         struct i40e_hw *hw = &pf->hw;
4951         struct i40e_link_status link_status;
4952         char buf[512];
4953
4954         enum i40e_status_code aq_error = 0;
4955
4956         aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4957         if (aq_error) {
4958                 printf("i40e_aq_get_link_info() error %d\n", aq_error);
4959                 return (EPERM);
4960         }
4961
4962         sprintf(buf, "\n"
4963             "PHY Type : %#04x\n"
4964             "Speed    : %#04x\n" 
4965             "Link info: %#04x\n" 
4966             "AN info  : %#04x\n" 
4967             "Ext info : %#04x", 
4968             link_status.phy_type, link_status.link_speed, 
4969             link_status.link_info, link_status.an_info,
4970             link_status.ext_info);
4971
4972         return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4973 }
4974
4975 static int
4976 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4977 {
4978         struct ixl_pf           *pf = (struct ixl_pf *)arg1;
4979         struct i40e_hw          *hw = &pf->hw;
4980         char                    buf[512];
4981         enum i40e_status_code   aq_error = 0;
4982
4983         struct i40e_aq_get_phy_abilities_resp abilities;
4984
4985         aq_error = i40e_aq_get_phy_capabilities(hw,
4986             TRUE, FALSE, &abilities, NULL);
4987         if (aq_error) {
4988                 printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4989                 return (EPERM);
4990         }
4991
4992         sprintf(buf, "\n"
4993             "PHY Type : %#010x\n"
4994             "Speed    : %#04x\n" 
4995             "Abilities: %#04x\n" 
4996             "EEE cap  : %#06x\n" 
4997             "EEER reg : %#010x\n" 
4998             "D3 Lpan  : %#04x",
4999             abilities.phy_type, abilities.link_speed, 
5000             abilities.abilities, abilities.eee_capability,
5001             abilities.eeer_val, abilities.d3_lpan);
5002
5003         return (sysctl_handle_string(oidp, buf, strlen(buf), req));
5004 }
5005
5006 static int
5007 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5008 {
5009         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5010         struct ixl_vsi *vsi = &pf->vsi;
5011         struct ixl_mac_filter *f;
5012         char *buf, *buf_i;
5013
5014         int error = 0;
5015         int ftl_len = 0;
5016         int ftl_counter = 0;
5017         int buf_len = 0;
5018         int entry_len = 42;
5019
5020         SLIST_FOREACH(f, &vsi->ftl, next) {
5021                 ftl_len++;
5022         }
5023
5024         if (ftl_len < 1) {
5025                 sysctl_handle_string(oidp, "(none)", 6, req);
5026                 return (0);
5027         }
5028
5029         buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5030         buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
5031
5032         sprintf(buf_i++, "\n");
5033         SLIST_FOREACH(f, &vsi->ftl, next) {
5034                 sprintf(buf_i,
5035                     MAC_FORMAT ", vlan %4d, flags %#06x",
5036                     MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5037                 buf_i += entry_len;
5038                 /* don't print '\n' for last entry */
5039                 if (++ftl_counter != ftl_len) {
5040                         sprintf(buf_i, "\n");
5041                         buf_i++;
5042                 }
5043         }
5044
5045         error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5046         if (error)
5047                 printf("sysctl error: %d\n", error);
5048         free(buf, M_DEVBUF);
5049         return error;
5050 }
5051
5052 #define IXL_SW_RES_SIZE 0x14
5053 static int
5054 ixl_res_alloc_cmp(const void *a, const void *b)
5055 {
5056         const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5057         one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5058         two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5059
5060         return ((int)one->resource_type - (int)two->resource_type);
5061 }
5062
/*
** Sysctl handler: dump the switch resource allocation table
** (guaranteed/total/used counts per resource type), sorted by type.
** Returns 0 on success, ENOMEM if the sbuf cannot be allocated, or the
** admin-queue/sbuf/sysctl error code otherwise.
*/
static int
ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;

	u8 num_entries;
	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];

	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	/* Fetch up to IXL_SW_RES_SIZE entries from firmware */
	bzero(resp, sizeof(resp));
	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
				resp,
				IXL_SW_RES_SIZE,
				NULL);
	if (error) {
		device_printf(dev,
		    "%s: get_switch_resource_alloc() error %d, aq error %d\n",
		    __func__, error, hw->aq.asq_last_status);
		sbuf_delete(buf);
		return error;
	}

	/* Sort entries by type for display */
	qsort(resp, num_entries,
	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
	    &ixl_res_alloc_cmp);

	sbuf_cat(buf, "\n");
	sbuf_printf(buf, "# of entries: %d\n", num_entries);
	sbuf_printf(buf,
	    "Type | Guaranteed | Total | Used   | Un-allocated\n"
	    "     | (this)     | (all) | (this) | (all)       \n");
	for (int i = 0; i < num_entries; i++) {
		sbuf_printf(buf,
		    "%#4x | %10d   %5d   %6d   %12d",
		    resp[i].resource_type,
		    resp[i].guaranteed,
		    resp[i].total,
		    resp[i].used,
		    resp[i].total_unalloced);
		/* no trailing newline after the final row */
		if (i < num_entries - 1)
			sbuf_cat(buf, "\n");
	}

	error = sbuf_finish(buf);
	if (error) {
		device_printf(dev, "Error finishing sbuf: %d\n", error);
		sbuf_delete(buf);
		return error;
	}

	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
	if (error)
		device_printf(dev, "sysctl error: %d\n", error);
	sbuf_delete(buf);

	return (error);
}
5130
5131 /*
5132 ** Caller must init and delete sbuf; this function will clear and
5133 ** finish it for caller.
5134 */
5135 static char *
5136 ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
5137 {
5138         sbuf_clear(s);
5139
5140         if (seid == 0 && uplink)
5141                 sbuf_cat(s, "Network");
5142         else if (seid == 0)
5143                 sbuf_cat(s, "Host");
5144         else if (seid == 1)
5145                 sbuf_cat(s, "EMP");
5146         else if (seid <= 5)
5147                 sbuf_printf(s, "MAC %d", seid - 2);
5148         else if (seid <= 15)
5149                 sbuf_cat(s, "Reserved");
5150         else if (seid <= 31)
5151                 sbuf_printf(s, "PF %d", seid - 16);
5152         else if (seid <= 159)
5153                 sbuf_printf(s, "VF %d", seid - 32);
5154         else if (seid <= 287)
5155                 sbuf_cat(s, "Reserved");
5156         else if (seid <= 511)
5157                 sbuf_cat(s, "Other"); // for other structures
5158         else if (seid <= 895)
5159                 sbuf_printf(s, "VSI %d", seid - 512);
5160         else if (seid <= 1023)
5161                 sbuf_printf(s, "Reserved");
5162         else
5163                 sbuf_cat(s, "Invalid");
5164
5165         sbuf_finish(s);
5166         return sbuf_data(s);
5167 }
5168
5169 static int
5170 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5171 {
5172         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5173         struct i40e_hw *hw = &pf->hw;
5174         device_t dev = pf->dev;
5175         struct sbuf *buf;
5176         struct sbuf *nmbuf;
5177         int error = 0;
5178         u8 aq_buf[I40E_AQ_LARGE_BUF];
5179
5180         u16 next = 0;
5181         struct i40e_aqc_get_switch_config_resp *sw_config;
5182         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5183
5184         buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
5185         if (!buf) {
5186                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5187                 return (ENOMEM);
5188         }
5189
5190         error = i40e_aq_get_switch_config(hw, sw_config,
5191             sizeof(aq_buf), &next, NULL);
5192         if (error) {
5193                 device_printf(dev,
5194                     "%s: aq_get_switch_config() error %d, aq error %d\n",
5195                     __func__, error, hw->aq.asq_last_status);
5196                 sbuf_delete(buf);
5197                 return error;
5198         }
5199
5200         nmbuf = sbuf_new_auto();
5201         if (!nmbuf) {
5202                 device_printf(dev, "Could not allocate sbuf for name output.\n");
5203                 return (ENOMEM);
5204         }
5205
5206         sbuf_cat(buf, "\n");
5207         // Assuming <= 255 elements in switch
5208         sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
5209         /* Exclude:
5210         ** Revision -- all elements are revision 1 for now
5211         */
5212         sbuf_printf(buf,
5213             "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
5214             "                |          |          | (uplink)\n");
5215         for (int i = 0; i < sw_config->header.num_reported; i++) {
5216                 // "%4d (%8s) | %8s   %8s   %#8x",
5217                 sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5218                 sbuf_cat(buf, " ");
5219                 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5220                     sw_config->element[i].seid, false));
5221                 sbuf_cat(buf, " | ");
5222                 sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5223                     sw_config->element[i].uplink_seid, true));
5224                 sbuf_cat(buf, "   ");
5225                 sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5226                     sw_config->element[i].downlink_seid, false));
5227                 sbuf_cat(buf, "   ");
5228                 sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
5229                 if (i < sw_config->header.num_reported - 1)
5230                         sbuf_cat(buf, "\n");
5231         }
5232         sbuf_delete(nmbuf);
5233
5234         error = sbuf_finish(buf);
5235         if (error) {
5236                 device_printf(dev, "Error finishing sbuf: %d\n", error);
5237                 sbuf_delete(buf);
5238                 return error;
5239         }
5240
5241         error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
5242         if (error)
5243                 device_printf(dev, "sysctl error: %d\n", error);
5244         sbuf_delete(buf);
5245
5246         return (error);
5247 }
5248 #endif /* IXL_DEBUG_SYSCTL */
5249
5250
5251 #ifdef PCI_IOV
/*
** Allocate and configure a data-port VSI for a VF: builds the VSI
** context (security, VLAN, non-contiguous queue map), adds it via the
** admin queue, fetches its parameters, and disables its BW limit.
** Returns 0 on success or an errno translated from the AQ status.
*/
static int
ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	struct i40e_vsi_context vsi_ctx;
	int i;
	uint16_t first_queue;
	enum i40e_status_code code;

	hw = &pf->hw;
	vsi = &pf->vsi;

	vsi_ctx.pf_num = hw->pf_id;
	vsi_ctx.uplink_seid = pf->veb_seid;
	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;

	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));

	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	vsi_ctx.info.switch_id = htole16(0);

	/* Optionally enable MAC anti-spoof checking for this VF */
	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	vsi_ctx.info.sec_flags = 0;
	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;

	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	/*
	 * Map this VF's queues: its block starts after the PF's own
	 * queues; unused map slots are marked with the queue mask.
	 */
	vsi_ctx.info.valid_sections |=
	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
	first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
	for (i = 0; i < IXLV_MAX_QUEUES; i++)
		vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);

	vsi_ctx.info.tc_mapping[0] = htole16(
	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
	    (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));

	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
	if (code != I40E_SUCCESS)
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
	vf->vsi.seid = vsi_ctx.seid;
	vf->vsi.vsi_num = vsi_ctx.vsi_number;
	vf->vsi.first_queue = first_queue;
	vf->vsi.num_queues = IXLV_MAX_QUEUES;

	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
	if (code != I40E_SUCCESS)
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));

	/* Bandwidth limit of 0 disables the limit */
	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
	if (code != I40E_SUCCESS) {
		device_printf(pf->dev, "Failed to disable BW limit: %d\n",
		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
	}

	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
	return (0);
}
5320
5321 static int
5322 ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5323 {
5324         struct i40e_hw *hw;
5325         int error;
5326
5327         hw = &pf->hw;
5328
5329         error = ixl_vf_alloc_vsi(pf, vf);
5330         if (error != 0)
5331                 return (error);
5332
5333         vf->vsi.hw_filters_add = 0;
5334         vf->vsi.hw_filters_del = 0;
5335         ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
5336         ixl_reconfigure_filters(&vf->vsi);
5337
5338         return (0);
5339 }
5340
5341 static void
5342 ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
5343     uint32_t val)
5344 {
5345         uint32_t qtable;
5346         int index, shift;
5347
5348         /*
5349          * Two queues are mapped in a single register, so we have to do some
5350          * gymnastics to convert the queue number into a register index and
5351          * shift.
5352          */
5353         index = qnum / 2;
5354         shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
5355
5356         qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
5357         qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
5358         qtable |= val << shift;
5359         wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
5360 }
5361
/*
** Program the hardware queue mappings for a VF: enable its VSI queue
** table, map the VF's relative queue indices to absolute PF queues,
** and mark the remaining VSI queue slots unused.
*/
static void
ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t qtable;
	int i;

	hw = &pf->hw;

	/*
	 * Contiguous mappings aren't actually supported by the hardware,
	 * so we have to use non-contiguous mappings.
	 */
	wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* Enable TX/RX queue mapping for this VF */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);

	/* VF-relative queue i -> absolute queue first_queue + i */
	for (i = 0; i < vf->vsi.num_queues; i++) {
		qtable = (vf->vsi.first_queue + i) <<
		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;

		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
	}

	/* Map queues allocated to VF to its VSI. */
	for (i = 0; i < vf->vsi.num_queues; i++)
		ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);

	/* Set rest of VSI queues as unused. */
	for (; i < IXL_MAX_VSI_QUEUES; i++)
		ixl_vf_map_vsi_queue(hw, vf, i,
		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);

	ixl_flush(hw);
}
5399
5400 static void
5401 ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
5402 {
5403         struct i40e_hw *hw;
5404
5405         hw = &pf->hw;
5406
5407         if (vsi->seid == 0)
5408                 return;
5409
5410         i40e_aq_delete_element(hw, vsi->seid, NULL);
5411 }
5412
/*
** Disable one VF queue interrupt by writing its dynamic control
** register (CLEARPBA only, no INTENA bit), then flush the write.
*/
static void
ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
{

	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
	ixl_flush(hw);
}
5420
/*
** Detach a VF interrupt from its queue list by writing an "end of
** list" entry to the VPINT linked-list register, then flush.
*/
static void
ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
{

	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
	ixl_flush(hw);
}
5429
5430 static void
5431 ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
5432 {
5433         struct i40e_hw *hw;
5434         uint32_t vfint_reg, vpint_reg;
5435         int i;
5436
5437         hw = &pf->hw;
5438
5439         ixl_vf_vsi_release(pf, &vf->vsi);
5440
5441         /* Index 0 has a special register. */
5442         ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
5443
5444         for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5445                 vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i , vf->vf_num);
5446                 ixl_vf_disable_queue_intr(hw, vfint_reg);
5447         }
5448
5449         /* Index 0 has a special register. */
5450         ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
5451
5452         for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5453                 vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
5454                 ixl_vf_unregister_intr(hw, vpint_reg);
5455         }
5456
5457         vf->vsi.num_queues = 0;
5458 }
5459
5460 static int
5461 ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
5462 {
5463         struct i40e_hw *hw;
5464         int i;
5465         uint16_t global_vf_num;
5466         uint32_t ciad;
5467
5468         hw = &pf->hw;
5469         global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5470
5471         wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
5472              (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
5473         for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5474                 ciad = rd32(hw, I40E_PF_PCI_CIAD);
5475                 if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
5476                         return (0);
5477                 DELAY(1);
5478         }
5479
5480         return (ETIMEDOUT);
5481 }
5482
/*
** Trigger a software reset of a VF by setting VFSWR in VPGEN_VFRTRIG,
** then run the full reinit sequence (which waits for the reset to
** complete and re-provisions the VF).
*/
static void
ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t vfrtrig;

	hw = &pf->hw;

	/* Set the reset-trigger bit and make sure it reaches hardware. */
	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
	ixl_flush(hw);

	ixl_reinit_vf(pf, vf);
}
5498
/*
** Complete a VF reset and re-provision the VF: flush its PCIe
** transactions, wait for the reset-done bit, clear the reset trigger,
** tear down and rebuild the VF's VSI and queue mappings, and finally
** mark the VF active. Failures are logged but the sequence continues
** so the VF is left in a consistent state.
*/
static void
ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t vfrstat, vfrtrig;
	int i, error;

	hw = &pf->hw;

	/* Best effort: continue even if PCIe activity doesn't quiesce. */
	error = ixl_flush_pcie(pf, vf);
	if (error != 0)
		device_printf(pf->dev,
		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
		    vf->vf_num);

	/* Poll for the reset-done indication from hardware. */
	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
		DELAY(10);

		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
			break;
	}

	if (i == IXL_VF_RESET_TIMEOUT)
		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);

	/* Tell the VF (via its RSTAT register) that reset has completed. */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);

	/* Clear the software-reset trigger set by ixl_reset_vf(). */
	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);

	if (vf->vsi.seid != 0)
		ixl_disable_rings(&vf->vsi);

	/* Rebuild the VF's VSI and queue mappings from scratch. */
	ixl_vf_release_resources(pf, vf);
	ixl_vf_setup_vsi(pf, vf);
	ixl_vf_map_queues(pf, vf);

	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
	ixl_flush(hw);
}
5541
5542 static const char *
5543 ixl_vc_opcode_str(uint16_t op)
5544 {
5545
5546         switch (op) {
5547         case I40E_VIRTCHNL_OP_VERSION:
5548                 return ("VERSION");
5549         case I40E_VIRTCHNL_OP_RESET_VF:
5550                 return ("RESET_VF");
5551         case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
5552                 return ("GET_VF_RESOURCES");
5553         case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
5554                 return ("CONFIG_TX_QUEUE");
5555         case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
5556                 return ("CONFIG_RX_QUEUE");
5557         case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
5558                 return ("CONFIG_VSI_QUEUES");
5559         case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
5560                 return ("CONFIG_IRQ_MAP");
5561         case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
5562                 return ("ENABLE_QUEUES");
5563         case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
5564                 return ("DISABLE_QUEUES");
5565         case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
5566                 return ("ADD_ETHER_ADDRESS");
5567         case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
5568                 return ("DEL_ETHER_ADDRESS");
5569         case I40E_VIRTCHNL_OP_ADD_VLAN:
5570                 return ("ADD_VLAN");
5571         case I40E_VIRTCHNL_OP_DEL_VLAN:
5572                 return ("DEL_VLAN");
5573         case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
5574                 return ("CONFIG_PROMISCUOUS_MODE");
5575         case I40E_VIRTCHNL_OP_GET_STATS:
5576                 return ("GET_STATS");
5577         case I40E_VIRTCHNL_OP_FCOE:
5578                 return ("FCOE");
5579         case I40E_VIRTCHNL_OP_EVENT:
5580                 return ("EVENT");
5581         default:
5582                 return ("UNKNOWN");
5583         }
5584 }
5585
5586 static int
5587 ixl_vc_opcode_level(uint16_t opcode)
5588 {
5589
5590         switch (opcode) {
5591         case I40E_VIRTCHNL_OP_GET_STATS:
5592                 return (10);
5593         default:
5594                 return (5);
5595         }
5596 }
5597
5598 static void
5599 ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5600     enum i40e_status_code status, void *msg, uint16_t len)
5601 {
5602         struct i40e_hw *hw;
5603         int global_vf_id;
5604
5605         hw = &pf->hw;
5606         global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
5607
5608         I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
5609             "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
5610             ixl_vc_opcode_str(op), op, status, vf->vf_num);
5611
5612         i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
5613 }
5614
/* Send a zero-length success response for the given opcode to a VF. */
static void
ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
{

	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
}
5621
/*
 * Send a zero-length error response to a VF and log the failure.
 * The file/line parameters identify the call site in the debug output;
 * presumably supplied by the i40e_send_vf_nack() wrapper macro used
 * throughout this file — confirm against its definition.
 */
static void
ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
    enum i40e_status_code status, const char *file, int line)
{

	I40E_VC_DEBUG(pf, 1,
	    "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
	    ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
}
5632
5633 static void
5634 ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5635     uint16_t msg_size)
5636 {
5637         struct i40e_virtchnl_version_info reply;
5638
5639         if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
5640                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
5641                     I40E_ERR_PARAM);
5642                 return;
5643         }
5644
5645         reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
5646         reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
5647         ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
5648             sizeof(reply));
5649 }
5650
5651 static void
5652 ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5653     uint16_t msg_size)
5654 {
5655
5656         if (msg_size != 0) {
5657                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
5658                     I40E_ERR_PARAM);
5659                 return;
5660         }
5661
5662         ixl_reset_vf(pf, vf);
5663
5664         /* No response to a reset message. */
5665 }
5666
5667 static void
5668 ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5669     uint16_t msg_size)
5670 {
5671         struct i40e_virtchnl_vf_resource reply;
5672
5673         if (msg_size != 0) {
5674                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5675                     I40E_ERR_PARAM);
5676                 return;
5677         }
5678
5679         bzero(&reply, sizeof(reply));
5680
5681         reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
5682
5683         reply.num_vsis = 1;
5684         reply.num_queue_pairs = vf->vsi.num_queues;
5685         reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
5686         reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
5687         reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
5688         reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
5689         memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
5690
5691         ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5692             I40E_SUCCESS, &reply, sizeof(reply));
5693 }
5694
/*
 * Program one VF TX queue into the HMC from a virtual channel
 * CONFIG_TX_QUEUE request.
 *
 * Translates the VF-relative queue id to the PF-global queue number,
 * writes the HMC TX queue context, and associates the queue with the VF
 * via QTX_CTL.  Returns 0 on success or EINVAL on any HMC failure.
 */
static int
ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
    struct i40e_virtchnl_txq_info *info)
{
	struct i40e_hw *hw;
	struct i40e_hmc_obj_txq txq;
	uint16_t global_queue_num, global_vf_num;
	enum i40e_status_code status;
	uint32_t qtx_ctl;

	hw = &pf->hw;
	/* VF queue ids are relative to the VSI's first PF-global queue. */
	global_queue_num = vf->vsi.first_queue + info->queue_id;
	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
	bzero(&txq, sizeof(txq));

	/* Clear any stale context before writing the new one. */
	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	/* Ring base is expressed in IXL_TX_CTX_BASE_UNITS-sized units. */
	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;

	txq.head_wb_ena = info->headwb_enabled;
	txq.head_wb_addr = info->dma_headwb_addr;
	txq.qlen = info->ring_len;
	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
	txq.rdylist_act = 0;

	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	/* Attribute the queue to this VF in the TX queue control register. */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
	ixl_flush(hw);

	return (0);
}
5734
/*
 * Program one VF RX queue into the HMC from a virtual channel
 * CONFIG_RX_QUEUE request.
 *
 * Validates the VF-supplied buffer and frame sizes, translates the
 * VF-relative queue id to the PF-global queue number, and writes the HMC
 * RX queue context.  Returns 0 on success or EINVAL on a bad parameter
 * or HMC failure.
 */
static int
ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
    struct i40e_virtchnl_rxq_info *info)
{
	struct i40e_hw *hw;
	struct i40e_hmc_obj_rxq rxq;
	uint16_t global_queue_num;
	enum i40e_status_code status;

	hw = &pf->hw;
	/* VF queue ids are relative to the VSI's first PF-global queue. */
	global_queue_num = vf->vsi.first_queue + info->queue_id;
	bzero(&rxq, sizeof(rxq));

	/* Reject VF-supplied sizes beyond what the hardware supports. */
	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
		return (EINVAL);

	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
	    info->max_pkt_size < ETHER_MIN_LEN)
		return (EINVAL);

	if (info->splithdr_enabled) {
		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
			return (EINVAL);

		/* Keep only the supported header-split position bits. */
		rxq.hsplit_0 = info->rx_split_pos &
		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* dtype 2: descriptor layout used with header split enabled
		 * — NOTE(review): confirm against the datasheet. */
		rxq.dtype = 2;
	}

	/* Clear any stale context before writing the new one. */
	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	/* Ring base is expressed in IXL_RX_CTX_BASE_UNITS-sized units. */
	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
	rxq.qlen = info->ring_len;

	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	rxq.dsize = 1;
	rxq.crcstrip = 1;
	rxq.l2tsel = 1;

	rxq.rxmax = info->max_pkt_size;
	rxq.tphrdesc_ena = 1;
	rxq.tphwdesc_ena = 1;
	rxq.tphdata_ena = 1;
	rxq.tphhead_ena = 1;
	rxq.lrxqthresh = 2;
	rxq.prefena = 1;

	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	return (0);
}
5796
5797 static void
5798 ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5799     uint16_t msg_size)
5800 {
5801         struct i40e_virtchnl_vsi_queue_config_info *info;
5802         struct i40e_virtchnl_queue_pair_info *pair;
5803         int i;
5804
5805         if (msg_size < sizeof(*info)) {
5806                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5807                     I40E_ERR_PARAM);
5808                 return;
5809         }
5810
5811         info = msg;
5812         if (info->num_queue_pairs == 0) {
5813                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5814                     I40E_ERR_PARAM);
5815                 return;
5816         }
5817
5818         if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
5819                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5820                     I40E_ERR_PARAM);
5821                 return;
5822         }
5823
5824         if (info->vsi_id != vf->vsi.vsi_num) {
5825                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5826                     I40E_ERR_PARAM);
5827                 return;
5828         }
5829
5830         for (i = 0; i < info->num_queue_pairs; i++) {
5831                 pair = &info->qpair[i];
5832
5833                 if (pair->txq.vsi_id != vf->vsi.vsi_num ||
5834                     pair->rxq.vsi_id != vf->vsi.vsi_num ||
5835                     pair->txq.queue_id != pair->rxq.queue_id ||
5836                     pair->txq.queue_id >= vf->vsi.num_queues) {
5837
5838                         i40e_send_vf_nack(pf, vf,
5839                             I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5840                         return;
5841                 }
5842
5843                 if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
5844                         i40e_send_vf_nack(pf, vf,
5845                             I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5846                         return;
5847                 }
5848
5849                 if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
5850                         i40e_send_vf_nack(pf, vf,
5851                             I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5852                         return;
5853                 }
5854         }
5855
5856         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
5857 }
5858
5859 static void
5860 ixl_vf_set_qctl(struct ixl_pf *pf,
5861     const struct i40e_virtchnl_vector_map *vector,
5862     enum i40e_queue_type cur_type, uint16_t cur_queue,
5863     enum i40e_queue_type *last_type, uint16_t *last_queue)
5864 {
5865         uint32_t offset, qctl;
5866         uint16_t itr_indx;
5867
5868         if (cur_type == I40E_QUEUE_TYPE_RX) {
5869                 offset = I40E_QINT_RQCTL(cur_queue);
5870                 itr_indx = vector->rxitr_idx;
5871         } else {
5872                 offset = I40E_QINT_TQCTL(cur_queue);
5873                 itr_indx = vector->txitr_idx;
5874         }
5875
5876         qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5877             (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5878             (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5879             I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5880             (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
5881
5882         wr32(&pf->hw, offset, qctl);
5883
5884         *last_type = cur_type;
5885         *last_queue = cur_queue;
5886 }
5887
/*
 * Build the interrupt linked list for one MSI-X vector of a VF.
 *
 * Walks the vector's RX and TX queue bitmaps, programming each queue's
 * cause-control register so the queues form a linked list (built from
 * the tail: the first queue processed points at the end-of-list marker),
 * then points the vector's LNKLST register at the head.
 */
static void
ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
    const struct i40e_virtchnl_vector_map *vector)
{
	struct i40e_hw *hw;
	u_int qindex;
	enum i40e_queue_type type, last_type;
	uint32_t lnklst_reg;
	uint16_t rxq_map, txq_map, cur_queue, last_queue;

	hw = &pf->hw;

	rxq_map = vector->rxq_map;
	txq_map = vector->txq_map;

	/* Start from the list terminator; each queue links to the previous. */
	last_queue = IXL_END_OF_INTR_LNKLST;
	last_type = I40E_QUEUE_TYPE_RX;

	/*
	 * The datasheet says to optimize performance, RX queues and TX queues
	 * should be interleaved in the interrupt linked list, so we process
	 * both at once here.
	 */
	while ((rxq_map != 0) || (txq_map != 0)) {
		if (txq_map != 0) {
			/* Lowest set bit selects the next TX queue to link. */
			qindex = ffs(txq_map) - 1;
			type = I40E_QUEUE_TYPE_TX;
			cur_queue = vf->vsi.first_queue + qindex;
			ixl_vf_set_qctl(pf, vector, type, cur_queue,
			    &last_type, &last_queue);
			txq_map &= ~(1 << qindex);
		}

		if (rxq_map != 0) {
			qindex = ffs(rxq_map) - 1;
			type = I40E_QUEUE_TYPE_RX;
			cur_queue = vf->vsi.first_queue + qindex;
			ixl_vf_set_qctl(pf, vector, type, cur_queue,
			    &last_type, &last_queue);
			rxq_map &= ~(1 << qindex);
		}
	}

	/* Vector 0 uses a dedicated register; the rest are indexed. */
	if (vector->vector_id == 0)
		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
	else
		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
		    vf->vf_num);
	wr32(hw, lnklst_reg,
	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));

	ixl_flush(hw);
}
5942
/*
 * Handle I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: program the interrupt/queue
 * mappings requested by the VF.
 *
 * Each vector map is validated (vector id, VSI id, queue bitmaps, ITR
 * indices) before being handed to ixl_vf_config_vector(); the request is
 * NACKed and abandoned on the first invalid entry.
 */
static void
ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_virtchnl_irq_map_info *map;
	struct i40e_virtchnl_vector_map *vector;
	struct i40e_hw *hw;
	int i, largest_txq, largest_rxq;

	hw = &pf->hw;

	/* The message must at least hold the fixed-size header. */
	if (msg_size < sizeof(*map)) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
		    I40E_ERR_PARAM);
		return;
	}

	map = msg;
	if (map->num_vectors == 0) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
		    I40E_ERR_PARAM);
		return;
	}

	/* The variable part must match the advertised vector count. */
	if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < map->num_vectors; i++) {
		vector = &map->vecmap[i];

		/* The vector and VSI must both belong to this VF. */
		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
		    vector->vsi_id != vf->vsi.vsi_num) {
			i40e_send_vf_nack(pf, vf,
			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
			return;
		}

		/* Every queue referenced in the RX bitmap must exist. */
		if (vector->rxq_map != 0) {
			largest_rxq = fls(vector->rxq_map) - 1;
			if (largest_rxq >= vf->vsi.num_queues) {
				i40e_send_vf_nack(pf, vf,
				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
				    I40E_ERR_PARAM);
				return;
			}
		}

		/* Likewise for the TX bitmap. */
		if (vector->txq_map != 0) {
			largest_txq = fls(vector->txq_map) - 1;
			if (largest_txq >= vf->vsi.num_queues) {
				i40e_send_vf_nack(pf, vf,
				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
				    I40E_ERR_PARAM);
				return;
			}
		}

		/* ITR indices are bounded by the hardware. */
		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
			i40e_send_vf_nack(pf, vf,
			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
			    I40E_ERR_PARAM);
			return;
		}

		ixl_vf_config_vector(pf, vf, vector);
	}

	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
}
6016
6017 static void
6018 ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6019     uint16_t msg_size)
6020 {
6021         struct i40e_virtchnl_queue_select *select;
6022         int error;
6023
6024         if (msg_size != sizeof(*select)) {
6025                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6026                     I40E_ERR_PARAM);
6027                 return;
6028         }
6029
6030         select = msg;
6031         if (select->vsi_id != vf->vsi.vsi_num ||
6032             select->rx_queues == 0 || select->tx_queues == 0) {
6033                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6034                     I40E_ERR_PARAM);
6035                 return;
6036         }
6037
6038         error = ixl_enable_rings(&vf->vsi);
6039         if (error) {
6040                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6041                     I40E_ERR_TIMEOUT);
6042                 return;
6043         }
6044
6045         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
6046 }
6047
6048 static void
6049 ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6050     void *msg, uint16_t msg_size)
6051 {
6052         struct i40e_virtchnl_queue_select *select;
6053         int error;
6054
6055         if (msg_size != sizeof(*select)) {
6056                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6057                     I40E_ERR_PARAM);
6058                 return;
6059         }
6060
6061         select = msg;
6062         if (select->vsi_id != vf->vsi.vsi_num ||
6063             select->rx_queues == 0 || select->tx_queues == 0) {
6064                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6065                     I40E_ERR_PARAM);
6066                 return;
6067         }
6068
6069         error = ixl_disable_rings(&vf->vsi);
6070         if (error) {
6071                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6072                     I40E_ERR_TIMEOUT);
6073                 return;
6074         }
6075
6076         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
6077 }
6078
6079 static boolean_t
6080 ixl_zero_mac(const uint8_t *addr)
6081 {
6082         uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
6083
6084         return (cmp_etheraddr(addr, zero));
6085 }
6086
/* Return TRUE if addr is the Ethernet broadcast address. */
static boolean_t
ixl_bcast_mac(const uint8_t *addr)
{

	return (cmp_etheraddr(addr, ixl_bcast_addr));
}
6093
6094 static int
6095 ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
6096 {
6097
6098         if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
6099                 return (EINVAL);
6100
6101         /*
6102          * If the VF is not allowed to change its MAC address, don't let it
6103          * set a MAC filter for an address that is not a multicast address and
6104          * is not its assigned MAC.
6105          */
6106         if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
6107             !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
6108                 return (EPERM);
6109
6110         return (0);
6111 }
6112
6113 static void
6114 ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6115     uint16_t msg_size)
6116 {
6117         struct i40e_virtchnl_ether_addr_list *addr_list;
6118         struct i40e_virtchnl_ether_addr *addr;
6119         struct ixl_vsi *vsi;
6120         int i;
6121         size_t expected_size;
6122
6123         vsi = &vf->vsi;
6124
6125         if (msg_size < sizeof(*addr_list)) {
6126                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6127                     I40E_ERR_PARAM);
6128                 return;
6129         }
6130
6131         addr_list = msg;
6132         expected_size = sizeof(*addr_list) +
6133             addr_list->num_elements * sizeof(*addr);
6134
6135         if (addr_list->num_elements == 0 ||
6136             addr_list->vsi_id != vsi->vsi_num ||
6137             msg_size != expected_size) {
6138                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6139                     I40E_ERR_PARAM);
6140                 return;
6141         }
6142
6143         for (i = 0; i < addr_list->num_elements; i++) {
6144                 if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
6145                         i40e_send_vf_nack(pf, vf,
6146                             I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6147                         return;
6148                 }
6149         }
6150
6151         for (i = 0; i < addr_list->num_elements; i++) {
6152                 addr = &addr_list->list[i];
6153                 ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
6154         }
6155
6156         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
6157 }
6158
6159 static void
6160 ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6161     uint16_t msg_size)
6162 {
6163         struct i40e_virtchnl_ether_addr_list *addr_list;
6164         struct i40e_virtchnl_ether_addr *addr;
6165         size_t expected_size;
6166         int i;
6167
6168         if (msg_size < sizeof(*addr_list)) {
6169                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6170                     I40E_ERR_PARAM);
6171                 return;
6172         }
6173
6174         addr_list = msg;
6175         expected_size = sizeof(*addr_list) +
6176             addr_list->num_elements * sizeof(*addr);
6177
6178         if (addr_list->num_elements == 0 ||
6179             addr_list->vsi_id != vf->vsi.vsi_num ||
6180             msg_size != expected_size) {
6181                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6182                     I40E_ERR_PARAM);
6183                 return;
6184         }
6185
6186         for (i = 0; i < addr_list->num_elements; i++) {
6187                 addr = &addr_list->list[i];
6188                 if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
6189                         i40e_send_vf_nack(pf, vf,
6190                             I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6191                         return;
6192                 }
6193         }
6194
6195         for (i = 0; i < addr_list->num_elements; i++) {
6196                 addr = &addr_list->list[i];
6197                 ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
6198         }
6199
6200         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
6201 }
6202
6203 static enum i40e_status_code
6204 ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
6205 {
6206         struct i40e_vsi_context vsi_ctx;
6207
6208         vsi_ctx.seid = vf->vsi.seid;
6209
6210         bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
6211         vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
6212         vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
6213             I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6214         return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
6215 }
6216
6217 static void
6218 ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6219     uint16_t msg_size)
6220 {
6221         struct i40e_virtchnl_vlan_filter_list *filter_list;
6222         enum i40e_status_code code;
6223         size_t expected_size;
6224         int i;
6225
6226         if (msg_size < sizeof(*filter_list)) {
6227                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6228                     I40E_ERR_PARAM);
6229                 return;
6230         }
6231
6232         filter_list = msg;
6233         expected_size = sizeof(*filter_list) +
6234             filter_list->num_elements * sizeof(uint16_t);
6235         if (filter_list->num_elements == 0 ||
6236             filter_list->vsi_id != vf->vsi.vsi_num ||
6237             msg_size != expected_size) {
6238                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6239                     I40E_ERR_PARAM);
6240                 return;
6241         }
6242
6243         if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6244                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6245                     I40E_ERR_PARAM);
6246                 return;
6247         }
6248
6249         for (i = 0; i < filter_list->num_elements; i++) {
6250                 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6251                         i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6252                             I40E_ERR_PARAM);
6253                         return;
6254                 }
6255         }
6256
6257         code = ixl_vf_enable_vlan_strip(pf, vf);
6258         if (code != I40E_SUCCESS) {
6259                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6260                     I40E_ERR_PARAM);
6261         }
6262
6263         for (i = 0; i < filter_list->num_elements; i++)
6264                 ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6265
6266         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
6267 }
6268
6269 static void
6270 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6271     uint16_t msg_size)
6272 {
6273         struct i40e_virtchnl_vlan_filter_list *filter_list;
6274         int i;
6275         size_t expected_size;
6276
6277         if (msg_size < sizeof(*filter_list)) {
6278                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6279                     I40E_ERR_PARAM);
6280                 return;
6281         }
6282
6283         filter_list = msg;
6284         expected_size = sizeof(*filter_list) +
6285             filter_list->num_elements * sizeof(uint16_t);
6286         if (filter_list->num_elements == 0 ||
6287             filter_list->vsi_id != vf->vsi.vsi_num ||
6288             msg_size != expected_size) {
6289                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6290                     I40E_ERR_PARAM);
6291                 return;
6292         }
6293
6294         for (i = 0; i < filter_list->num_elements; i++) {
6295                 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6296                         i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6297                             I40E_ERR_PARAM);
6298                         return;
6299                 }
6300         }
6301
6302         if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6303                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6304                     I40E_ERR_PARAM);
6305                 return;
6306         }
6307
6308         for (i = 0; i < filter_list->num_elements; i++)
6309                 ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6310
6311         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
6312 }
6313
6314 static void
6315 ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6316     void *msg, uint16_t msg_size)
6317 {
6318         struct i40e_virtchnl_promisc_info *info;
6319         enum i40e_status_code code;
6320
6321         if (msg_size != sizeof(*info)) {
6322                 i40e_send_vf_nack(pf, vf,
6323                     I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6324                 return;
6325         }
6326
6327         if (!vf->vf_flags & VF_FLAG_PROMISC_CAP) {
6328                 i40e_send_vf_nack(pf, vf,
6329                     I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6330                 return;
6331         }
6332
6333         info = msg;
6334         if (info->vsi_id != vf->vsi.vsi_num) {
6335                 i40e_send_vf_nack(pf, vf,
6336                     I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6337                 return;
6338         }
6339
6340         code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
6341             info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
6342         if (code != I40E_SUCCESS) {
6343                 i40e_send_vf_nack(pf, vf,
6344                     I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6345                 return;
6346         }
6347
6348         code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
6349             info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
6350         if (code != I40E_SUCCESS) {
6351                 i40e_send_vf_nack(pf, vf,
6352                     I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6353                 return;
6354         }
6355
6356         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
6357 }
6358
6359 static void
6360 ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6361     uint16_t msg_size)
6362 {
6363         struct i40e_virtchnl_queue_select *queue;
6364
6365         if (msg_size != sizeof(*queue)) {
6366                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6367                     I40E_ERR_PARAM);
6368                 return;
6369         }
6370
6371         queue = msg;
6372         if (queue->vsi_id != vf->vsi.vsi_num) {
6373                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6374                     I40E_ERR_PARAM);
6375                 return;
6376         }
6377
6378         ixl_update_eth_stats(&vf->vsi);
6379
6380         ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6381             I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
6382 }
6383
6384 static void
6385 ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
6386 {
6387         struct ixl_vf *vf;
6388         void *msg;
6389         uint16_t vf_num, msg_size;
6390         uint32_t opcode;
6391
6392         vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
6393         opcode = le32toh(event->desc.cookie_high);
6394
6395         if (vf_num >= pf->num_vfs) {
6396                 device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
6397                 return;
6398         }
6399
6400         vf = &pf->vfs[vf_num];
6401         msg = event->msg_buf;
6402         msg_size = event->msg_len;
6403
6404         I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
6405             "Got msg %s(%d) from VF-%d of size %d\n",
6406             ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);
6407
6408         switch (opcode) {
6409         case I40E_VIRTCHNL_OP_VERSION:
6410                 ixl_vf_version_msg(pf, vf, msg, msg_size);
6411                 break;
6412         case I40E_VIRTCHNL_OP_RESET_VF:
6413                 ixl_vf_reset_msg(pf, vf, msg, msg_size);
6414                 break;
6415         case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
6416                 ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
6417                 break;
6418         case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
6419                 ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
6420                 break;
6421         case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
6422                 ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
6423                 break;
6424         case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
6425                 ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
6426                 break;
6427         case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
6428                 ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
6429                 break;
6430         case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
6431                 ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
6432                 break;
6433         case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
6434                 ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
6435                 break;
6436         case I40E_VIRTCHNL_OP_ADD_VLAN:
6437                 ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
6438                 break;
6439         case I40E_VIRTCHNL_OP_DEL_VLAN:
6440                 ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
6441                 break;
6442         case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
6443                 ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
6444                 break;
6445         case I40E_VIRTCHNL_OP_GET_STATS:
6446                 ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
6447                 break;
6448
6449         /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
6450         case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
6451         case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
6452         default:
6453                 i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
6454                 break;
6455         }
6456 }
6457
6458 /* Handle any VFs that have reset themselves via a Function Level Reset(FLR). */
6459 static void
6460 ixl_handle_vflr(void *arg, int pending)
6461 {
6462         struct ixl_pf *pf;
6463         struct i40e_hw *hw;
6464         uint16_t global_vf_num;
6465         uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
6466         int i;
6467
6468         pf = arg;
6469         hw = &pf->hw;
6470
6471         IXL_PF_LOCK(pf);
6472         for (i = 0; i < pf->num_vfs; i++) {
6473                 global_vf_num = hw->func_caps.vf_base_id + i;
6474
6475                 vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
6476                 vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
6477                 vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
6478                 if (vflrstat & vflrstat_mask) {
6479                         wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
6480                             vflrstat_mask);
6481
6482                         ixl_reinit_vf(pf, &pf->vfs[i]);
6483                 }
6484         }
6485
6486         icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
6487         icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
6488         wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
6489         ixl_flush(hw);
6490
6491         IXL_PF_UNLOCK(pf);
6492 }
6493
6494 static int
6495 ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
6496 {
6497
6498         switch (err) {
6499         case I40E_AQ_RC_EPERM:
6500                 return (EPERM);
6501         case I40E_AQ_RC_ENOENT:
6502                 return (ENOENT);
6503         case I40E_AQ_RC_ESRCH:
6504                 return (ESRCH);
6505         case I40E_AQ_RC_EINTR:
6506                 return (EINTR);
6507         case I40E_AQ_RC_EIO:
6508                 return (EIO);
6509         case I40E_AQ_RC_ENXIO:
6510                 return (ENXIO);
6511         case I40E_AQ_RC_E2BIG:
6512                 return (E2BIG);
6513         case I40E_AQ_RC_EAGAIN:
6514                 return (EAGAIN);
6515         case I40E_AQ_RC_ENOMEM:
6516                 return (ENOMEM);
6517         case I40E_AQ_RC_EACCES:
6518                 return (EACCES);
6519         case I40E_AQ_RC_EFAULT:
6520                 return (EFAULT);
6521         case I40E_AQ_RC_EBUSY:
6522                 return (EBUSY);
6523         case I40E_AQ_RC_EEXIST:
6524                 return (EEXIST);
6525         case I40E_AQ_RC_EINVAL:
6526                 return (EINVAL);
6527         case I40E_AQ_RC_ENOTTY:
6528                 return (ENOTTY);
6529         case I40E_AQ_RC_ENOSPC:
6530                 return (ENOSPC);
6531         case I40E_AQ_RC_ENOSYS:
6532                 return (ENOSYS);
6533         case I40E_AQ_RC_ERANGE:
6534                 return (ERANGE);
6535         case I40E_AQ_RC_EFLUSHED:
6536                 return (EINVAL);        /* No exact equivalent in errno.h */
6537         case I40E_AQ_RC_BAD_ADDR:
6538                 return (EFAULT);
6539         case I40E_AQ_RC_EMODE:
6540                 return (EPERM);
6541         case I40E_AQ_RC_EFBIG:
6542                 return (EFBIG);
6543         default:
6544                 return (EINVAL);
6545         }
6546 }
6547
6548 static int
6549 ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t *params)
6550 {
6551         struct ixl_pf *pf;
6552         struct i40e_hw *hw;
6553         struct ixl_vsi *pf_vsi;
6554         enum i40e_status_code ret;
6555         int i, error;
6556
6557         pf = device_get_softc(dev);
6558         hw = &pf->hw;
6559         pf_vsi = &pf->vsi;
6560
6561         IXL_PF_LOCK(pf);
6562         pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
6563             M_ZERO);
6564
6565         if (pf->vfs == NULL) {
6566                 error = ENOMEM;
6567                 goto fail;
6568         }
6569
6570         for (i = 0; i < num_vfs; i++)
6571                 sysctl_ctx_init(&pf->vfs[i].ctx);
6572
6573         ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
6574             1, FALSE, FALSE, &pf->veb_seid, NULL);
6575         if (ret != I40E_SUCCESS) {
6576                 error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
6577                 device_printf(dev, "add_veb failed; code=%d error=%d", ret,
6578                     error);
6579                 goto fail;
6580         }
6581
6582         ixl_configure_msix(pf);
6583         ixl_enable_adminq(hw);
6584
6585         pf->num_vfs = num_vfs;
6586         IXL_PF_UNLOCK(pf);
6587         return (0);
6588
6589 fail:
6590         free(pf->vfs, M_IXL);
6591         pf->vfs = NULL;
6592         IXL_PF_UNLOCK(pf);
6593         return (error);
6594 }
6595
6596 static void
6597 ixl_uninit_iov(device_t dev)
6598 {
6599         struct ixl_pf *pf;
6600         struct i40e_hw *hw;
6601         struct ixl_vsi *vsi;
6602         struct ifnet *ifp;
6603         struct ixl_vf *vfs;
6604         int i, num_vfs;
6605
6606         pf = device_get_softc(dev);
6607         hw = &pf->hw;
6608         vsi = &pf->vsi;
6609         ifp = vsi->ifp;
6610
6611         IXL_PF_LOCK(pf);
6612         for (i = 0; i < pf->num_vfs; i++) {
6613                 if (pf->vfs[i].vsi.seid != 0)
6614                         i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
6615         }
6616
6617         if (pf->veb_seid != 0) {
6618                 i40e_aq_delete_element(hw, pf->veb_seid, NULL);
6619                 pf->veb_seid = 0;
6620         }
6621
6622         if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
6623                 ixl_disable_intr(vsi);
6624
6625         vfs = pf->vfs;
6626         num_vfs = pf->num_vfs;
6627
6628         pf->vfs = NULL;
6629         pf->num_vfs = 0;
6630         IXL_PF_UNLOCK(pf);
6631
6632         /* Do this after the unlock as sysctl_ctx_free might sleep. */
6633         for (i = 0; i < num_vfs; i++)
6634                 sysctl_ctx_free(&vfs[i].ctx);
6635         free(vfs, M_IXL);
6636 }
6637
6638 static int
6639 ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
6640 {
6641         char sysctl_name[QUEUE_NAME_LEN];
6642         struct ixl_pf *pf;
6643         struct ixl_vf *vf;
6644         const void *mac;
6645         size_t size;
6646         int error;
6647
6648         pf = device_get_softc(dev);
6649         vf = &pf->vfs[vfnum];
6650
6651         IXL_PF_LOCK(pf);
6652         vf->vf_num = vfnum;
6653
6654         vf->vsi.back = pf;
6655         vf->vf_flags = VF_FLAG_ENABLED;
6656         SLIST_INIT(&vf->vsi.ftl);
6657
6658         error = ixl_vf_setup_vsi(pf, vf);
6659         if (error != 0)
6660                 goto out;
6661
6662         if (nvlist_exists_binary(params, "mac-addr")) {
6663                 mac = nvlist_get_binary(params, "mac-addr", &size);
6664                 bcopy(mac, vf->mac, ETHER_ADDR_LEN);
6665
6666                 if (nvlist_get_bool(params, "allow-set-mac"))
6667                         vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6668         } else
6669                 /*
6670                  * If the administrator has not specified a MAC address then
6671                  * we must allow the VF to choose one.
6672                  */
6673                 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6674
6675         if (nvlist_get_bool(params, "mac-anti-spoof"))
6676                 vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
6677
6678         if (nvlist_get_bool(params, "allow-promisc"))
6679                 vf->vf_flags |= VF_FLAG_PROMISC_CAP;
6680
6681         vf->vf_flags |= VF_FLAG_VLAN_CAP;
6682
6683         ixl_reset_vf(pf, vf);
6684 out:
6685         IXL_PF_UNLOCK(pf);
6686         if (error == 0) {
6687                 snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
6688                 ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
6689         }
6690
6691         return (error);
6692 }
6693 #endif /* PCI_IOV */