/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#ifndef IXL_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#endif

#include "ixl.h"
#include "ixl_pf.h"

#ifdef RSS
#include <net/rss_config.h>
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixl_driver_version[] = "1.4.3";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixl_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixl_vendor_info_array[] =
{
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A, 0, 0, 0},
#ifdef X722_SUPPORT
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 0},
        {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, 0, 0, 0},
#endif
        /* required last entry */
        {0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixl_strings[] = {
        "Intel(R) Ethernet Connection XL710 Driver"
};


/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixl_probe(device_t);
static int      ixl_attach(device_t);
static int      ixl_detach(device_t);
static int      ixl_shutdown(device_t);
static int      ixl_get_hw_capabilities(struct ixl_pf *);
static void     ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
static int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void     ixl_init(void *);
static void     ixl_init_locked(struct ixl_pf *);
static void     ixl_stop(struct ixl_pf *);
static void     ixl_media_status(struct ifnet *, struct ifmediareq *);
static int      ixl_media_change(struct ifnet *);
static void     ixl_update_link_status(struct ixl_pf *);
static int      ixl_allocate_pci_resources(struct ixl_pf *);
static u16      ixl_get_bus_info(struct i40e_hw *, device_t);
static int      ixl_setup_stations(struct ixl_pf *);
static int      ixl_switch_config(struct ixl_pf *);
static int      ixl_initialize_vsi(struct ixl_vsi *);
static int      ixl_assign_vsi_msix(struct ixl_pf *);
static int      ixl_assign_vsi_legacy(struct ixl_pf *);
static int      ixl_init_msix(struct ixl_pf *);
static void     ixl_configure_msix(struct ixl_pf *);
static void     ixl_configure_itr(struct ixl_pf *);
static void     ixl_configure_legacy(struct ixl_pf *);
static void     ixl_free_pci_resources(struct ixl_pf *);
static void     ixl_local_timer(void *);
static int      ixl_setup_interface(device_t, struct ixl_vsi *);
static void     ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
static void     ixl_config_rss(struct ixl_vsi *);
static void     ixl_set_queue_rx_itr(struct ixl_queue *);
static void     ixl_set_queue_tx_itr(struct ixl_queue *);
static int      ixl_set_advertised_speeds(struct ixl_pf *, int);

static int      ixl_enable_rings(struct ixl_vsi *);
static int      ixl_disable_rings(struct ixl_vsi *);
static void     ixl_enable_intr(struct ixl_vsi *);
static void     ixl_disable_intr(struct ixl_vsi *);
static void     ixl_disable_rings_intr(struct ixl_vsi *);

static void     ixl_enable_adminq(struct i40e_hw *);
static void     ixl_disable_adminq(struct i40e_hw *);
static void     ixl_enable_queue(struct i40e_hw *, int);
static void     ixl_disable_queue(struct i40e_hw *, int);
static void     ixl_enable_legacy(struct i40e_hw *);
static void     ixl_disable_legacy(struct i40e_hw *);

static void     ixl_set_promisc(struct ixl_vsi *);
static void     ixl_add_multi(struct ixl_vsi *);
static void     ixl_del_multi(struct ixl_vsi *);
static void     ixl_register_vlan(void *, struct ifnet *, u16);
static void     ixl_unregister_vlan(void *, struct ifnet *, u16);
static void     ixl_setup_vlan_filters(struct ixl_vsi *);

static void     ixl_init_filters(struct ixl_vsi *);
static void     ixl_reconfigure_filters(struct ixl_vsi *vsi);
static void     ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void     ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void     ixl_add_hw_filters(struct ixl_vsi *, int, int);
static void     ixl_del_hw_filters(struct ixl_vsi *, int);
static struct ixl_mac_filter *
                ixl_find_filter(struct ixl_vsi *, u8 *, s16);
static void     ixl_add_mc_filter(struct ixl_vsi *, u8 *);
static void     ixl_free_mac_filters(struct ixl_vsi *vsi);


/* Sysctl debug interface */
static int      ixl_debug_info(SYSCTL_HANDLER_ARGS);
static void     ixl_print_debug_info(struct ixl_pf *);

/* The MSI/X Interrupt handlers */
static void     ixl_intr(void *);
static void     ixl_msix_que(void *);
static void     ixl_msix_adminq(void *);
static void     ixl_handle_mdd_event(struct ixl_pf *);

/* Deferred interrupt tasklets */
static void     ixl_do_adminq(void *, int);

/* Sysctl handlers */
static int      ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int      ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int      ixl_current_speed(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);

/* Statistics */
static void     ixl_add_hw_stats(struct ixl_pf *);
static void     ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
                    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
static void     ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
                    struct sysctl_oid_list *,
                    struct i40e_eth_stats *);
static void     ixl_update_stats_counters(struct ixl_pf *);
static void     ixl_update_eth_stats(struct ixl_vsi *);
static void     ixl_update_vsi_stats(struct ixl_vsi *);
static void     ixl_pf_reset_stats(struct ixl_pf *);
static void     ixl_vsi_reset_stats(struct ixl_vsi *);
static void     ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
                    u64 *, u64 *);
static void     ixl_stat_update32(struct i40e_hw *, u32, bool,
                    u64 *, u64 *);

#ifdef IXL_DEBUG_SYSCTL
static int      ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
#endif

#ifdef PCI_IOV
static int      ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);

static int      ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t*);
static void     ixl_uninit_iov(device_t dev);
static int      ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);

static void     ixl_handle_vf_msg(struct ixl_pf *,
                    struct i40e_arq_event_info *);
static void     ixl_handle_vflr(void *arg, int pending);

static void     ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
static void     ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, ixl_probe),
        DEVMETHOD(device_attach, ixl_attach),
        DEVMETHOD(device_detach, ixl_detach),
        DEVMETHOD(device_shutdown, ixl_shutdown),
#ifdef PCI_IOV
        DEVMETHOD(pci_init_iov, ixl_init_iov),
        DEVMETHOD(pci_uninit_iov, ixl_uninit_iov),
        DEVMETHOD(pci_add_vf, ixl_add_vf),
#endif
        {0, 0}
};

static driver_t ixl_driver = {
        "ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ixl, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
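
/*
 * Note (usage, not part of the original sources): as with other FreeBSD
 * NIC drivers, the module can be loaded at boot by adding
 * if_ixl_load="YES" to /boot/loader.conf, or at runtime with kldload(8)
 * (if_ixl.ko).
 */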

/*
** Global reset mutex
*/
static struct mtx ixl_reset_mtx;

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
                   "IXL driver parameters");

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixl_enable_msix = 1;
TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixl_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixl_ringsz, 0, "Descriptor Ring Size");

/*
** This can be set manually; if left as 0, the
** number of queues will be calculated based
** on the CPUs and MSI-X vectors available.
*/
int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixl_max_queues, 0, "Number of Queues");

/*
** Controls for Interrupt Throttling
**      - true/false for dynamic adjustment
**      - default values for static ITR
*/
int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

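/*
 * The tunables registered above with TUNABLE_INT() can be set from
 * loader.conf(5). For example (values illustrative only):
 *
 *   hw.ixl.enable_msix=1
 *   hw.ixl.ringsz=1024
 *   hw.ixl.max_queues=4
 *   hw.ixl.dynamic_rx_itr=1
 */
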
#ifdef IXL_FDIR
static int ixl_enable_fdir = 1;
TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
/* Rate at which we sample */
int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif

#ifdef DEV_NETMAP
#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */

static char *ixl_fc_string[6] = {
        "None",
        "Rx",
        "Tx",
        "Full",
        "Priority",
        "Default"
};

static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*********************************************************************
 *  Device identification routine
 *
 *  ixl_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixl_probe(device_t dev)
{
        ixl_vendor_info_t *ent;

        u16     pci_vendor_id, pci_device_id;
        u16     pci_subvendor_id, pci_subdevice_id;
        char    device_name[256];
        static bool lock_init = FALSE;

        INIT_DEBUGOUT("ixl_probe: begin");

        pci_vendor_id = pci_get_vendor(dev);
        if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
                return (ENXIO);

        pci_device_id = pci_get_device(dev);
        pci_subvendor_id = pci_get_subvendor(dev);
        pci_subdevice_id = pci_get_subdevice(dev);

        ent = ixl_vendor_info_array;
        while (ent->vendor_id != 0) {
                if ((pci_vendor_id == ent->vendor_id) &&
                    (pci_device_id == ent->device_id) &&

                    ((pci_subvendor_id == ent->subvendor_id) ||
                     (ent->subvendor_id == 0)) &&

                    ((pci_subdevice_id == ent->subdevice_id) ||
                     (ent->subdevice_id == 0))) {
                        sprintf(device_name, "%s, Version - %s",
                                ixl_strings[ent->index],
                                ixl_driver_version);
                        device_set_desc_copy(dev, device_name);
                        /* One shot mutex init */
                        if (lock_init == FALSE) {
                                lock_init = TRUE;
                                mtx_init(&ixl_reset_mtx,
                                    "ixl_reset",
                                    "IXL RESET Lock", MTX_DEF);
                        }
                        return (BUS_PROBE_DEFAULT);
                }
                ent++;
        }
        return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_attach(device_t dev)
{
        struct ixl_pf   *pf;
        struct i40e_hw  *hw;
        struct ixl_vsi *vsi;
        u16             bus;
        int             error = 0;
#ifdef PCI_IOV
        nvlist_t        *pf_schema, *vf_schema;
        int             iov_error;
#endif

        INIT_DEBUGOUT("ixl_attach: begin");

        /* Allocate, clear, and link in our primary soft structure */
        pf = device_get_softc(dev);
        pf->dev = pf->osdep.dev = dev;
        hw = &pf->hw;

        /*
        ** Note this assumes we have a single embedded VSI;
        ** this could be enhanced later to allocate multiple.
        */
        vsi = &pf->vsi;
        vsi->dev = pf->dev;

        /* Core lock init */
        IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));

        /* Set up the timer callout */
        callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);

        /* Set up sysctls */
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
            pf, 0, ixl_set_flowcntl, "I", "Flow Control");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
            pf, 0, ixl_set_advertise, "I", "Advertised Speed");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
            pf, 0, ixl_current_speed, "A", "Current Port Speed");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
            pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");

        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "rx_itr", CTLFLAG_RW,
            &ixl_rx_itr, IXL_ITR_8K, "RX ITR");

        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
            &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");

        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "tx_itr", CTLFLAG_RW,
            &ixl_tx_itr, IXL_ITR_4K, "TX ITR");

        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
            &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");

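        /*
         * Note: sysctls added on the device sysctl tree appear under
         * dev.ixl.<unit>, e.g. dev.ixl.0.fc or dev.ixl.0.current_speed,
         * and can be read or set with sysctl(8) at runtime.
         */
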
#ifdef IXL_DEBUG_SYSCTL
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
            ixl_debug_info, "I", "Debug Information");

        /* Debug shared-code message level */
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "debug_mask", CTLFLAG_RW,
            &pf->hw.debug_mask, 0, "Debug Message Level");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
            0, "PF/VF Virtual Channel debug level");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
            pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
            pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
            pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
            pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
            pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
#endif

        /* Save off the PCI information */
        hw->vendor_id = pci_get_vendor(dev);
        hw->device_id = pci_get_device(dev);
        hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
        hw->subsystem_vendor_id =
            pci_read_config(dev, PCIR_SUBVEND_0, 2);
        hw->subsystem_device_id =
            pci_read_config(dev, PCIR_SUBDEV_0, 2);

        hw->bus.device = pci_get_slot(dev);
        hw->bus.func = pci_get_function(dev);

        pf->vc_debug_lvl = 1;

        /* Do PCI setup - map BAR0, etc */
        if (ixl_allocate_pci_resources(pf)) {
                device_printf(dev, "Allocation of PCI resources failed\n");
                error = ENXIO;
                goto err_out;
        }

        /* Establish a clean starting point */
        i40e_clear_hw(hw);
        error = i40e_pf_reset(hw);
        if (error) {
                device_printf(dev, "PF reset failure %x\n", error);
                error = EIO;
                goto err_out;
        }

        /* Set admin queue parameters */
        hw->aq.num_arq_entries = IXL_AQ_LEN;
        hw->aq.num_asq_entries = IXL_AQ_LEN;
        hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
        hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

        /* Initialize the shared code */
        error = i40e_init_shared_code(hw);
        if (error) {
                device_printf(dev, "Unable to initialize the shared code\n");
                error = EIO;
                goto err_out;
        }

        /* Set up the admin queue */
        error = i40e_init_adminq(hw);
        if (error) {
                device_printf(dev, "The driver for the device stopped "
                    "because the NVM image is newer than expected.\n"
                    "You must install the most recent version of "
                    "the network driver.\n");
                goto err_out;
        }
        device_printf(dev, "%s\n", ixl_fw_version_str(hw));

        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
            hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
                device_printf(dev, "The driver for the device detected "
                    "a newer version of the NVM image than expected.\n"
                    "Please install the most recent version of the network driver.\n");
        else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
            hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
                device_printf(dev, "The driver for the device detected "
                    "an older version of the NVM image than expected.\n"
                    "Please update the NVM image.\n");

        /* Clear PXE mode */
        i40e_clear_pxe_mode(hw);

        /* Get capabilities from the device */
        error = ixl_get_hw_capabilities(pf);
        if (error) {
                device_printf(dev, "HW capabilities failure!\n");
                goto err_get_cap;
        }

        /* Set up host memory cache */
        error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
            hw->func_caps.num_rx_qp, 0, 0);
        if (error) {
                device_printf(dev, "init_lan_hmc failed: %d\n", error);
                goto err_get_cap;
        }

        error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
        if (error) {
                device_printf(dev, "configure_lan_hmc failed: %d\n", error);
                goto err_mac_hmc;
        }

        /* Disable LLDP from the firmware */
        i40e_aq_stop_lldp(hw, TRUE, NULL);

        i40e_get_mac_addr(hw, hw->mac.addr);
        error = i40e_validate_mac_addr(hw->mac.addr);
        if (error) {
                device_printf(dev, "validate_mac_addr failed: %d\n", error);
                goto err_mac_hmc;
        }
        bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
        i40e_get_port_mac_addr(hw, hw->mac.port_addr);

        /* Set up VSI and queues */
        if (ixl_setup_stations(pf) != 0) {
                device_printf(dev, "setup stations failed!\n");
                error = ENOMEM;
                goto err_mac_hmc;
        }

        /* Initialize mac filter list for VSI */
        SLIST_INIT(&vsi->ftl);

        /* Set up interrupt routing here */
        if (pf->msix > 1)
                error = ixl_assign_vsi_msix(pf);
        else
                error = ixl_assign_vsi_legacy(pf);
        if (error)
                goto err_late;

        if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
            (hw->aq.fw_maj_ver < 4)) {
                i40e_msec_delay(75);
                error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
                if (error)
                        device_printf(dev, "link restart failed, aq_err=%d\n",
                            pf->hw.aq.asq_last_status);
        }

        /* Determine link state */
        i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
        i40e_get_link_status(hw, &pf->link_up);

        /* Setup OS specific network interface */
        if (ixl_setup_interface(dev, vsi) != 0) {
                device_printf(dev, "interface setup failed!\n");
                error = EIO;
                goto err_late;
        }

        error = ixl_switch_config(pf);
        if (error) {
                device_printf(dev, "Initial switch config failed: %d\n", error);
                goto err_mac_hmc;
        }

        /* Limit PHY interrupts to link and module failure events */
        error = i40e_aq_set_phy_int_mask(hw,
            I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
        if (error)
                device_printf(dev, "set phy mask failed: %d\n", error);

        /* Get the bus configuration and set the shared code */
        bus = ixl_get_bus_info(hw, dev);
        i40e_set_pci_config_data(hw, bus);

        /* Initialize statistics */
        ixl_pf_reset_stats(pf);
        ixl_update_stats_counters(pf);
        ixl_add_hw_stats(pf);

        /* Register for VLAN events */
        vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
            ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
        vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
            ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

#ifdef PCI_IOV
        /* SR-IOV is only supported when MSI-X is in use. */
        if (pf->msix > 1) {
                pf_schema = pci_iov_schema_alloc_node();
                vf_schema = pci_iov_schema_alloc_node();
                pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
                pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
                    IOV_SCHEMA_HASDEFAULT, TRUE);
                pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
                    IOV_SCHEMA_HASDEFAULT, FALSE);
                pci_iov_schema_add_bool(vf_schema, "allow-promisc",
                    IOV_SCHEMA_HASDEFAULT, FALSE);

                iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
                if (iov_error != 0)
                        device_printf(dev,
                            "Failed to initialize SR-IOV (error=%d)\n",
                            iov_error);
        }
#endif

#ifdef DEV_NETMAP
        ixl_netmap_attach(vsi);
#endif /* DEV_NETMAP */
        INIT_DEBUGOUT("ixl_attach: end");
        return (0);

err_late:
        if (vsi->ifp != NULL)
                if_free(vsi->ifp);
err_mac_hmc:
        i40e_shutdown_lan_hmc(hw);
err_get_cap:
        i40e_shutdown_adminq(hw);
err_out:
        ixl_free_pci_resources(pf);
        ixl_free_vsi(vsi);
        IXL_PF_LOCK_DESTROY(pf);
        return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_detach(device_t dev)
{
        struct ixl_pf           *pf = device_get_softc(dev);
        struct i40e_hw          *hw = &pf->hw;
        struct ixl_vsi          *vsi = &pf->vsi;
        struct ixl_queue        *que = vsi->queues;
        i40e_status             status;
#ifdef PCI_IOV
        int                     error;
#endif

        INIT_DEBUGOUT("ixl_detach: begin");

        /* Make sure VLANs are not using the driver */
        if (vsi->ifp->if_vlantrunk != NULL) {
                device_printf(dev, "VLAN in use, detach first\n");
                return (EBUSY);
        }

#ifdef PCI_IOV
        error = pci_iov_detach(dev);
        if (error != 0) {
                device_printf(dev, "SR-IOV in use; detach first.\n");
                return (error);
        }
#endif

        ether_ifdetach(vsi->ifp);
        if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
                IXL_PF_LOCK(pf);
                ixl_stop(pf);
                IXL_PF_UNLOCK(pf);
        }

        for (int i = 0; i < vsi->num_queues; i++, que++) {
                if (que->tq) {
                        taskqueue_drain(que->tq, &que->task);
                        taskqueue_drain(que->tq, &que->tx_task);
                        taskqueue_free(que->tq);
                }
        }

        /* Shutdown LAN HMC */
        status = i40e_shutdown_lan_hmc(hw);
        if (status)
                device_printf(dev,
                    "Shutdown LAN HMC failed with code %d\n", status);

        /* Shutdown admin queue */
        status = i40e_shutdown_adminq(hw);
        if (status)
                device_printf(dev,
                    "Shutdown Admin queue failed with code %d\n", status);

        /* Unregister VLAN events */
        if (vsi->vlan_attach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
        if (vsi->vlan_detach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

        callout_drain(&pf->timer);
#ifdef DEV_NETMAP
        netmap_detach(vsi->ifp);
#endif /* DEV_NETMAP */
        ixl_free_pci_resources(pf);
        bus_generic_detach(dev);
        if_free(vsi->ifp);
        ixl_free_vsi(vsi);
        IXL_PF_LOCK_DESTROY(pf);
        return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixl_shutdown(device_t dev)
{
        struct ixl_pf *pf = device_get_softc(dev);
        IXL_PF_LOCK(pf);
        ixl_stop(pf);
        IXL_PF_UNLOCK(pf);
        return (0);
}


/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

static int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
        struct i40e_aqc_list_capabilities_element_resp *buf;
        struct i40e_hw  *hw = &pf->hw;
        device_t        dev = pf->dev;
        int             error, len;
        u16             needed;
        bool            again = TRUE;

        len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
        if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
            malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
                device_printf(dev, "Unable to allocate cap memory\n");
                return (ENOMEM);
        }

        /* This populates the hw struct */
        error = i40e_aq_discover_capabilities(hw, buf, len,
            &needed, i40e_aqc_opc_list_func_capabilities, NULL);
        free(buf, M_DEVBUF);
        if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
            (again == TRUE)) {
                /* retry once with a larger buffer */
                again = FALSE;
                len = needed;
                goto retry;
        } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
                device_printf(dev, "capability discovery failed: %d\n",
                    pf->hw.aq.asq_last_status);
                return (ENODEV);
        }

        /* Capture this PF's starting queue pair */
        pf->qbase = hw->func_caps.base_queue;

#ifdef IXL_DEBUG
        device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
            "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
            hw->pf_id, hw->func_caps.num_vfs,
            hw->func_caps.num_msix_vectors,
            hw->func_caps.num_msix_vectors_vf,
            hw->func_caps.fd_filters_guaranteed,
            hw->func_caps.fd_filters_best_effort,
            hw->func_caps.num_tx_qp,
            hw->func_caps.num_rx_qp,
            hw->func_caps.base_queue);
#endif
        return (error);
}

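/*
 * Keep the TXCSUM and TSO interface capabilities consistent: TSO
 * requires the matching transmit checksum offload, so enabling
 * TSO4/TSO6 also enables TXCSUM/TXCSUM_IPV6, and disabling the
 * checksum offload also disables the corresponding TSO capability
 * (remembered in vsi->flags so it can be restored later).
 */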
static void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
        device_t        dev = vsi->dev;

        /* Enable/disable TXCSUM/TSO4 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable |= IFCAP_TXCSUM;
                        /* enable TXCSUM, restore TSO if previously enabled */
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                                ifp->if_capenable |= IFCAP_TSO4;
                        }
                }
                else if (mask & IFCAP_TSO4) {
                        ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                        device_printf(dev,
                            "TSO4 requires txcsum, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM)
                        ifp->if_capenable &= ~IFCAP_TXCSUM;
                else if (mask & IFCAP_TSO4)
                        ifp->if_capenable |= IFCAP_TSO4;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && (ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO4;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
                        device_printf(dev,
                            "TSO4 requires txcsum, disabling both...\n");
                } else if (mask & IFCAP_TSO4)
                        ifp->if_capenable &= ~IFCAP_TSO4;
        }

        /* Enable/disable TXCSUM_IPV6/TSO6 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                                ifp->if_capenable |= IFCAP_TSO6;
                        }
                } else if (mask & IFCAP_TSO6) {
                        ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                        device_printf(dev,
                            "TSO6 requires txcsum6, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6)
                        ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
                else if (mask & IFCAP_TSO6)
                        ifp->if_capenable |= IFCAP_TSO6;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && (ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO6;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        device_printf(dev,
                            "TSO6 requires txcsum6, disabling both...\n");
                } else if (mask & IFCAP_TSO6)
                        ifp->if_capenable &= ~IFCAP_TSO6;
        }
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixl_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
        struct ixl_vsi  *vsi = ifp->if_softc;
        struct ixl_pf   *pf = vsi->back;
        struct ifreq    *ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
        struct ifaddr *ifa = (struct ifaddr *)data;
        bool            avoid_reset = FALSE;
#endif
        int             error = 0;

        switch (command) {

        case SIOCSIFADDR:
#ifdef INET
                if (ifa->ifa_addr->sa_family == AF_INET)
                        avoid_reset = TRUE;
#endif
#ifdef INET6
                if (ifa->ifa_addr->sa_family == AF_INET6)
                        avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
                /*
                ** Calling init results in link renegotiation,
                ** so we avoid doing it when possible.
                */
                if (avoid_reset) {
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                                ixl_init(pf);
#ifdef INET
                        if (!(ifp->if_flags & IFF_NOARP))
                                arp_ifinit(ifp, ifa);
#endif
                } else
                        error = ether_ioctl(ifp, command, data);
                break;
#endif
        case SIOCSIFMTU:
                IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
                if (ifr->ifr_mtu > IXL_MAX_FRAME -
                   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
                        error = EINVAL;
                } else {
                        IXL_PF_LOCK(pf);
                        ifp->if_mtu = ifr->ifr_mtu;
                        vsi->max_frame_size =
                                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
                            + ETHER_VLAN_ENCAP_LEN;
                        ixl_init_locked(pf);
                        IXL_PF_UNLOCK(pf);
                }
                break;
        case SIOCSIFFLAGS:
                IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
                IXL_PF_LOCK(pf);
                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                if ((ifp->if_flags ^ pf->if_flags) &
                                    (IFF_PROMISC | IFF_ALLMULTI)) {
                                        ixl_set_promisc(vsi);
                                }
                        } else
                                ixl_init_locked(pf);
                } else
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                ixl_stop(pf);
                pf->if_flags = ifp->if_flags;
                IXL_PF_UNLOCK(pf);
                break;
        case SIOCADDMULTI:
                IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        IXL_PF_LOCK(pf);
                        ixl_disable_intr(vsi);
                        ixl_add_multi(vsi);
                        ixl_enable_intr(vsi);
                        IXL_PF_UNLOCK(pf);
                }
                break;
        case SIOCDELMULTI:
                IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        IXL_PF_LOCK(pf);
                        ixl_disable_intr(vsi);
                        ixl_del_multi(vsi);
                        ixl_enable_intr(vsi);
                        IXL_PF_UNLOCK(pf);
                }
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
#ifdef IFM_ETH_XTYPE
        case SIOCGIFXMEDIA:
#endif
                IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
                error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
                break;
        case SIOCSIFCAP:
        {
                int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

                ixl_cap_txcsum_tso(vsi, ifp, mask);

                if (mask & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                if (mask & IFCAP_RXCSUM_IPV6)
                        ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
                if (mask & IFCAP_LRO)
                        ifp->if_capenable ^= IFCAP_LRO;
                if (mask & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                if (mask & IFCAP_VLAN_HWFILTER)
                        ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        IXL_PF_LOCK(pf);
                        ixl_init_locked(pf);
                        IXL_PF_UNLOCK(pf);
                }
                VLAN_CAPABILITIES(ifp);

                break;
        }

        default:
                IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}


/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways: the stack calls it as the init
 *  entry point in the network interface structure, and the driver
 *  uses it as a hw/sw initialization routine to bring the device to
 *  a consistent state.
 *
 **********************************************************************/

static void
ixl_init_locked(struct ixl_pf *pf)
{
        struct i40e_hw  *hw = &pf->hw;
        struct ixl_vsi  *vsi = &pf->vsi;
        struct ifnet    *ifp = vsi->ifp;
        device_t        dev = pf->dev;
        struct i40e_filter_control_settings     filter;
        u8              tmpaddr[ETHER_ADDR_LEN];
        int             ret;

        mtx_assert(&pf->pf_mtx, MA_OWNED);
        INIT_DEBUGOUT("ixl_init: begin");
        ixl_stop(pf);

        /* Get the latest mac address... User might use a LAA */
        bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
              I40E_ETH_LENGTH_OF_ADDRESS);
        if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
            (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
                ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
                bcopy(tmpaddr, hw->mac.addr,
                    I40E_ETH_LENGTH_OF_ADDRESS);
                ret = i40e_aq_mac_address_write(hw,
                    I40E_AQC_WRITE_TYPE_LAA_ONLY,
                    hw->mac.addr, NULL);
                if (ret) {
                        device_printf(dev,
                            "LLA address change failed!!\n");
                        return;
                } else {
                        ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
                }
        }

        /* Set the various hardware offload abilities */
        ifp->if_hwassist = 0;
        if (ifp->if_capenable & IFCAP_TSO)
                ifp->if_hwassist |= CSUM_TSO;
        if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
        if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
                ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

        /* Set up the device filtering */
        bzero(&filter, sizeof(filter));
        filter.enable_ethtype = TRUE;
        filter.enable_macvlan = TRUE;
#ifdef IXL_FDIR
        filter.enable_fdir = TRUE;
#endif
        if (i40e_set_filter_control(hw, &filter))
                device_printf(dev, "set_filter_control() failed\n");

        /* Set up RSS */
        ixl_config_rss(vsi);

        /*
        ** Prepare the VSI: rings, hmc contexts, etc...
        */
        if (ixl_initialize_vsi(vsi)) {
                device_printf(dev, "initialize vsi failed!!\n");
                return;
        }

        /* Add protocol filters to list */
        ixl_init_filters(vsi);

        /* Setup vlan's if needed */
        ixl_setup_vlan_filters(vsi);

        /* Start the local timer */
        callout_reset(&pf->timer, hz, ixl_local_timer, pf);

        /* Set up MSI/X routing and the ITR settings */
        if (ixl_enable_msix) {
                ixl_configure_msix(pf);
                ixl_configure_itr(pf);
        } else
                ixl_configure_legacy(pf);

        ixl_enable_rings(vsi);

        i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

        ixl_reconfigure_filters(vsi);

        /* Set MTU in hardware */
        int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
            TRUE, 0, NULL);
        if (aq_error)
                device_printf(vsi->dev,
                    "aq_set_mac_config in init error, code %d\n",
                    aq_error);

        /* And now turn on interrupts */
        ixl_enable_intr(vsi);

        /* Now inform the stack we're ready */
        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        return;
}

static void
ixl_init(void *arg)
{
        struct ixl_pf *pf = arg;

        IXL_PF_LOCK(pf);
        ixl_init_locked(pf);
        IXL_PF_UNLOCK(pf);
        return;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/
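
/*
 * Deferred (taskqueue) handler for a queue interrupt: receives up to
 * IXL_RX_LIMIT packets, drains completed TX descriptors, restarts TX
 * if the stack has packets buffered, and either reschedules itself
 * (more RX work pending) or re-enables the queue interrupt.
 */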
1245 static void
1246 ixl_handle_que(void *context, int pending)
1247 {
1248         struct ixl_queue *que = context;
1249         struct ixl_vsi *vsi = que->vsi;
1250         struct i40e_hw  *hw = vsi->hw;
1251         struct tx_ring  *txr = &que->txr;
1252         struct ifnet    *ifp = vsi->ifp;
1253         bool            more;
1254
1255         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1256                 more = ixl_rxeof(que, IXL_RX_LIMIT);
1257                 IXL_TX_LOCK(txr);
1258                 ixl_txeof(que);
1259                 if (!drbr_empty(ifp, txr->br))
1260                         ixl_mq_start_locked(ifp, txr);
1261                 IXL_TX_UNLOCK(txr);
1262                 if (more) {
1263                         taskqueue_enqueue(que->tq, &que->task);
1264                         return;
1265                 }
1266         }
1267
1268         /* Reenable this interrupt - hmmm */
1269         ixl_enable_queue(hw, que->me);
1270         return;
1271 }
1272
1273
1274 /*********************************************************************
1275  *
1276  *  Legacy Interrupt Service routine
1277  *
1278  **********************************************************************/
1279 void
1280 ixl_intr(void *arg)
1281 {
1282         struct ixl_pf           *pf = arg;
1283         struct i40e_hw          *hw =  &pf->hw;
1284         struct ixl_vsi          *vsi = &pf->vsi;
1285         struct ixl_queue        *que = vsi->queues;
1286         struct ifnet            *ifp = vsi->ifp;
1287         struct tx_ring          *txr = &que->txr;
1288         u32                     reg, icr0, mask;
1289         bool                    more_tx, more_rx;
1290
1291         ++que->irqs;
1292
1293         /* Protect against spurious interrupts */
1294         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1295                 return;
1296
1297         icr0 = rd32(hw, I40E_PFINT_ICR0);
1298
1299         reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1300         reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1301         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1302
1303         mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1304
1305 #ifdef PCI_IOV
1306         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
1307                 taskqueue_enqueue(pf->tq, &pf->vflr_task);
1308 #endif
1309
1310         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
1311                 taskqueue_enqueue(pf->tq, &pf->adminq);
1312                 return;
1313         }
1314
1315         more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1316
1317         IXL_TX_LOCK(txr);
1318         more_tx = ixl_txeof(que);
1319         if (!drbr_empty(vsi->ifp, txr->br))
1320                 more_tx = 1;
1321         IXL_TX_UNLOCK(txr);
1322
1323         /* re-enable other interrupt causes */
1324         wr32(hw, I40E_PFINT_ICR0_ENA, mask);
1325
1326         /* And now the queues */
1327         reg = rd32(hw, I40E_QINT_RQCTL(0));
1328         reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1329         wr32(hw, I40E_QINT_RQCTL(0), reg);
1330
1331         reg = rd32(hw, I40E_QINT_TQCTL(0));
1332         reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1333         reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
1334         wr32(hw, I40E_QINT_TQCTL(0), reg);
1335
1336         ixl_enable_legacy(hw);
1337
1338         return;
1339 }
1340
1341
1342 /*********************************************************************
1343  *
1344  *  MSIX VSI Interrupt Service routine
1345  *
1346  **********************************************************************/
1347 void
1348 ixl_msix_que(void *arg)
1349 {
1350         struct ixl_queue        *que = arg;
1351         struct ixl_vsi  *vsi = que->vsi;
1352         struct i40e_hw  *hw = vsi->hw;
1353         struct tx_ring  *txr = &que->txr;
1354         bool            more_tx, more_rx;
1355
1356         /* Protect against spurious interrupts */
1357         if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
1358                 return;
1359
1360         ++que->irqs;
1361
1362         more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1363
1364         IXL_TX_LOCK(txr);
1365         more_tx = ixl_txeof(que);
1366         /*
1367         ** Make certain that if the stack 
1368         ** has anything queued the task gets
1369         ** scheduled to handle it.
1370         */
1371         if (!drbr_empty(vsi->ifp, txr->br))
1372                 more_tx = 1;
1373         IXL_TX_UNLOCK(txr);
1374
1375         ixl_set_queue_rx_itr(que);
1376         ixl_set_queue_tx_itr(que);
1377
1378         if (more_tx || more_rx)
1379                 taskqueue_enqueue(que->tq, &que->task);
1380         else
1381                 ixl_enable_queue(hw, que->me);
1382
1383         return;
1384 }
1385
1386
1387 /*********************************************************************
1388  *
1389  *  MSIX Admin Queue Interrupt Service routine
1390  *
1391  **********************************************************************/
1392 static void
1393 ixl_msix_adminq(void *arg)
1394 {
1395         struct ixl_pf   *pf = arg;
1396         struct i40e_hw  *hw = &pf->hw;
1397         u32             reg, mask;
1398
1399         ++pf->admin_irq;
1400
1401         reg = rd32(hw, I40E_PFINT_ICR0);
1402         mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1403
1404         /* Check on the cause */
1405         if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
1406                 mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1407
1408         if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
1409                 ixl_handle_mdd_event(pf);
1410                 mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1411         }
1412
1413 #ifdef PCI_IOV
1414         if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
1415                 mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
1416                 taskqueue_enqueue(pf->tq, &pf->vflr_task);
1417         }
1418 #endif
1419
1420         reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1421         reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1422         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1423
1424         taskqueue_enqueue(pf->tq, &pf->adminq);
1425         return;
1426 }
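/*
** Note: the adjusted mask computed above is not written back to
** I40E_PFINT_ICR0_ENA in this path; actual servicing and re-enabling
** of the admin queue causes is deferred to the adminq task
** (ixl_do_adminq).
*/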
1427
1428 /*********************************************************************
1429  *
1430  *  Media Ioctl callback
1431  *
1432  *  This routine is called whenever the user queries the status of
1433  *  the interface using ifconfig.
1434  *
1435  **********************************************************************/
1436 static void
1437 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1438 {
1439         struct ixl_vsi  *vsi = ifp->if_softc;
1440         struct ixl_pf   *pf = vsi->back;
1441         struct i40e_hw  *hw = &pf->hw;
1442
1443         INIT_DEBUGOUT("ixl_media_status: begin");
1444         IXL_PF_LOCK(pf);
1445
1446         hw->phy.get_link_info = TRUE;
1447         i40e_get_link_status(hw, &pf->link_up);
1448         ixl_update_link_status(pf);
1449
1450         ifmr->ifm_status = IFM_AVALID;
1451         ifmr->ifm_active = IFM_ETHER;
1452
1453         if (!pf->link_up) {
1454                 IXL_PF_UNLOCK(pf);
1455                 return;
1456         }
1457
1458         ifmr->ifm_status |= IFM_ACTIVE;
1459         /* Hardware is always full-duplex */
1460         ifmr->ifm_active |= IFM_FDX;
1461
1462         switch (hw->phy.link_info.phy_type) {
1463                 /* 100 M */
1464                 case I40E_PHY_TYPE_100BASE_TX:
1465                         ifmr->ifm_active |= IFM_100_TX;
1466                         break;
1467                 /* 1 G */
1468                 case I40E_PHY_TYPE_1000BASE_T:
1469                         ifmr->ifm_active |= IFM_1000_T;
1470                         break;
1471                 case I40E_PHY_TYPE_1000BASE_SX:
1472                         ifmr->ifm_active |= IFM_1000_SX;
1473                         break;
1474                 case I40E_PHY_TYPE_1000BASE_LX:
1475                         ifmr->ifm_active |= IFM_1000_LX;
1476                         break;
1477                 /* 10 G */
1478                 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1479                         ifmr->ifm_active |= IFM_10G_TWINAX;
1480                         break;
1481                 case I40E_PHY_TYPE_10GBASE_SR:
1482                         ifmr->ifm_active |= IFM_10G_SR;
1483                         break;
1484                 case I40E_PHY_TYPE_10GBASE_LR:
1485                         ifmr->ifm_active |= IFM_10G_LR;
1486                         break;
1487                 case I40E_PHY_TYPE_10GBASE_T:
1488                         ifmr->ifm_active |= IFM_10G_T;
1489                         break;
1490                 /* 40 G */
1491                 case I40E_PHY_TYPE_40GBASE_CR4:
1492                 case I40E_PHY_TYPE_40GBASE_CR4_CU:
1493                         ifmr->ifm_active |= IFM_40G_CR4;
1494                         break;
1495                 case I40E_PHY_TYPE_40GBASE_SR4:
1496                         ifmr->ifm_active |= IFM_40G_SR4;
1497                         break;
1498                 case I40E_PHY_TYPE_40GBASE_LR4:
1499                         ifmr->ifm_active |= IFM_40G_LR4;
1500                         break;
1501 #ifndef IFM_ETH_XTYPE
1502                 case I40E_PHY_TYPE_1000BASE_KX:
1503                         ifmr->ifm_active |= IFM_1000_CX;
1504                         break;
1505                 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1506                 case I40E_PHY_TYPE_10GBASE_CR1:
1507                         ifmr->ifm_active |= IFM_10G_TWINAX;
1508                         break;
1509                 case I40E_PHY_TYPE_10GBASE_KX4:
1510                         ifmr->ifm_active |= IFM_10G_CX4;
1511                         break;
1512                 case I40E_PHY_TYPE_10GBASE_KR:
1513                         ifmr->ifm_active |= IFM_10G_SR;
1514                         break;
1515                 case I40E_PHY_TYPE_40GBASE_KR4:
1516                 case I40E_PHY_TYPE_XLPPI:
1517                         ifmr->ifm_active |= IFM_40G_SR4;
1518                         break;
1519 #else
1520                 case I40E_PHY_TYPE_1000BASE_KX:
1521                         ifmr->ifm_active |= IFM_1000_KX;
1522                         break;
1523                 /* ERJ: What's the difference between these? */
1524                 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1525                 case I40E_PHY_TYPE_10GBASE_CR1:
1526                         ifmr->ifm_active |= IFM_10G_CR1;
1527                         break;
1528                 case I40E_PHY_TYPE_10GBASE_KX4:
1529                         ifmr->ifm_active |= IFM_10G_KX4;
1530                         break;
1531                 case I40E_PHY_TYPE_10GBASE_KR:
1532                         ifmr->ifm_active |= IFM_10G_KR;
1533                         break;
1534                 case I40E_PHY_TYPE_20GBASE_KR2:
1535                         ifmr->ifm_active |= IFM_20G_KR2;
1536                         break;
1537                 case I40E_PHY_TYPE_40GBASE_KR4:
1538                         ifmr->ifm_active |= IFM_40G_KR4;
1539                         break;
1540                 case I40E_PHY_TYPE_XLPPI:
1541                         ifmr->ifm_active |= IFM_40G_XLPPI;
1542                         break;
1543 #endif
1544                 default:
1545                         ifmr->ifm_active |= IFM_UNKNOWN;
1546                         break;
1547         }
1548         /* Report flow control status as well */
1549         if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1550                 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1551         if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1552                 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1553
1554         IXL_PF_UNLOCK(pf);
1555
1556         return;
1557 }
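/*
** Note: on kernels without IFM_ETH_XTYPE the backplane PHY types have
** no exact ifmedia equivalent, so the first branch above reports the
** closest match (e.g. KR as SR, KX as CX); with IFM_ETH_XTYPE the
** native KX/KR/KR2/KR4/XLPPI media types are reported instead.
*/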
1558
1559 /*********************************************************************
1560  *
1561  *  Media Ioctl callback
1562  *
1563  *  This routine is called when the user changes speed/duplex using
1564  *  media/mediaopt option with ifconfig.
1565  *
1566  **********************************************************************/
1567 static int
1568 ixl_media_change(struct ifnet * ifp)
1569 {
1570         struct ixl_vsi *vsi = ifp->if_softc;
1571         struct ifmedia *ifm = &vsi->media;
1572
1573         INIT_DEBUGOUT("ixl_media_change: begin");
1574
1575         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1576                 return (EINVAL);
1577
1578         if_printf(ifp, "Media change is currently not supported.\n");
1579
1580         return (ENODEV);
1581 }
1582
1583
1584 #ifdef IXL_FDIR
1585 /*
1586 ** ATR: Application Targeted Receive - creates a filter
1587 **      based on TX flow info that will keep the receive
1588 **      portion of the flow on the same queue. Based on the
1589 **      implementation this is only available for TCP connections
1590 */
1591 void
1592 ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
1593 {
1594         struct ixl_vsi                  *vsi = que->vsi;
1595         struct tx_ring                  *txr = &que->txr;
1596         struct i40e_filter_program_desc *FDIR;
1597         u32                             ptype, dtype;
1598         int                             idx;
1599
1600         /* check that ATR is enabled and a sample rate is set */
1601         if ((!ixl_enable_fdir) || (!txr->atr_rate))
1602                 return;
1603         /*
1604         ** We sample all TCP SYN/FIN packets,
1605         ** or at the selected sample rate 
1606         */
1607         txr->atr_count++;
1608         if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
1609             (txr->atr_count < txr->atr_rate))
1610                 return;
1611         txr->atr_count = 0;
1612
1613         /* Get a descriptor to use */
1614         idx = txr->next_avail;
1615         FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
1616         if (++idx == que->num_desc)
1617                 idx = 0;
1618         txr->avail--;
1619         txr->next_avail = idx;
1620
1621         ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1622             I40E_TXD_FLTR_QW0_QINDEX_MASK;
1623
1624         ptype |= (etype == ETHERTYPE_IP) ?
1625             (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1626             I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1627             (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1628             I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1629
1630         ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1631
1632         dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
1633
1634         /*
1635         ** We use the TCP TH_FIN as a trigger to remove
1636         ** the filter, otherwise it's an update.
1637         */
1638         dtype |= (th->th_flags & TH_FIN) ?
1639             (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1640             I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1641             (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1642             I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1643
1644         dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1645             I40E_TXD_FLTR_QW1_DEST_SHIFT;
1646
1647         dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1648             I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1649
1650         FDIR->qindex_flex_ptype_vsi = htole32(ptype);
1651         FDIR->dtype_cmd_cntindex = htole32(dtype);
1652         return;
1653 }
1654 #endif
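/*
** For reference, a sketch of how the two words built by ixl_atr() land
** in the filter programming descriptor (shift names abbreviated, values
** illustrative only):
**
**      QW0 (qindex_flex_ptype_vsi): queue index | flow pctype | dest VSI
**          e.g. queue 3, IPv4/TCP: (3 << QINDEX_SHIFT) |
**              (I40E_FILTER_PCTYPE_NONF_IPV4_TCP << PCTYPE_SHIFT)
**      QW1 (dtype_cmd_cntindex):   FILTER_PROG dtype | ADD/REMOVE cmd |
**                                  direct-to-queue dest | FD_ID status
**
** A FIN segment programs a REMOVE, so the filter only lives for the
** duration of the TCP connection.
*/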
1655
1656
1657 static void
1658 ixl_set_promisc(struct ixl_vsi *vsi)
1659 {
1660         struct ifnet    *ifp = vsi->ifp;
1661         struct i40e_hw  *hw = vsi->hw;
1662         int             err, mcnt = 0;
1663         bool            uni = FALSE, multi = FALSE;
1664
1665         if (ifp->if_flags & IFF_ALLMULTI)
1666                 multi = TRUE;
1667         else { /* Need to count the multicast addresses */
1668                 struct  ifmultiaddr *ifma;
1669                 if_maddr_rlock(ifp);
1670                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1671                         if (ifma->ifma_addr->sa_family != AF_LINK)
1672                                 continue;
1673                         if (mcnt == MAX_MULTICAST_ADDR)
1674                                 break;
1675                         mcnt++;
1676                 }
1677                 if_maddr_runlock(ifp);
1678         }
1679
1680         if (mcnt >= MAX_MULTICAST_ADDR)
1681                 multi = TRUE;
1682         if (ifp->if_flags & IFF_PROMISC)
1683                 uni = TRUE;
1684
1685         err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1686             vsi->seid, uni, NULL);
1687         err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1688             vsi->seid, multi, NULL);
1689         return;
1690 }
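/*
** Note: the policy above is that unicast promiscuous strictly follows
** IFF_PROMISC, while multicast promiscuous is turned on either by
** IFF_ALLMULTI or when the group count would overflow the
** MAX_MULTICAST_ADDR filter slots. Neither AQ return value is checked,
** so a failed call simply leaves the previous mode in place.
*/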
1691
1692 /*********************************************************************
1693  *      Filter Routines
1694  *
1695  *      Routines for multicast and vlan filter management.
1696  *
1697  *********************************************************************/
1698 static void
1699 ixl_add_multi(struct ixl_vsi *vsi)
1700 {
1701         struct  ifmultiaddr     *ifma;
1702         struct ifnet            *ifp = vsi->ifp;
1703         struct i40e_hw          *hw = vsi->hw;
1704         int                     mcnt = 0, flags;
1705
1706         IOCTL_DEBUGOUT("ixl_add_multi: begin");
1707
1708         if_maddr_rlock(ifp);
1709         /*
1710         ** First just get a count, to decide if we
1711         ** should simply use multicast promiscuous.
1712         */
1713         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1714                 if (ifma->ifma_addr->sa_family != AF_LINK)
1715                         continue;
1716                 mcnt++;
1717         }
1718         if_maddr_runlock(ifp);
1719
1720         if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1721                 /* delete existing MC filters */
1722                 ixl_del_hw_filters(vsi, mcnt);
1723                 i40e_aq_set_vsi_multicast_promiscuous(hw,
1724                     vsi->seid, TRUE, NULL);
1725                 return;
1726         }
1727
1728         mcnt = 0;
1729         if_maddr_rlock(ifp);
1730         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1731                 if (ifma->ifma_addr->sa_family != AF_LINK)
1732                         continue;
1733                 ixl_add_mc_filter(vsi,
1734                     (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1735                 mcnt++;
1736         }
1737         if_maddr_runlock(ifp);
1738         if (mcnt > 0) {
1739                 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1740                 ixl_add_hw_filters(vsi, flags, mcnt);
1741         }
1742
1743         IOCTL_DEBUGOUT("ixl_add_multi: end");
1744         return;
1745 }
1746
1747 static void
1748 ixl_del_multi(struct ixl_vsi *vsi)
1749 {
1750         struct ifnet            *ifp = vsi->ifp;
1751         struct ifmultiaddr      *ifma;
1752         struct ixl_mac_filter   *f;
1753         int                     mcnt = 0;
1754         bool            match = FALSE;
1755
1756         IOCTL_DEBUGOUT("ixl_del_multi: begin");
1757
1758         /* Search for removed multicast addresses */
1759         if_maddr_rlock(ifp);
1760         SLIST_FOREACH(f, &vsi->ftl, next) {
1761                 if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1762                         match = FALSE;
1763                         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1764                                 if (ifma->ifma_addr->sa_family != AF_LINK)
1765                                         continue;
1766                                 u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1767                                 if (cmp_etheraddr(f->macaddr, mc_addr)) {
1768                                         match = TRUE;
1769                                         break;
1770                                 }
1771                         }
1772                         if (match == FALSE) {
1773                                 f->flags |= IXL_FILTER_DEL;
1774                                 mcnt++;
1775                         }
1776                 }
1777         }
1778         if_maddr_runlock(ifp);
1779
1780         if (mcnt > 0)
1781                 ixl_del_hw_filters(vsi, mcnt);
1782 }
1783
1784
1785 /*********************************************************************
1786  *  Timer routine
1787  *
1788  *  This routine checks for link status, updates statistics,
1789  *  and runs the watchdog check.
1790  *
1791  **********************************************************************/
1792
1793 static void
1794 ixl_local_timer(void *arg)
1795 {
1796         struct ixl_pf           *pf = arg;
1797         struct i40e_hw          *hw = &pf->hw;
1798         struct ixl_vsi          *vsi = &pf->vsi;
1799         struct ixl_queue        *que = vsi->queues;
1800         device_t                dev = pf->dev;
1801         int                     hung = 0;
1802         u32                     mask;
1803
1804         mtx_assert(&pf->pf_mtx, MA_OWNED);
1805
1806         /* Fire off the adminq task */
1807         taskqueue_enqueue(pf->tq, &pf->adminq);
1808
1809         /* Update stats */
1810         ixl_update_stats_counters(pf);
1811
1812         /*
1813         ** Check status of the queues
1814         */
1815         mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1816                 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
1817  
1818         for (int i = 0; i < vsi->num_queues; i++,que++) {
1819                 /* Any queues with outstanding work get a sw irq */
1820                 if (que->busy)
1821                         wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1822                 /*
1823                 ** Each time txeof runs without cleaning, while there
1824                 ** are still uncleaned descriptors, it increments busy;
1825                 ** once busy reaches IXL_MAX_TX_BUSY we declare it hung.
1826                 */
1827                 if (que->busy == IXL_QUEUE_HUNG) {
1828                         ++hung;
1829                         /* Mark the queue as inactive */
1830                         vsi->active_queues &= ~((u64)1 << que->me);
1831                         continue;
1832                 } else {
1833                         /* Check if we've come back from hung */
1834                         if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1835                                 vsi->active_queues |= ((u64)1 << que->me);
1836                 }
1837                 if (que->busy >= IXL_MAX_TX_BUSY) {
1838 #ifdef IXL_DEBUG
1839                         device_printf(dev,"Warning queue %d "
1840                             "appears to be hung!\n", i);
1841 #endif
1842                         que->busy = IXL_QUEUE_HUNG;
1843                         ++hung;
1844                 }
1845         }
1846         /* Only reinit if all queues show hung */
1847         if (hung == vsi->num_queues)
1848                 goto hung;
1849
1850         callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1851         return;
1852
1853 hung:
1854         device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1855         ixl_init_locked(pf);
1856 }
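/*
** Note: writing INTENA|SWINT_TRIG to PFINT_DYN_CTLN above fires a
** software-triggered interrupt on that queue's vector, re-running the
** cleanup path for queues that still show outstanding work; the
** interface is only reinitialized once every queue reports hung.
*/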
1857
1858 /*
1859 ** Note: this routine updates the OS on the link state;
1860 **      the real check of the hardware only happens with
1861 **      a link interrupt.
1862 */
1863 static void
1864 ixl_update_link_status(struct ixl_pf *pf)
1865 {
1866         struct ixl_vsi          *vsi = &pf->vsi;
1867         struct i40e_hw          *hw = &pf->hw;
1868         struct ifnet            *ifp = vsi->ifp;
1869         device_t                dev = pf->dev;
1870
1871         if (pf->link_up){ 
1872                 if (vsi->link_active == FALSE) {
1873                         pf->fc = hw->fc.current_mode;
1874                         if (bootverbose) {
1875                                 device_printf(dev,"Link is up %d Gbps %s,"
1876                                     " Flow Control: %s\n",
1877                                     ((pf->link_speed ==
1878                                     I40E_LINK_SPEED_40GB)? 40:10),
1879                                     "Full Duplex", ixl_fc_string[pf->fc]);
1880                         }
1881                         vsi->link_active = TRUE;
1882                         /*
1883                         ** Warn user if link speed on NPAR enabled
1884                         ** partition is not at least 10GB
1885                         */
1886                         if (hw->func_caps.npar_enable &&
1887                            (hw->phy.link_info.link_speed ==
1888                            I40E_LINK_SPEED_1GB ||
1889                            hw->phy.link_info.link_speed ==
1890                            I40E_LINK_SPEED_100MB))
1891                                 device_printf(dev, "The partition detected a"
1892                                     " link speed that is less than 10Gbps\n");
1893                         if_link_state_change(ifp, LINK_STATE_UP);
1894                 }
1895         } else { /* Link down */
1896                 if (vsi->link_active == TRUE) {
1897                         if (bootverbose)
1898                                 device_printf(dev,"Link is Down\n");
1899                         if_link_state_change(ifp, LINK_STATE_DOWN);
1900                         vsi->link_active = FALSE;
1901                 }
1902         }
1903
1904         return;
1905 }
1906
1907 /*********************************************************************
1908  *
1909  *  This routine disables all traffic on the adapter by issuing a
1910  *  global reset on the MAC and deallocates TX/RX buffers.
1911  *
1912  **********************************************************************/
1913
1914 static void
1915 ixl_stop(struct ixl_pf *pf)
1916 {
1917         struct ixl_vsi  *vsi = &pf->vsi;
1918         struct ifnet    *ifp = vsi->ifp;
1919
1920         mtx_assert(&pf->pf_mtx, MA_OWNED);
1921
1922         INIT_DEBUGOUT("ixl_stop: begin\n");
1923         if (pf->num_vfs == 0)
1924                 ixl_disable_intr(vsi);
1925         else
1926                 ixl_disable_rings_intr(vsi);
1927         ixl_disable_rings(vsi);
1928
1929         /* Tell the stack that the interface is no longer active */
1930         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1931
1932         /* Stop the local timer */
1933         callout_stop(&pf->timer);
1934
1935         return;
1936 }
1937
1938
1939 /*********************************************************************
1940  *
1941  *  Setup Legacy or MSI Interrupt resources and handler for the VSI
1942  *
1943  **********************************************************************/
1944 static int
1945 ixl_assign_vsi_legacy(struct ixl_pf *pf)
1946 {
1947         device_t        dev = pf->dev;
1948         struct          ixl_vsi *vsi = &pf->vsi;
1949         struct          ixl_queue *que = vsi->queues;
1950         int             error, rid = 0;
1951
1952         if (pf->msix == 1)
1953                 rid = 1;
1954         pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1955             &rid, RF_SHAREABLE | RF_ACTIVE);
1956         if (pf->res == NULL) {
1957                 device_printf(dev,"Unable to allocate"
1958                     " bus resource: vsi legacy/msi interrupt\n");
1959                 return (ENXIO);
1960         }
1961
1962         /* Set the handler function */
1963         error = bus_setup_intr(dev, pf->res,
1964             INTR_TYPE_NET | INTR_MPSAFE, NULL,
1965             ixl_intr, pf, &pf->tag);
1966         if (error) {
1967                 pf->res = NULL;
1968                 device_printf(dev, "Failed to register legacy/msi handler");
1969                 return (error);
1970         }
1971         bus_describe_intr(dev, pf->res, pf->tag, "irq0");
1972         TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1973         TASK_INIT(&que->task, 0, ixl_handle_que, que);
1974         que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1975             taskqueue_thread_enqueue, &que->tq);
1976         taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1977             device_get_nameunit(dev));
1978         TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1979
1980 #ifdef PCI_IOV
1981         TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
1982 #endif
1983
1984         pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1985             taskqueue_thread_enqueue, &pf->tq);
1986         taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1987             device_get_nameunit(dev));
1988
1989         return (0);
1990 }
1991
1992
1993 /*********************************************************************
1994  *
1995  *  Setup MSIX Interrupt resources and handlers for the VSI
1996  *
1997  **********************************************************************/
1998 static int
1999 ixl_assign_vsi_msix(struct ixl_pf *pf)
2000 {
2001         device_t        dev = pf->dev;
2002         struct          ixl_vsi *vsi = &pf->vsi;
2003         struct          ixl_queue *que = vsi->queues;
2004         struct          tx_ring  *txr;
2005         int             error, rid, vector = 0;
2006
2007         /* Admin Queue is vector 0 */
2008         rid = vector + 1;
2009         pf->res = bus_alloc_resource_any(dev,
2010             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2011         if (!pf->res) {
2012                 device_printf(dev,"Unable to allocate"
2013             " bus resource: Adminq interrupt [%d]\n", rid);
2014                 return (ENXIO);
2015         }
2016         /* Set the adminq vector and handler */
2017         error = bus_setup_intr(dev, pf->res,
2018             INTR_TYPE_NET | INTR_MPSAFE, NULL,
2019             ixl_msix_adminq, pf, &pf->tag);
2020         if (error) {
2021                 pf->res = NULL;
2022                 device_printf(dev, "Failed to register Admin que handler");
2023                 return (error);
2024         }
2025         bus_describe_intr(dev, pf->res, pf->tag, "aq");
2026         pf->admvec = vector;
2027         /* Tasklet for Admin Queue */
2028         TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
2029
2030 #ifdef PCI_IOV
2031         TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
2032 #endif
2033
2034         pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
2035             taskqueue_thread_enqueue, &pf->tq);
2036         taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
2037             device_get_nameunit(pf->dev));
2038         ++vector;
2039
2040         /* Now set up the stations */
2041         for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
2042                 int cpu_id = i;
2043                 rid = vector + 1;
2044                 txr = &que->txr;
2045                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2046                     RF_SHAREABLE | RF_ACTIVE);
2047                 if (que->res == NULL) {
2048                         device_printf(dev,"Unable to allocate"
2049                             " bus resource: que interrupt [%d]\n", vector);
2050                         return (ENXIO);
2051                 }
2052                 /* Set the handler function */
2053                 error = bus_setup_intr(dev, que->res,
2054                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
2055                     ixl_msix_que, que, &que->tag);
2056                 if (error) {
2057                         que->res = NULL;
2058                         device_printf(dev, "Failed to register que handler");
2059                         return (error);
2060                 }
2061                 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
2062                 /* Bind the vector to a CPU */
2063 #ifdef RSS
2064                 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2065 #endif
2066                 bus_bind_intr(dev, que->res, cpu_id);
2067                 que->msix = vector;
2068                 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
2069                 TASK_INIT(&que->task, 0, ixl_handle_que, que);
2070                 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
2071                     taskqueue_thread_enqueue, &que->tq);
2072 #ifdef RSS
2073                 taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
2074                     cpu_id, "%s (bucket %d)",
2075                     device_get_nameunit(dev), cpu_id);
2076 #else
2077                 taskqueue_start_threads(&que->tq, 1, PI_NET,
2078                     "%s que", device_get_nameunit(dev));
2079 #endif
2080         }
2081
2082         return (0);
2083 }
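/*
** Note: the resulting vector layout is fixed: vector 0 (IRQ rid 1) is
** the admin queue, and vectors 1..num_queues (rids 2..num_queues + 1)
** are the queue pairs, each with its own fast taskqueue and, with RSS,
** pinned to the CPU of the matching RSS bucket.
*/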
2084
2085
2086 /*
2087  * Allocate MSI/X vectors
2088  */
2089 static int
2090 ixl_init_msix(struct ixl_pf *pf)
2091 {
2092         device_t dev = pf->dev;
2093         int rid, want, vectors, queues, available;
2094
2095         /* Override by tuneable */
2096         if (ixl_enable_msix == 0)
2097                 goto msi;
2098
2099         /*
2100         ** When used in a virtualized environment the
2101         ** PCI BUSMASTER capability may not be set,
2102         ** so explicitly set it here and rewrite
2103         ** the ENABLE bit in the MSIX control register
2104         ** at this point so the host can
2105         ** initialize us successfully.
2106         */
2107         {
2108                 u16 pci_cmd_word;
2109                 int msix_ctrl;
2110                 pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2111                 pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
2112                 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
2113                 pci_find_cap(dev, PCIY_MSIX, &rid);
2114                 rid += PCIR_MSIX_CTRL;
2115                 msix_ctrl = pci_read_config(dev, rid, 2);
2116                 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
2117                 pci_write_config(dev, rid, msix_ctrl, 2);
2118         }
2119
2120         /* First try MSI/X */
2121         rid = PCIR_BAR(IXL_BAR);
2122         pf->msix_mem = bus_alloc_resource_any(dev,
2123             SYS_RES_MEMORY, &rid, RF_ACTIVE);
2124         if (!pf->msix_mem) {
2125                 /* May not be enabled */
2126                 device_printf(pf->dev,
2127                     "Unable to map MSIX table\n");
2128                 goto msi;
2129         }
2130
2131         available = pci_msix_count(dev); 
2132         if (available == 0) { /* system has msix disabled */
2133                 bus_release_resource(dev, SYS_RES_MEMORY,
2134                     rid, pf->msix_mem);
2135                 pf->msix_mem = NULL;
2136                 goto msi;
2137         }
2138
2139         /* Figure out a reasonable auto config value */
2140         queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
2141
2142         /* Override with hardcoded value if sane */
2143         if ((ixl_max_queues != 0) && (ixl_max_queues <= queues)) 
2144                 queues = ixl_max_queues;
2145
2146 #ifdef  RSS
2147         /* If we're doing RSS, clamp at the number of RSS buckets */
2148         if (queues > rss_getnumbuckets())
2149                 queues = rss_getnumbuckets();
2150 #endif
2151
2152         /*
2153         ** Want one vector (RX/TX pair) per queue
2154         ** plus an additional for the admin queue.
2155         */
2156         want = queues + 1;
2157         if (want <= available)  /* Have enough */
2158                 vectors = want;
2159         else {
2160                 device_printf(pf->dev,
2161                     "MSIX Configuration Problem, "
2162                     "%d vectors available but %d wanted!\n",
2163                     available, want);
2164                 return (0); /* Will go to Legacy setup */
2165         }
2166
2167         if (pci_alloc_msix(dev, &vectors) == 0) {
2168                 device_printf(pf->dev,
2169                     "Using MSIX interrupts with %d vectors\n", vectors);
2170                 pf->msix = vectors;
2171                 pf->vsi.num_queues = queues;
2172 #ifdef RSS
2173                 /*
2174                  * If we're doing RSS, the number of queues needs to
2175                  * match the number of RSS buckets that are configured.
2176                  *
2177                  * + If there's more queues than RSS buckets, we'll end
2178                  *   up with queues that get no traffic.
2179                  *
2180                  * + If there's more RSS buckets than queues, we'll end
2181                  *   up having multiple RSS buckets map to the same queue,
2182                  *   so there'll be some contention.
2183                  */
2184                 if (queues != rss_getnumbuckets()) {
2185                         device_printf(dev,
2186                             "%s: queues (%d) != RSS buckets (%d)"
2187                             "; performance will be impacted.\n",
2188                             __func__, queues, rss_getnumbuckets());
2189                 }
2190 #endif
2191                 return (vectors);
2192         }
2193 msi:
2194         vectors = pci_msi_count(dev);
2195         pf->vsi.num_queues = 1;
2196         pf->msix = 1;
2197         ixl_max_queues = 1;
2198         ixl_enable_msix = 0;
2199         if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2200                 device_printf(pf->dev,"Using an MSI interrupt\n");
2201         else {
2202                 pf->msix = 0;
2203                 device_printf(pf->dev,"Using a Legacy interrupt\n");
2204         }
2205         return (vectors);
2206 }
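/*
** Note: the sizing rule above is want = queues + 1, one vector per
** queue pair plus one for the admin queue. For example, on an 8-core
** machine with 16 vectors available: queues = min(8, 15) = 8, so
** want = 9 and 9 vectors are requested; if want exceeds available the
** function returns 0 and the driver falls back to legacy setup with a
** single queue.
*/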
2207
2208
2209 /*
2210  * Plumb MSI/X vectors
2211  */
2212 static void
2213 ixl_configure_msix(struct ixl_pf *pf)
2214 {
2215         struct i40e_hw  *hw = &pf->hw;
2216         struct ixl_vsi *vsi = &pf->vsi;
2217         u32             reg;
2218         u16             vector = 1;
2219
2220         /* First set up the adminq - vector 0 */
2221         wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2222         rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2223
2224         reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2225             I40E_PFINT_ICR0_ENA_GRST_MASK |
2226             I40E_PFINT_ICR0_HMC_ERR_MASK |
2227             I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2228             I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2229             I40E_PFINT_ICR0_ENA_VFLR_MASK |
2230             I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2231         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2232
2233         wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2234         wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2235
2236         wr32(hw, I40E_PFINT_DYN_CTL0,
2237             I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2238             I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2239
2240         wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2241
2242         /* Next configure the queues */
2243         for (int i = 0; i < vsi->num_queues; i++, vector++) {
2244                 wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2245                 wr32(hw, I40E_PFINT_LNKLSTN(i), i);
2246
2247                 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2248                 (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2249                 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2250                 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2251                 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2252                 wr32(hw, I40E_QINT_RQCTL(i), reg);
2253
2254                 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2255                 (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2256                 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2257                 ((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2258                 (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2259                 if (i == (vsi->num_queues - 1))
2260                         reg |= (IXL_QUEUE_EOL
2261                             << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2262                 wr32(hw, I40E_QINT_TQCTL(i), reg);
2263         }
2264 }
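/*
** Note: the queue loop above builds the per-vector cause chain that
** the hardware walks: PFINT_LNKLSTN(i) points vector i + 1 at RX
** queue i, the RX cause's NEXTQ field links to TX queue i, and the TX
** cause links onward (terminated with IXL_QUEUE_EOL on the last
** queue). Each cause also carries its own MSIX_INDX, which keeps the
** interrupt on the intended vector.
*/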
2265
2266 /*
2267  * Configure for MSI single vector operation 
2268  */
2269 static void
2270 ixl_configure_legacy(struct ixl_pf *pf)
2271 {
2272         struct i40e_hw  *hw = &pf->hw;
2273         u32             reg;
2274
2275
2276         wr32(hw, I40E_PFINT_ITR0(0), 0);
2277         wr32(hw, I40E_PFINT_ITR0(1), 0);
2278
2279
2280         /* Setup "other" causes */
2281         reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2282             | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2283             | I40E_PFINT_ICR0_ENA_GRST_MASK
2284             | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2285             | I40E_PFINT_ICR0_ENA_GPIO_MASK
2286             | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2287             | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2288             | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2289             | I40E_PFINT_ICR0_ENA_VFLR_MASK
2290             | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2291             ;
2292         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2293
2294         /* SW_ITR_IDX = 0, but don't change INTENA */
2295         wr32(hw, I40E_PFINT_DYN_CTL0,
2296             I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2297             I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2298         /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2299         wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2300
2301         /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2302         wr32(hw, I40E_PFINT_LNKLST0, 0);
2303
2304         /* Associate the queue pair to the vector and enable the q int */
2305         reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2306             | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2307             | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2308         wr32(hw, I40E_QINT_RQCTL(0), reg);
2309
2310         reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2311             | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2312             | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2313         wr32(hw, I40E_QINT_TQCTL(0), reg);
2314
2315         /* Next enable the queue pair */
2316         reg = rd32(hw, I40E_QTX_ENA(0));
2317         reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2318         wr32(hw, I40E_QTX_ENA(0), reg);
2319
2320         reg = rd32(hw, I40E_QRX_ENA(0));
2321         reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2322         wr32(hw, I40E_QRX_ENA(0), reg);
2323 }
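/*
** Note: in single-vector mode everything shares vector 0: the "other"
** causes are enabled in PFINT_ICR0_ENA, PFINT_LNKLST0 = 0 chains queue
** pair 0 onto the same vector, and the pair is then enabled directly
** through QTX_ENA(0)/QRX_ENA(0).
*/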
2324
2325
2326 /*
2327  * Set the Initial ITR state
2328  */
2329 static void
2330 ixl_configure_itr(struct ixl_pf *pf)
2331 {
2332         struct i40e_hw          *hw = &pf->hw;
2333         struct ixl_vsi          *vsi = &pf->vsi;
2334         struct ixl_queue        *que = vsi->queues;
2335
2336         vsi->rx_itr_setting = ixl_rx_itr;
2337         if (ixl_dynamic_rx_itr)
2338                 vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2339         vsi->tx_itr_setting = ixl_tx_itr;
2340         if (ixl_dynamic_tx_itr)
2341                 vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2342         
2343         for (int i = 0; i < vsi->num_queues; i++, que++) {
2344                 struct tx_ring  *txr = &que->txr;
2345                 struct rx_ring  *rxr = &que->rxr;
2346
2347                 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2348                     vsi->rx_itr_setting);
2349                 rxr->itr = vsi->rx_itr_setting;
2350                 rxr->latency = IXL_AVE_LATENCY;
2351                 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2352                     vsi->tx_itr_setting);
2353                 txr->itr = vsi->tx_itr_setting;
2354                 txr->latency = IXL_AVE_LATENCY;
2355         }
2356 }
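/*
** Note: the ITR registers count in 2 usec units, so a setting of
** 0x003E (62) means 62 * 2 = 124 usec between interrupts, roughly 8K
** interrupts/sec. The IXL_ITR_DYNAMIC flag additionally lets
** ixl_set_queue_rx_itr()/ixl_set_queue_tx_itr() adapt the interval to
** the observed traffic.
*/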
2357
2358
2359 static int
2360 ixl_allocate_pci_resources(struct ixl_pf *pf)
2361 {
2362         int             rid;
2363         device_t        dev = pf->dev;
2364
2365         rid = PCIR_BAR(0);
2366         pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2367             &rid, RF_ACTIVE);
2368
2369         if (!(pf->pci_mem)) {
2370                 device_printf(dev,"Unable to allocate bus resource: memory\n");
2371                 return (ENXIO);
2372         }
2373
2374         pf->osdep.mem_bus_space_tag =
2375                 rman_get_bustag(pf->pci_mem);
2376         pf->osdep.mem_bus_space_handle =
2377                 rman_get_bushandle(pf->pci_mem);
2378         pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2379         pf->osdep.flush_reg = I40E_GLGEN_STAT;
2380         pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2381
2382         pf->hw.back = &pf->osdep;
2383
2384         /*
2385         ** Now set up MSI or MSI/X; this should
2386         ** return the number of supported
2387         ** vectors. (Will be 1 for MSI)
2388         */
2389         pf->msix = ixl_init_msix(pf);
2390         return (0);
2391 }
2392
2393 static void
2394 ixl_free_pci_resources(struct ixl_pf * pf)
2395 {
2396         struct ixl_vsi          *vsi = &pf->vsi;
2397         struct ixl_queue        *que = vsi->queues;
2398         device_t                dev = pf->dev;
2399         int                     rid, memrid;
2400
2401         memrid = PCIR_BAR(IXL_BAR);
2402
2403         /* We may get here before stations are set up */
2404         if ((!ixl_enable_msix) || (que == NULL))
2405                 goto early;
2406
2407         /*
2408         **  Release all msix VSI resources:
2409         */
2410         for (int i = 0; i < vsi->num_queues; i++, que++) {
2411                 rid = que->msix + 1;
2412                 if (que->tag != NULL) {
2413                         bus_teardown_intr(dev, que->res, que->tag);
2414                         que->tag = NULL;
2415                 }
2416                 if (que->res != NULL)
2417                         bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2418         }
2419
2420 early:
2421         /* Clean the AdminQ interrupt last */
2422         if (pf->admvec) /* we are doing MSIX */
2423                 rid = pf->admvec + 1;
2424         else
2425                 rid = (pf->msix != 0) ? 1 : 0;
2426
2427         if (pf->tag != NULL) {
2428                 bus_teardown_intr(dev, pf->res, pf->tag);
2429                 pf->tag = NULL;
2430         }
2431         if (pf->res != NULL)
2432                 bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2433
2434         if (pf->msix)
2435                 pci_release_msi(dev);
2436
2437         if (pf->msix_mem != NULL)
2438                 bus_release_resource(dev, SYS_RES_MEMORY,
2439                     memrid, pf->msix_mem);
2440
2441         if (pf->pci_mem != NULL)
2442                 bus_release_resource(dev, SYS_RES_MEMORY,
2443                     PCIR_BAR(0), pf->pci_mem);
2444
2445         return;
2446 }
2447
2448 static void
2449 ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2450 {
2451         /* Display supported media types */
2452         if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2453                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2454
2455         if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2456                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2457         if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
2458                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2459         if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
2460                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2461
2462         if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
2463             phy_type & (1 << I40E_PHY_TYPE_XFI) ||
2464             phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2465                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2466
2467         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2468                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2469         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2470                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2471         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2472                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2473
2474         if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
2475             phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2476             phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
2477             phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
2478             phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2479                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2480         if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2481                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2482         if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2483                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2484
2485 #ifndef IFM_ETH_XTYPE
2486         if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
2487                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2488
2489         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2490             phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1) ||
2491             phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
2492             phy_type & (1 << I40E_PHY_TYPE_SFI))
2493                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2494         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
2495                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2496         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
2497                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2498
2499         if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2500                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2501         if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
2502                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2503 #else
2504         if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
2505                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
2506
2507         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
2508             || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
2509                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
2510         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
2511                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
2512         if (phy_type & (1 << I40E_PHY_TYPE_SFI))
2513                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
2514         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
2515                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
2516         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
2517                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
2518
2519         if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
2520                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
2521
2522         if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2523                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
2524         if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
2525                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
2526 #endif
2527 }
2528
2529 /*********************************************************************
2530  *
2531  *  Setup networking device structure and register an interface.
2532  *
2533  **********************************************************************/
2534 static int
2535 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2536 {
2537         struct ifnet            *ifp;
2538         struct i40e_hw          *hw = vsi->hw;
2539         struct ixl_queue        *que = vsi->queues;
2540         struct i40e_aq_get_phy_abilities_resp abilities;
2541         enum i40e_status_code aq_error = 0;
2542
2543         INIT_DEBUGOUT("ixl_setup_interface: begin");
2544
2545         ifp = vsi->ifp = if_alloc(IFT_ETHER);
2546         if (ifp == NULL) {
2547                 device_printf(dev, "can not allocate ifnet structure\n");
2548                 return (-1);
2549         }
2550         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2551         ifp->if_mtu = ETHERMTU;
2552         if_initbaudrate(ifp, IF_Gbps(40));
2553         ifp->if_init = ixl_init;
2554         ifp->if_softc = vsi;
2555         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2556         ifp->if_ioctl = ixl_ioctl;
2557
2558 #if __FreeBSD_version >= 1100036
2559         if_setgetcounterfn(ifp, ixl_get_counter);
2560 #endif
2561
2562         ifp->if_transmit = ixl_mq_start;
2563
2564         ifp->if_qflush = ixl_qflush;
2565
2566         ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2567
2568         vsi->max_frame_size =
2569             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2570             + ETHER_VLAN_ENCAP_LEN;
2571
2572         /*
2573          * Tell the upper layer(s) we support long frames.
2574          */
2575         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2576
2577         ifp->if_capabilities |= IFCAP_HWCSUM;
2578         ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2579         ifp->if_capabilities |= IFCAP_TSO;
2580         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2581         ifp->if_capabilities |= IFCAP_LRO;
2582
2583         /* VLAN capabilties */
2584         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2585                              |  IFCAP_VLAN_HWTSO
2586                              |  IFCAP_VLAN_MTU
2587                              |  IFCAP_VLAN_HWCSUM;
2588         ifp->if_capenable = ifp->if_capabilities;
2589
2590         /*
2591         ** Don't turn this on by default: if vlans are
2592         ** created on another pseudo device (e.g. lagg)
2593         ** then vlan events are not passed through, breaking
2594         ** operation, but with HW FILTER off it works. If
2595         ** you use vlans directly on the ixl driver you can
2596         ** enable this and get full hardware tag filtering.
2597         */
2598         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2599
2600         /*
2601          * Specify the media types supported by this adapter and register
2602          * callbacks to update media and link information
2603          */
2604         ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2605                      ixl_media_status);
2606
2607         aq_error = i40e_aq_get_phy_capabilities(hw,
2608             FALSE, TRUE, &abilities, NULL);
2609         /* May need delay to detect fiber correctly */
2610         if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2611                 i40e_msec_delay(200);
2612                 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2613                     TRUE, &abilities, NULL);
2614         }
2615         if (aq_error) {
2616                 if (aq_error == I40E_ERR_UNKNOWN_PHY)
2617                         device_printf(dev, "Unknown PHY type detected!\n");
2618                 else
2619                         device_printf(dev,
2620                             "Error getting supported media types, err %d,"
2621                             " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2622                 return (0);
2623         }
2624
2625         ixl_add_ifmedia(vsi, abilities.phy_type);
2626
2627         /* Use autoselect media by default */
2628         ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2629         ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2630
2631         ether_ifattach(ifp, hw->mac.addr);
2632
2633         return (0);
2634 }
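/*
** Note: with the default ETHERMTU the max_frame_size computed above is
** 1500 + 14 (ether header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes; this
** is what later selects MCLBYTES vs. MJUMPAGESIZE receive buffers in
** ixl_initialize_vsi().
*/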
2635
2636 /*
2637 ** Run when the Admin Queue gets a
2638 ** link transition interrupt.
2639 */
2640 static void
2641 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
2642 {
2643         struct i40e_hw  *hw = &pf->hw; 
2644         struct i40e_aqc_get_link_status *status =
2645             (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
2646         bool check;
2647
2648         hw->phy.get_link_info = TRUE;
2649         i40e_get_link_status(hw, &check);
2650         pf->link_up = check;
2651 #ifdef IXL_DEBUG
2652         printf("Link is %s\n", check ? "up":"down");
2653 #endif
2654         /* Report if Unqualified modules are found */
2655         if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2656             (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
2657             (!(status->link_info & I40E_AQ_LINK_UP)))
2658                 device_printf(pf->dev, "Link failed because "
2659                     "an unqualified module was detected\n");
2660
2661         return;
2662 }
2663
2664 /*********************************************************************
2665  *
2666  *  Get Firmware Switch configuration
2667  *      - this will need to be more robust when more complex
2668  *        switch configurations are enabled.
2669  *
2670  **********************************************************************/
2671 static int
2672 ixl_switch_config(struct ixl_pf *pf)
2673 {
2674         struct i40e_hw  *hw = &pf->hw; 
2675         struct ixl_vsi  *vsi = &pf->vsi;
2676         device_t        dev = vsi->dev;
2677         struct i40e_aqc_get_switch_config_resp *sw_config;
2678         u8      aq_buf[I40E_AQ_LARGE_BUF];
2679         int     ret;
2680         u16     next = 0;
2681
2682         memset(&aq_buf, 0, sizeof(aq_buf));
2683         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2684         ret = i40e_aq_get_switch_config(hw, sw_config,
2685             sizeof(aq_buf), &next, NULL);
2686         if (ret) {
2687                 device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n",
2688                     ret);
2689                 return (ret);
2690         }
2691 #ifdef IXL_DEBUG
2692         device_printf(dev,
2693             "Switch config: header reported: %d in structure, %d total\n",
2694             sw_config->header.num_reported, sw_config->header.num_total);
2695         for (int i = 0; i < sw_config->header.num_reported; i++) {
2696                 device_printf(dev,
2697                     "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2698                     sw_config->element[i].element_type,
2699                     sw_config->element[i].seid,
2700                     sw_config->element[i].uplink_seid,
2701                     sw_config->element[i].downlink_seid);
2702         }
2703 #endif
2704         /* Simplified due to a single VSI at the moment */
2705         vsi->uplink_seid = sw_config->element[0].uplink_seid;
2706         vsi->downlink_seid = sw_config->element[0].downlink_seid;
2707         vsi->seid = sw_config->element[0].seid;
2708         return (ret);
2709 }
2710
2711 /*********************************************************************
2712  *
2713  *  Initialize the VSI:  this handles contexts, which means things
2714  *                       like the number of descriptors, buffer size,
2715  *                       plus we init the rings thru this function.
2716  *
2717  **********************************************************************/
2718 static int
2719 ixl_initialize_vsi(struct ixl_vsi *vsi)
2720 {
2721         struct ixl_pf           *pf = vsi->back;
2722         struct ixl_queue        *que = vsi->queues;
2723         device_t                dev = vsi->dev;
2724         struct i40e_hw          *hw = vsi->hw;
2725         struct i40e_vsi_context ctxt;
2726         int                     err = 0;
2727
2728         memset(&ctxt, 0, sizeof(ctxt));
2729         ctxt.seid = vsi->seid;
2730         if (pf->veb_seid != 0)
2731                 ctxt.uplink_seid = pf->veb_seid;
2732         ctxt.pf_num = hw->pf_id;
2733         err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2734         if (err) {
2735                 device_printf(dev,"get vsi params failed %x!!\n", err);
2736                 return (err);
2737         }
2738 #ifdef IXL_DEBUG
2739         printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2740             "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2741             "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2742             ctxt.uplink_seid, ctxt.vsi_number,
2743             ctxt.vsis_allocated, ctxt.vsis_unallocated,
2744             ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2745             ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2746 #endif
2747         /*
2748         ** Set the queue and traffic class bits
2749         **  - when multiple traffic classes are supported
2750         **    this will need to be more robust.
2751         */
2752         ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2753         ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2754         ctxt.info.queue_mapping[0] = 0; 
2755         ctxt.info.tc_mapping[0] = 0x0800; 
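        /*
        ** Note: 0x0800 is the packed TC0 queue mapping: queue offset 0
        ** in the low bits and, presumably, a power-of-two queue count
        ** exponent in the upper field (0x0800 >> 9 = 4, i.e. 16 queues).
        */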
2756
2757         /* Set VLAN receive stripping mode */
2758         ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2759         ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2760         if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2761             ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2762         else
2763             ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2764
2765         /* Keep copy of VSI info in VSI for statistic counters */
2766         memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2767
2768         /* Reset VSI statistics */
2769         ixl_vsi_reset_stats(vsi);
2770         vsi->hw_filters_add = 0;
2771         vsi->hw_filters_del = 0;
2772
2773         ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
2774
2775         err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2776         if (err) {
2777                 device_printf(dev,"update vsi params failed %x!!\n",
2778                    hw->aq.asq_last_status);
2779                 return (err);
2780         }
2781
2782         for (int i = 0; i < vsi->num_queues; i++, que++) {
2783                 struct tx_ring          *txr = &que->txr;
2784                 struct rx_ring          *rxr = &que->rxr;
2785                 struct i40e_hmc_obj_txq tctx;
2786                 struct i40e_hmc_obj_rxq rctx;
2787                 u32                     txctl;
2788                 u16                     size;
2789
2790
2791                 /* Setup the HMC TX Context  */
2792                 size = que->num_desc * sizeof(struct i40e_tx_desc);
2793                 memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2794                 tctx.new_context = 1;
2795                 tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2796                 tctx.qlen = que->num_desc;
2797                 tctx.fc_ena = 0;
2798                 tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2799                 /* Enable HEAD writeback */
2800                 tctx.head_wb_ena = 1;
2801                 tctx.head_wb_addr = txr->dma.pa +
2802                     (que->num_desc * sizeof(struct i40e_tx_desc));
2803                 tctx.rdylist_act = 0;
2804                 err = i40e_clear_lan_tx_queue_context(hw, i);
2805                 if (err) {
2806                         device_printf(dev, "Unable to clear TX context\n");
2807                         break;
2808                 }
2809                 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2810                 if (err) {
2811                         device_printf(dev, "Unable to set TX context\n");
2812                         break;
2813                 }
2814                 /* Associate the ring with this PF */
2815                 txctl = I40E_QTX_CTL_PF_QUEUE;
2816                 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2817                     I40E_QTX_CTL_PF_INDX_MASK);
2818                 wr32(hw, I40E_QTX_CTL(i), txctl);
2819                 ixl_flush(hw);
2820
2821                 /* Do ring (re)init */
2822                 ixl_init_tx_ring(que);
2823
2824                 /* Next setup the HMC RX Context  */
2825                 if (vsi->max_frame_size <= MCLBYTES)
2826                         rxr->mbuf_sz = MCLBYTES;
2827                 else
2828                         rxr->mbuf_sz = MJUMPAGESIZE;
2829
2830                 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2831
2832                 /* Set up an RX context for the HMC */
2833                 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2834                 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2835                 /* ignore header split for now */
2836                 rctx.hbuff = 0;
2837                 rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2838                     vsi->max_frame_size : max_rxmax;
2839                 rctx.dtype = 0;
2840                 rctx.dsize = 1; /* do 32byte descriptors */
2841                 rctx.hsplit_0 = 0;  /* no HDR split initially */
2842                 rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
2843                 rctx.qlen = que->num_desc;
2844                 rctx.tphrdesc_ena = 1;
2845                 rctx.tphwdesc_ena = 1;
2846                 rctx.tphdata_ena = 0;
2847                 rctx.tphhead_ena = 0;
2848                 rctx.lrxqthresh = 2;
2849                 rctx.crcstrip = 1;
2850                 rctx.l2tsel = 1;
2851                 rctx.showiv = 1;
2852                 rctx.fc_ena = 0;
2853                 rctx.prefena = 1;
2854
2855                 err = i40e_clear_lan_rx_queue_context(hw, i);
2856                 if (err) {
2857                         device_printf(dev,
2858                             "Unable to clear RX context %d\n", i);
2859                         break;
2860                 }
2861                 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2862                 if (err) {
2863                         device_printf(dev, "Unable to set RX context %d\n", i);
2864                         break;
2865                 }
2866                 err = ixl_init_rx_ring(que);
2867                 if (err) {
2868                         device_printf(dev, "Fail in init_rx_ring %d\n", i);
2869                         break;
2870                 }
2871                 wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2872 #ifdef DEV_NETMAP
2873                 /* preserve queue */
2874                 if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2875                         struct netmap_adapter *na = NA(vsi->ifp);
2876                         struct netmap_kring *kring = &na->rx_rings[i];
2877                         int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2878                         wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2879                 } else
2880 #endif /* DEV_NETMAP */
2881                 wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2882         }
2883         return (err);
2884 }
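/*
** Illustrative sketch (not driver code): the HMC context base fields
** take a physical address scaled down by the context base unit, and
** the head-writeback word lives just past the descriptor ring.
** Assuming IXL_TX_CTX_BASE_UNITS is 128 and a ring of 1024 16-byte
** tx descriptors at pa 0x10000:
**
**      tctx.base         = 0x10000 / 128;              // 0x200
**      tctx.head_wb_addr = 0x10000 + 1024 * 16;        // 0x14000
**
** which is why ixl_setup_stations() pads the tx DMA allocation with
** an extra sizeof(u32) for the writeback word.
*/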
2885
2886
2887 /*********************************************************************
2888  *
2889  *  Free all VSI structs.
2890  *
2891  **********************************************************************/
2892 void
2893 ixl_free_vsi(struct ixl_vsi *vsi)
2894 {
2895         struct ixl_pf           *pf = (struct ixl_pf *)vsi->back;
2896         struct ixl_queue        *que = vsi->queues;
2897
2898         /* Free station queues */
2899         for (int i = 0; i < vsi->num_queues; i++, que++) {
2900                 struct tx_ring *txr = &que->txr;
2901                 struct rx_ring *rxr = &que->rxr;
2902         
2903                 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2904                         continue;
2905                 IXL_TX_LOCK(txr);
2906                 ixl_free_que_tx(que);
2907                 if (txr->base)
2908                         i40e_free_dma_mem(&pf->hw, &txr->dma);
2909                 IXL_TX_UNLOCK(txr);
2910                 IXL_TX_LOCK_DESTROY(txr);
2911
2912                 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2913                         continue;
2914                 IXL_RX_LOCK(rxr);
2915                 ixl_free_que_rx(que);
2916                 if (rxr->base)
2917                         i40e_free_dma_mem(&pf->hw, &rxr->dma);
2918                 IXL_RX_UNLOCK(rxr);
2919                 IXL_RX_LOCK_DESTROY(rxr);
2920                 
2921         }
2922         free(vsi->queues, M_DEVBUF);
2923
2924         /* Free VSI filter list */
2925         ixl_free_mac_filters(vsi);
2926 }
2927
2928 static void
2929 ixl_free_mac_filters(struct ixl_vsi *vsi)
2930 {
2931         struct ixl_mac_filter *f;
2932
2933         while (!SLIST_EMPTY(&vsi->ftl)) {
2934                 f = SLIST_FIRST(&vsi->ftl);
2935                 SLIST_REMOVE_HEAD(&vsi->ftl, next);
2936                 free(f, M_DEVBUF);
2937         }
2938 }
2939
2940
2941 /*********************************************************************
2942  *
2943  *  Allocate memory for the VSI (virtual station interface) and their
2944  *  associated queues, rings and the descriptors associated with each,
2945  *  called only once at attach.
2946  *
2947  **********************************************************************/
2948 static int
2949 ixl_setup_stations(struct ixl_pf *pf)
2950 {
2951         device_t                dev = pf->dev;
2952         struct ixl_vsi          *vsi;
2953         struct ixl_queue        *que;
2954         struct tx_ring          *txr;
2955         struct rx_ring          *rxr;
2956         int                     rsize, tsize;
2957         int                     error = I40E_SUCCESS;
2958
2959         vsi = &pf->vsi;
2960         vsi->back = pf;
2961         vsi->hw = &pf->hw;
2962         vsi->id = 0;
2963         vsi->num_vlans = 0;
2965
2966         /* Get memory for the station queues */
2967         if (!(vsi->queues =
2968             (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2969             vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2970                 device_printf(dev, "Unable to allocate queue memory\n");
2971                 error = ENOMEM;
2972                 goto early;
2973         }
2974
2975         for (int i = 0; i < vsi->num_queues; i++) {
2976                 que = &vsi->queues[i];
2977                 que->num_desc = ixl_ringsz;
2978                 que->me = i;
2979                 que->vsi = vsi;
2980                 /* mark the queue as active */
2981                 vsi->active_queues |= (u64)1 << que->me;
2982                 txr = &que->txr;
2983                 txr->que = que;
2984                 txr->tail = I40E_QTX_TAIL(que->me);
2985
2986                 /* Initialize the TX lock */
2987                 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2988                     device_get_nameunit(dev), que->me);
2989                 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2990                 /* Create the TX descriptor ring */
2991                 tsize = roundup2((que->num_desc *
2992                     sizeof(struct i40e_tx_desc)) +
2993                     sizeof(u32), DBA_ALIGN);
2994                 if (i40e_allocate_dma_mem(&pf->hw,
2995                     &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2996                         device_printf(dev,
2997                             "Unable to allocate TX Descriptor memory\n");
2998                         error = ENOMEM;
2999                         goto fail;
3000                 }
3001                 txr->base = (struct i40e_tx_desc *)txr->dma.va;
3002                 bzero((void *)txr->base, tsize);
3003                 /* Now allocate transmit soft structs for the ring */
3004                 if (ixl_allocate_tx_data(que)) {
3005                         device_printf(dev,
3006                             "Critical Failure setting up TX structures\n");
3007                         error = ENOMEM;
3008                         goto fail;
3009                 }
3010                 /* Allocate a buf ring; M_NOWAIT so the NULL check below is reachable */
3011                 txr->br = buf_ring_alloc(4096, M_DEVBUF,
3012                     M_NOWAIT, &txr->mtx);
3013                 if (txr->br == NULL) {
3014                         device_printf(dev,
3015                             "Critical Failure setting up TX buf ring\n");
3016                         error = ENOMEM;
3017                         goto fail;
3018                 }
3019
3020                 /*
3021                  * Next the RX queues...
3022                  */ 
3023                 rsize = roundup2(que->num_desc *
3024                     sizeof(union i40e_rx_desc), DBA_ALIGN);
3025                 rxr = &que->rxr;
3026                 rxr->que = que;
3027                 rxr->tail = I40E_QRX_TAIL(que->me);
3028
3029                 /* Initialize the RX side lock */
3030                 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
3031                     device_get_nameunit(dev), que->me);
3032                 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
3033
3034                 if (i40e_allocate_dma_mem(&pf->hw,
3035                     &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
3036                         device_printf(dev,
3037                             "Unable to allocate RX Descriptor memory\n");
3038                         error = ENOMEM;
3039                         goto fail;
3040                 }
3041                 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
3042                 bzero((void *)rxr->base, rsize);
3043
3044                 /* Allocate receive soft structs for the ring */
3045                 if (ixl_allocate_rx_data(que)) {
3046                         device_printf(dev,
3047                             "Critical Failure setting up receive structs\n");
3048                         error = ENOMEM;
3049                         goto fail;
3050                 }
3051         }
3052
3053         return (0);
3054
3055 fail:
3056         for (int i = 0; i < vsi->num_queues; i++) {
3057                 que = &vsi->queues[i];
3058                 rxr = &que->rxr;
3059                 txr = &que->txr;
3060                 if (rxr->base)
3061                         i40e_free_dma_mem(&pf->hw, &rxr->dma);
3062                 if (txr->base)
3063                         i40e_free_dma_mem(&pf->hw, &txr->dma);
3064         }
3065
3066 early:
3067         return (error);
3068 }
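/*
** Sizing sketch (illustrative, assuming the default ixl_ringsz of
** 1024 and DBA_ALIGN of 128):
**
**      tsize = roundup2(1024 * 16 + sizeof(u32), 128);  // 16512
**      rsize = roundup2(1024 * 32, 128);                // 32768
**
** The extra u32 in tsize is the head-writeback word; rx descriptors
** are 32 bytes because the rings run in 32-byte descriptor mode
** (rctx.dsize = 1 in ixl_initialize_vsi() above).
*/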
3069
3070 /*
3071 ** Provide an update to the queue RX
3072 ** interrupt moderation value.
3073 */
3074 static void
3075 ixl_set_queue_rx_itr(struct ixl_queue *que)
3076 {
3077         struct ixl_vsi  *vsi = que->vsi;
3078         struct i40e_hw  *hw = vsi->hw;
3079         struct rx_ring  *rxr = &que->rxr;
3080         u16             rx_itr;
3081         u16             rx_latency = 0;
3082         int             rx_bytes;
3083
3084
3085         /* Idle, do nothing */
3086         if (rxr->bytes == 0)
3087                 return;
3088
3089         if (ixl_dynamic_rx_itr) {
3090                 rx_bytes = rxr->bytes/rxr->itr;
3091                 rx_itr = rxr->itr;
3092
3093                 /* Adjust latency range */
3094                 switch (rxr->latency) {
3095                 case IXL_LOW_LATENCY:
3096                         if (rx_bytes > 10) {
3097                                 rx_latency = IXL_AVE_LATENCY;
3098                                 rx_itr = IXL_ITR_20K;
3099                         }
3100                         break;
3101                 case IXL_AVE_LATENCY:
3102                         if (rx_bytes > 20) {
3103                                 rx_latency = IXL_BULK_LATENCY;
3104                                 rx_itr = IXL_ITR_8K;
3105                         } else if (rx_bytes <= 10) {
3106                                 rx_latency = IXL_LOW_LATENCY;
3107                                 rx_itr = IXL_ITR_100K;
3108                         }
3109                         break;
3110                 case IXL_BULK_LATENCY:
3111                         if (rx_bytes <= 20) {
3112                                 rx_latency = IXL_AVE_LATENCY;
3113                                 rx_itr = IXL_ITR_20K;
3114                         }
3115                         break;
3116                 }
3117
3118                 rxr->latency = rx_latency;
3119
3120                 if (rx_itr != rxr->itr) {
3121                         /* do an exponential smoothing */
3122                         rx_itr = (10 * rx_itr * rxr->itr) /
3123                             ((9 * rx_itr) + rxr->itr);
3124                         rxr->itr = rx_itr & IXL_MAX_ITR;
3125                         wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3126                             que->me), rxr->itr);
3127                 }
3128         } else { /* We may have toggled to non-dynamic */
3129                 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
3130                         vsi->rx_itr_setting = ixl_rx_itr;
3131                 /* Update the hardware if needed */
3132                 if (rxr->itr != vsi->rx_itr_setting) {
3133                         rxr->itr = vsi->rx_itr_setting;
3134                         wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3135                             que->me), rxr->itr);
3136                 }
3137         }
3138         rxr->bytes = 0;
3139         rxr->packets = 0;
3140         return;
3141 }
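/*
** Smoothing sketch with illustrative numbers: the update above is a
** weighted blend that moves the programmed ITR toward the newly
** selected target without jumping.  For a current itr of 40 and a
** target of 122:
**
**      itr = (10 * 122 * 40) / ((9 * 122) + 40)
**          = 48800 / 1138 = 42     (integer division)
**
** so repeated calls converge on the target gradually.  The TX path
** below applies the same blend.
*/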
3142
3143
3144 /*
3145 ** Provide an update to the queue TX
3146 ** interrupt moderation value.
3147 */
3148 static void
3149 ixl_set_queue_tx_itr(struct ixl_queue *que)
3150 {
3151         struct ixl_vsi  *vsi = que->vsi;
3152         struct i40e_hw  *hw = vsi->hw;
3153         struct tx_ring  *txr = &que->txr;
3154         u16             tx_itr;
3155         u16             tx_latency = 0;
3156         int             tx_bytes;
3157
3158
3159         /* Idle, do nothing */
3160         if (txr->bytes == 0)
3161                 return;
3162
3163         if (ixl_dynamic_tx_itr) {
3164                 tx_bytes = txr->bytes/txr->itr;
3165                 tx_itr = txr->itr;
3166
3167                 switch (txr->latency) {
3168                 case IXL_LOW_LATENCY:
3169                         if (tx_bytes > 10) {
3170                                 tx_latency = IXL_AVE_LATENCY;
3171                                 tx_itr = IXL_ITR_20K;
3172                         }
3173                         break;
3174                 case IXL_AVE_LATENCY:
3175                         if (tx_bytes > 20) {
3176                                 tx_latency = IXL_BULK_LATENCY;
3177                                 tx_itr = IXL_ITR_8K;
3178                         } else if (tx_bytes <= 10) {
3179                                 tx_latency = IXL_LOW_LATENCY;
3180                                 tx_itr = IXL_ITR_100K;
3181                         }
3182                         break;
3183                 case IXL_BULK_LATENCY:
3184                         if (tx_bytes <= 20) {
3185                                 tx_latency = IXL_AVE_LATENCY;
3186                                 tx_itr = IXL_ITR_20K;
3187                         }
3188                         break;
3189                 }
3190
3191                 txr->latency = tx_latency;
3192
3193                 if (tx_itr != txr->itr) {
3194                         /* do an exponential smoothing */
3195                         tx_itr = (10 * tx_itr * txr->itr) /
3196                             ((9 * tx_itr) + txr->itr);
3197                         txr->itr = tx_itr & IXL_MAX_ITR;
3198                         wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3199                             que->me), txr->itr);
3200                 }
3201
3202         } else { /* We may have toggled to non-dynamic */
3203                 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
3204                         vsi->tx_itr_setting = ixl_tx_itr;
3205                 /* Update the hardware if needed */
3206                 if (txr->itr != vsi->tx_itr_setting) {
3207                         txr->itr = vsi->tx_itr_setting;
3208                         wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3209                             que->me), txr->itr);
3210                 }
3211         }
3212         txr->bytes = 0;
3213         txr->packets = 0;
3214         return;
3215 }
3216
3217 #define QUEUE_NAME_LEN 32
3218
3219 static void
3220 ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
3221     struct sysctl_ctx_list *ctx, const char *sysctl_name)
3222 {
3223         struct sysctl_oid *tree;
3224         struct sysctl_oid_list *child;
3225         struct sysctl_oid_list *vsi_list;
3226
3227         tree = device_get_sysctl_tree(pf->dev);
3228         child = SYSCTL_CHILDREN(tree);
3229         vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
3230                                    CTLFLAG_RD, NULL, "VSI Number");
3231         vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
3232
3233         ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
3234 }
3235
3236 static void
3237 ixl_add_hw_stats(struct ixl_pf *pf)
3238 {
3239         device_t dev = pf->dev;
3240         struct ixl_vsi *vsi = &pf->vsi;
3241         struct ixl_queue *queues = vsi->queues;
3242         struct i40e_hw_port_stats *pf_stats = &pf->stats;
3243
3244         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3245         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3246         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3247         struct sysctl_oid_list *vsi_list;
3248
3249         struct sysctl_oid *queue_node;
3250         struct sysctl_oid_list *queue_list;
3251
3252         struct tx_ring *txr;
3253         struct rx_ring *rxr;
3254         char queue_namebuf[QUEUE_NAME_LEN];
3255
3256         /* Driver statistics */
3257         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3258                         CTLFLAG_RD, &pf->watchdog_events,
3259                         "Watchdog timeouts");
3260         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3261                         CTLFLAG_RD, &pf->admin_irq,
3262                         "Admin Queue IRQ Handled");
3263
3264         ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
3265         vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
3266
3267         /* Queue statistics */
3268         for (int q = 0; q < vsi->num_queues; q++) {
3269                 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3270                 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
3271                     OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
3272                 queue_list = SYSCTL_CHILDREN(queue_node);
3273
3274                 txr = &(queues[q].txr);
3275                 rxr = &(queues[q].rxr);
3276
3277                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3278                                 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3279                                 "m_defrag() failed");
3280                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3281                                 CTLFLAG_RD, &(queues[q].dropped_pkts),
3282                                 "Driver dropped packets");
3283                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3284                                 CTLFLAG_RD, &(queues[q].irqs),
3285                                 "irqs on this queue");
3286                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3287                                 CTLFLAG_RD, &(queues[q].tso),
3288                                 "TSO");
3289                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3290                                 CTLFLAG_RD, &(queues[q].tx_dma_setup),
3291                                 "Driver tx dma failure in xmit");
3292                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3293                                 CTLFLAG_RD, &(txr->no_desc),
3294                                 "Queue No Descriptor Available");
3295                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3296                                 CTLFLAG_RD, &(txr->total_packets),
3297                                 "Queue Packets Transmitted");
3298                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3299                                 CTLFLAG_RD, &(txr->tx_bytes),
3300                                 "Queue Bytes Transmitted");
3301                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3302                                 CTLFLAG_RD, &(rxr->rx_packets),
3303                                 "Queue Packets Received");
3304                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3305                                 CTLFLAG_RD, &(rxr->rx_bytes),
3306                                 "Queue Bytes Received");
3307         }
3308
3309         /* MAC stats */
3310         ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
3311 }
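/*
** The tree hangs off the device sysctl node, so (assuming unit 0) the
** counters can be read from userland with, e.g.:
**
**      sysctl dev.ixl.0.pf.que0.tx_packets
**      sysctl dev.ixl.0.mac.crc_errors
**
** "pf" is the VSI node created by ixl_add_vsi_sysctls() above; "mac"
** is created by ixl_add_sysctls_mac_stats() below.
*/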
3312
3313 static void
3314 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3315         struct sysctl_oid_list *child,
3316         struct i40e_eth_stats *eth_stats)
3317 {
3318         struct ixl_sysctl_info ctls[] =
3319         {
3320                 {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3321                 {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3322                         "Unicast Packets Received"},
3323                 {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3324                         "Multicast Packets Received"},
3325                 {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3326                         "Broadcast Packets Received"},
3327                 {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3328                 {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3329                 {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3330                 {&eth_stats->tx_multicast, "mcast_pkts_txd",
3331                         "Multicast Packets Transmitted"},
3332                 {&eth_stats->tx_broadcast, "bcast_pkts_txd",
3333                         "Broadcast Packets Transmitted"},
3334                 // end
3335                 {0,0,0}
3336         };
3337
3338         struct ixl_sysctl_info *entry = ctls;
3339         while (entry->stat != 0)
3340         {
3341                 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3342                                 CTLFLAG_RD, entry->stat,
3343                                 entry->description);
3344                 entry++;
3345         }
3346 }
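/*
** The ctls[] table above is sentinel-terminated ({0,0,0}), so adding
** a new ethernet counter is a one-line change to the initializer;
** the walk loop needs no modification.
*/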
3347
3348 static void
3349 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3350         struct sysctl_oid_list *child,
3351         struct i40e_hw_port_stats *stats)
3352 {
3353         struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3354                                     CTLFLAG_RD, NULL, "Mac Statistics");
3355         struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3356
3357         struct i40e_eth_stats *eth_stats = &stats->eth;
3358         ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3359
3360         struct ixl_sysctl_info ctls[] = 
3361         {
3362                 {&stats->crc_errors, "crc_errors", "CRC Errors"},
3363                 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3364                 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3365                 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3366                 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3367                 /* Packet Reception Stats */
3368                 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3369                 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3370                 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3371                 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3372                 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3373                 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3374                 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3375                 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3376                 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3377                 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3378                 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3379                 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3380                 /* Packet Transmission Stats */
3381                 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3382                 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3383                 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3384                 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3385                 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3386                 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3387                 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3388                 /* Flow control */
3389                 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3390                 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3391                 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3392                 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3393                 /* End */
3394                 {0,0,0}
3395         };
3396
3397         struct ixl_sysctl_info *entry = ctls;
3398         while (entry->stat != 0)
3399         {
3400                 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3401                                 CTLFLAG_RD, entry->stat,
3402                                 entry->description);
3403                 entry++;
3404         }
3405 }
3406
3407
3408 /*
3409 ** ixl_config_rss - set up RSS
3410 **  - note this is done for the single vsi
3411 */
3412 static void ixl_config_rss(struct ixl_vsi *vsi)
3413 {
3414         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
3415         struct i40e_hw  *hw = vsi->hw;
3416         u32             lut = 0;
3417         u64             set_hena = 0, hena;
3418         int             i, j, que_id;
3419 #ifdef RSS
3420         u32             rss_hash_config;
3421         u32             rss_seed[IXL_KEYSZ];
3422 #else
3423         u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
3424                             0x183cfd8c, 0xce880440, 0x580cbc3c,
3425                             0x35897377, 0x328b25e1, 0x4fa98922,
3426                             0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
3427 #endif
3428
3429 #ifdef RSS
3430         /* Fetch the configured RSS key */
3431         rss_getkey((uint8_t *) &rss_seed);
3432 #endif
3433
3434         /* Fill out hash function seed */
3435         for (i = 0; i < IXL_KEYSZ; i++)
3436                 wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
3437
3438         /* Enable PCTYPES for RSS: */
3439 #ifdef RSS
3440         rss_hash_config = rss_gethashconfig();
3441         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3442                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3443         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3444                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
3445         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3446                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
3447         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3448                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
3449         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3450                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
3451         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3452                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
3453         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3454                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
3455 #else
3456         set_hena =
3457                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3458                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3459                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3460                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3461                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3462                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3463                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3464                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3465                 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3466                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3467                 ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3468 #endif
3469         hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3470             ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3471         hena |= set_hena;
3472         wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3473         wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3474
3475         /* Populate the LUT with queue indices in round-robin fashion */
3476         for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3477                 if (j == vsi->num_queues)
3478                         j = 0;
3479 #ifdef RSS
3480                 /*
3481                  * Fetch the RSS bucket id for the given indirection entry.
3482                  * Cap it at the number of configured buckets (which is
3483                  * num_queues.)
3484                  */
3485                 que_id = rss_get_indirection_to_bucket(i);
3486                 que_id = que_id % vsi->num_queues;
3487 #else
3488                 que_id = j;
3489 #endif
3490                 /* lut = 4-byte sliding window of 4 lut entries */
3491                 lut = (lut << 8) | (que_id &
3492                     ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3493                 /* On i = 3, we have 4 entries in lut; write to the register */
3494                 if ((i & 3) == 3)
3495                         wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3496         }
3497         ixl_flush(hw);
3498 }
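/*
** LUT packing sketch: each I40E_PFQF_HLUT register holds four one-byte
** entries, accumulated through the 4-byte sliding window above.  With
** four queues, the first write (at i == 3) has seen que_ids 0,1,2,3:
**
**      lut = (((0 << 8 | 1) << 8 | 2) << 8 | 3);       // 0x00010203
**      wr32(hw, I40E_PFQF_HLUT(0), lut);
**
** and the pattern repeats across all rss_table_size entries.
*/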
3499
3500
3501 /*
3502 ** This routine is run via a vlan config EVENT;
3503 ** it enables us to use the HW Filter table since
3504 ** we can get the vlan id.  This just creates the
3505 ** entry in the soft version of the VFTA; init will
3506 ** repopulate the real table.
3507 */
3508 static void
3509 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3510 {
3511         struct ixl_vsi  *vsi = ifp->if_softc;
3512         struct i40e_hw  *hw = vsi->hw;
3513         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
3514
3515         if (ifp->if_softc != arg)       /* Not our event */
3516                 return;
3517
3518         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3519                 return;
3520
3521         IXL_PF_LOCK(pf);
3522         ++vsi->num_vlans;
3523         ixl_add_filter(vsi, hw->mac.addr, vtag);
3524         IXL_PF_UNLOCK(pf);
3525 }
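/*
** Hook-up sketch (registered at attach time elsewhere in this file):
** these callbacks fire on vlan create/destroy via the kernel event
** handler mechanism, roughly:
**
**      vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
**          ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
**      vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
**          ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
*/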
3526
3527 /*
3528 ** This routine is run via a vlan
3529 ** unconfig EVENT; it removes our entry
3530 ** from the soft vfta.
3531 */
3532 static void
3533 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3534 {
3535         struct ixl_vsi  *vsi = ifp->if_softc;
3536         struct i40e_hw  *hw = vsi->hw;
3537         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
3538
3539         if (ifp->if_softc != arg)
3540                 return;
3541
3542         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3543                 return;
3544
3545         IXL_PF_LOCK(pf);
3546         --vsi->num_vlans;
3547         ixl_del_filter(vsi, hw->mac.addr, vtag);
3548         IXL_PF_UNLOCK(pf);
3549 }
3550
3551 /*
3552 ** This routine updates vlan filters; called by init,
3553 ** it scans the filter table and then updates the hw
3554 ** after a soft reset.
3555 */
3556 static void
3557 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3558 {
3559         struct ixl_mac_filter   *f;
3560         int                     cnt = 0, flags;
3561
3562         if (vsi->num_vlans == 0)
3563                 return;
3564         /*
3565         ** Scan the filter list for vlan entries,
3566         ** mark them for addition and then call
3567         ** for the AQ update.
3568         */
3569         SLIST_FOREACH(f, &vsi->ftl, next) {
3570                 if (f->flags & IXL_FILTER_VLAN) {
3571                         f->flags |=
3572                             (IXL_FILTER_ADD |
3573                             IXL_FILTER_USED);
3574                         cnt++;
3575                 }
3576         }
3577         if (cnt == 0) {
3578                 printf("setup vlan: no filters found!\n");
3579                 return;
3580         }
3581         flags = IXL_FILTER_VLAN;
3582         flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3583         ixl_add_hw_filters(vsi, flags, cnt);
3584         return;
3585 }
3586
3587 /*
3588 ** Initialize filter list and add filters that the hardware
3589 ** needs to know about.
3590 */
3591 static void
3592 ixl_init_filters(struct ixl_vsi *vsi)
3593 {
3594         /* Add broadcast address */
3595         ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3596 }
3597
3598 /*
3599 ** This routine adds multicast filters
3600 */
3601 static void
3602 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3603 {
3604         struct ixl_mac_filter *f;
3605
3606         /* Does one already exist */
3607         f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3608         if (f != NULL)
3609                 return;
3610
3611         f = ixl_get_filter(vsi);
3612         if (f == NULL) {
3613                 printf("WARNING: no filter available!\n");
3614                 return;
3615         }
3616         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3617         f->vlan = IXL_VLAN_ANY;
3618         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3619             | IXL_FILTER_MC);
3620
3621         return;
3622 }
3623
3624 static void
3625 ixl_reconfigure_filters(struct ixl_vsi *vsi)
3626 {
3627
3628         ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3629 }
3630
3631 /*
3632 ** This routine adds macvlan filters
3633 */
3634 static void
3635 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3636 {
3637         struct ixl_mac_filter   *f, *tmp;
3638         struct ixl_pf           *pf;
3639         device_t                dev;
3640
3641         DEBUGOUT("ixl_add_filter: begin");
3642
3643         pf = vsi->back;
3644         dev = pf->dev;
3645
3646         /* Does one already exist */
3647         f = ixl_find_filter(vsi, macaddr, vlan);
3648         if (f != NULL)
3649                 return;
3650         /*
3651         ** Is this the first vlan being registered?  If so, we
3652         ** need to remove the ANY filter that indicates we are
3653         ** not in a vlan, and replace it with a 0 filter.
3654         */
3655         if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3656                 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3657                 if (tmp != NULL) {
3658                         ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3659                         ixl_add_filter(vsi, macaddr, 0);
3660                 }
3661         }
3662
3663         f = ixl_get_filter(vsi);
3664         if (f == NULL) {
3665                 device_printf(dev, "WARNING: no filter available!\n");
3666                 return;
3667         }
3668         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3669         f->vlan = vlan;
3670         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3671         if (f->vlan != IXL_VLAN_ANY)
3672                 f->flags |= IXL_FILTER_VLAN;
3673         else
3674                 vsi->num_macs++;
3675
3676         ixl_add_hw_filters(vsi, f->flags, 1);
3677         return;
3678 }
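/*
** Transition example: with MAC address M and no vlans, the list holds
** a single (M, IXL_VLAN_ANY) entry.  Registering vlan 100 first swaps
** that for (M, 0) -- the untagged case -- and then this call adds
** (M, 100), so both tagged and untagged frames keep flowing.
*/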
3679
3680 static void
3681 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3682 {
3683         struct ixl_mac_filter *f;
3684
3685         f = ixl_find_filter(vsi, macaddr, vlan);
3686         if (f == NULL)
3687                 return;
3688
3689         f->flags |= IXL_FILTER_DEL;
3690         ixl_del_hw_filters(vsi, 1);
3691         vsi->num_macs--;
3692
3693         /* Check if this is the last vlan removal */
3694         if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3695                 /* Switch back to a non-vlan filter */
3696                 ixl_del_filter(vsi, macaddr, 0);
3697                 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3698         }
3699         return;
3700 }
3701
3702 /*
3703 ** Find the filter with both matching mac addr and vlan id
3704 */
3705 static struct ixl_mac_filter *
3706 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3707 {
3708         struct ixl_mac_filter   *f;
3709         bool                    match = FALSE;
3710
3711         SLIST_FOREACH(f, &vsi->ftl, next) {
3712                 if (!cmp_etheraddr(f->macaddr, macaddr))
3713                         continue;
3714                 if (f->vlan == vlan) {
3715                         match = TRUE;
3716                         break;
3717                 }
3718         }       
3719
3720         if (!match)
3721                 f = NULL;
3722         return (f);
3723 }
3724
3725 /*
3726 ** This routine takes additions to the vsi filter
3727 ** table and creates an Admin Queue call to create
3728 ** the filters in the hardware.
3729 */
3730 static void
3731 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3732 {
3733         struct i40e_aqc_add_macvlan_element_data *a, *b;
3734         struct ixl_mac_filter   *f;
3735         struct ixl_pf           *pf;
3736         struct i40e_hw          *hw;
3737         device_t                dev;
3738         int                     err, j = 0;
3739
3740         pf = vsi->back;
3741         dev = pf->dev;
3742         hw = &pf->hw;
3743         IXL_PF_LOCK_ASSERT(pf);
3744
3745         a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3746             M_DEVBUF, M_NOWAIT | M_ZERO);
3747         if (a == NULL) {
3748                 device_printf(dev, "add_hw_filters failed to get memory\n");
3749                 return;
3750         }
3751
3752         /*
3753         ** Scan the filter list, each time we find one
3754         ** we add it to the admin queue array and turn off
3755         ** the add bit.
3756         */
3757         SLIST_FOREACH(f, &vsi->ftl, next) {
3758                 if (f->flags == flags) {
3759                         b = &a[j]; // a pox on fvl long names :)
3760                         bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3761                         if (f->vlan == IXL_VLAN_ANY) {
3762                                 b->vlan_tag = 0;
3763                                 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3764                         } else {
3765                                 b->vlan_tag = f->vlan;
3766                                 b->flags = 0;
3767                         }
3768                         b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3769                         f->flags &= ~IXL_FILTER_ADD;
3770                         j++;
3771                 }
3772                 if (j == cnt)
3773                         break;
3774         }
3775         if (j > 0) {
3776                 err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3777                 if (err) 
3778                         device_printf(dev, "aq_add_macvlan err %d, "
3779                             "aq_error %d\n", err, hw->aq.asq_last_status);
3780                 else
3781                         vsi->hw_filters_add += j;
3782         }
3783         free(a, M_DEVBUF);
3784         return;
3785 }
3786
3787 /*
3788 ** This routine takes removals in the vsi filter
3789 ** table and creates an Admin Queue call to delete
3790 ** the filters in the hardware.
3791 */
3792 static void
3793 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3794 {
3795         struct i40e_aqc_remove_macvlan_element_data *d, *e;
3796         struct ixl_pf           *pf;
3797         struct i40e_hw          *hw;
3798         device_t                dev;
3799         struct ixl_mac_filter   *f, *f_temp;
3800         int                     err, j = 0;
3801
3802         DEBUGOUT("ixl_del_hw_filters: begin\n");
3803
3804         pf = vsi->back;
3805         hw = &pf->hw;
3806         dev = pf->dev;
3807
3808         d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3809             M_DEVBUF, M_NOWAIT | M_ZERO);
3810         if (d == NULL) {
3811                 printf("del hw filter failed to get memory\n");
3812                 return;
3813         }
3814
3815         SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3816                 if (f->flags & IXL_FILTER_DEL) {
3817                         e = &d[j]; // a pox on fvl long names :)
3818                         bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3819                         e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3820                         e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3821                         /* delete entry from vsi list */
3822                         SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3823                         free(f, M_DEVBUF);
3824                         j++;
3825                 }
3826                 if (j == cnt)
3827                         break;
3828         }
3829         if (j > 0) {
3830                 err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3831                 /* NOTE: returns ENOENT every time but seems to work fine,
3832                    so we'll ignore that specific error. */
3833                 // TODO: Does this still occur on current firmwares?
3834                 if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3835                         int sc = 0;
3836                         for (int i = 0; i < j; i++)
3837                                 sc += (!d[i].error_code);
3838                         vsi->hw_filters_del += sc;
3839                         device_printf(dev,
3840                             "Failed to remove %d/%d filters, aq error %d\n",
3841                             j - sc, j, hw->aq.asq_last_status);
3842                 } else
3843                         vsi->hw_filters_del += j;
3844         }
3845         free(d, M_DEVBUF);
3846
3847         DEBUGOUT("ixl_del_hw_filters: end\n");
3848         return;
3849 }
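/*
** On a partially failed AQ remove, the firmware fills in a per-entry
** error_code, so the loop above counts entries with error_code == 0
** as successes (sc), charges only those to hw_filters_del, and the
** diagnostic reports the j - sc remainder as failed.
*/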
3850
3851 static int
3852 ixl_enable_rings(struct ixl_vsi *vsi)
3853 {
3854         struct ixl_pf   *pf = vsi->back;
3855         struct i40e_hw  *hw = &pf->hw;
3856         int             index, error;
3857         u32             reg;
3858
3859         error = 0;
3860         for (int i = 0; i < vsi->num_queues; i++) {
3861                 index = vsi->first_queue + i;
3862                 i40e_pre_tx_queue_cfg(hw, index, TRUE);
3863
3864                 reg = rd32(hw, I40E_QTX_ENA(index));
3865                 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3866                     I40E_QTX_ENA_QENA_STAT_MASK;
3867                 wr32(hw, I40E_QTX_ENA(index), reg);
3868                 /* Verify the enable took */
3869                 for (int j = 0; j < 10; j++) {
3870                         reg = rd32(hw, I40E_QTX_ENA(index));
3871                         if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3872                                 break;
3873                         i40e_msec_delay(10);
3874                 }
3875                 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3876                         device_printf(pf->dev, "TX queue %d disabled!\n",
3877                             index);
3878                         error = ETIMEDOUT;
3879                 }
3880
3881                 reg = rd32(hw, I40E_QRX_ENA(index));
3882                 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3883                     I40E_QRX_ENA_QENA_STAT_MASK;
3884                 wr32(hw, I40E_QRX_ENA(index), reg);
3885                 /* Verify the enable took */
3886                 for (int j = 0; j < 10; j++) {
3887                         reg = rd32(hw, I40E_QRX_ENA(index));
3888                         if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3889                                 break;
3890                         i40e_msec_delay(10);
3891                 }
3892                 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3893                         device_printf(pf->dev, "RX queue %d disabled!\n",
3894                             index);
3895                         error = ETIMEDOUT;
3896                 }
3897         }
3898
3899         return (error);
3900 }
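/*
** The *_QENA_REQ bit requests the queue state change and *_QENA_STAT
** reflects the state the hardware actually reached, so each write is
** followed by a poll of up to 10 x 10ms (~100ms) before the queue is
** declared stuck and ETIMEDOUT is returned.  ixl_disable_rings()
** below runs the mirror-image sequence.
*/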
3901
3902 static int
3903 ixl_disable_rings(struct ixl_vsi *vsi)
3904 {
3905         struct ixl_pf   *pf = vsi->back;
3906         struct i40e_hw  *hw = &pf->hw;
3907         int             index, error;
3908         u32             reg;
3909
3910         error = 0;
3911         for (int i = 0; i < vsi->num_queues; i++) {
3912                 index = vsi->first_queue + i;
3913
3914                 i40e_pre_tx_queue_cfg(hw, index, FALSE);
3915                 i40e_usec_delay(500);
3916
3917                 reg = rd32(hw, I40E_QTX_ENA(index));
3918                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3919                 wr32(hw, I40E_QTX_ENA(index), reg);
3920                 /* Verify the disable took */
3921                 for (int j = 0; j < 10; j++) {
3922                         reg = rd32(hw, I40E_QTX_ENA(index));
3923                         if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3924                                 break;
3925                         i40e_msec_delay(10);
3926                 }
3927                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3928                         device_printf(pf->dev, "TX queue %d still enabled!\n",
3929                             index);
3930                         error = ETIMEDOUT;
3931                 }
3932
3933                 reg = rd32(hw, I40E_QRX_ENA(index));
3934                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3935                 wr32(hw, I40E_QRX_ENA(index), reg);
3936                 /* Verify the disable took */
3937                 for (int j = 0; j < 10; j++) {
3938                         reg = rd32(hw, I40E_QRX_ENA(index));
3939                         if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3940                                 break;
3941                         i40e_msec_delay(10);
3942                 }
3943                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3944                         device_printf(pf->dev, "RX queue %d still enabled!\n",
3945                             index);
3946                         error = ETIMEDOUT;
3947                 }
3948         }
3949
3950         return (error);
3951 }
3952
3953 /**
3954  * ixl_handle_mdd_event
3955  *
3956  * Called from the interrupt handler to identify possibly malicious VFs
3957  * (it also detects events from the PF)
3958  **/
3959 static void ixl_handle_mdd_event(struct ixl_pf *pf)
3960 {
3961         struct i40e_hw *hw = &pf->hw;
3962         device_t dev = pf->dev;
3963         bool mdd_detected = false;
3964         bool pf_mdd_detected = false;
3965         u32 reg;
3966
3967         /* find what triggered the MDD event */
3968         reg = rd32(hw, I40E_GL_MDET_TX);
3969         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3970                 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3971                                 I40E_GL_MDET_TX_PF_NUM_SHIFT;
3972                 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3973                                 I40E_GL_MDET_TX_EVENT_SHIFT;
3974                 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3975                                 I40E_GL_MDET_TX_QUEUE_SHIFT;
3976                 device_printf(dev,
3977                          "Malicious Driver Detection event 0x%02x"
3978                          " on TX queue %d pf number 0x%02x\n",
3979                          event, queue, pf_num);
3980                 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3981                 mdd_detected = true;
3982         }
3983         reg = rd32(hw, I40E_GL_MDET_RX);
3984         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3985                 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3986                                 I40E_GL_MDET_RX_FUNCTION_SHIFT;
3987                 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3988                                 I40E_GL_MDET_RX_EVENT_SHIFT;
3989                 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3990                                 I40E_GL_MDET_RX_QUEUE_SHIFT;
3991                 device_printf(dev,
3992                          "Malicious Driver Detection event 0x%02x"
3993                          " on RX queue %d of function 0x%02x\n",
3994                          event, queue, func);
3995                 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3996                 mdd_detected = true;
3997         }
3998
3999         if (mdd_detected) {
4000                 reg = rd32(hw, I40E_PF_MDET_TX);
4001                 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
4002                         wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
4003                         device_printf(dev,
4004                                  "MDD TX event is for this function 0x%08x\n",
4005                                  reg);
4006                         pf_mdd_detected = true;
4007                 }
4008                 reg = rd32(hw, I40E_PF_MDET_RX);
4009                 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
4010                         wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
4011                         device_printf(dev,
4012                                  "MDD RX event is for this function 0x%08x\n",
4013                                  reg);
4014                         pf_mdd_detected = true;
4015                 }
4016         }
4017
4018         /* re-enable mdd interrupt cause */
4019         reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4020         reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4021         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4022         ixl_flush(hw);
4023 }
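/*
** Each MDET register packs several fields, recovered with the usual
** (reg & MASK) >> SHIFT idiom; writing all-ones back afterward clears
** the latched event so the next offender can be captured.
*/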
4024
4025 static void
4026 ixl_enable_intr(struct ixl_vsi *vsi)
4027 {
4028         struct i40e_hw          *hw = vsi->hw;
4029         struct ixl_queue        *que = vsi->queues;
4030
4031         if (ixl_enable_msix) {
4032                 ixl_enable_adminq(hw);
4033                 for (int i = 0; i < vsi->num_queues; i++, que++)
4034                         ixl_enable_queue(hw, que->me);
4035         } else
4036                 ixl_enable_legacy(hw);
4037 }
4038
4039 static void
4040 ixl_disable_rings_intr(struct ixl_vsi *vsi)
4041 {
4042         struct i40e_hw          *hw = vsi->hw;
4043         struct ixl_queue        *que = vsi->queues;
4044
4045         for (int i = 0; i < vsi->num_queues; i++, que++)
4046                 ixl_disable_queue(hw, que->me);
4047 }
4048
4049 static void
4050 ixl_disable_intr(struct ixl_vsi *vsi)
4051 {
4052         struct i40e_hw          *hw = vsi->hw;
4053
4054         if (ixl_enable_msix)
4055                 ixl_disable_adminq(hw);
4056         else
4057                 ixl_disable_legacy(hw);
4058 }
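/*
** The DYN_CTL registers enable an interrupt only while INTENA is set
** in the written value, so the disable paths below simply rewrite the
** register with just the ITR index and INTENA clear.
*/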
4059
4060 static void
4061 ixl_enable_adminq(struct i40e_hw *hw)
4062 {
4063         u32             reg;
4064
4065         reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4066             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4067             (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4068         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4069         ixl_flush(hw);
4070         return;
4071 }
4072
4073 static void
4074 ixl_disable_adminq(struct i40e_hw *hw)
4075 {
4076         u32             reg;
4077
4078         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4079         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4080
4081         return;
4082 }
4083
4084 static void
4085 ixl_enable_queue(struct i40e_hw *hw, int id)
4086 {
4087         u32             reg;
4088
4089         reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
4090             I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
4091             (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
4092         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4093 }
4094
4095 static void
4096 ixl_disable_queue(struct i40e_hw *hw, int id)
4097 {
4098         u32             reg;
4099
4100         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4101         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4102
4103         return;
4104 }
4105
4106 static void
4107 ixl_enable_legacy(struct i40e_hw *hw)
4108 {
4109         u32             reg;
4110         reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4111             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4112             (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4113         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4114 }
4115
4116 static void
4117 ixl_disable_legacy(struct i40e_hw *hw)
4118 {
4119         u32             reg;
4120
4121         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4122         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4123
4124         return;
4125 }
4126
4127 static void
4128 ixl_update_stats_counters(struct ixl_pf *pf)
4129 {
4130         struct i40e_hw  *hw = &pf->hw;
4131         struct ixl_vsi  *vsi = &pf->vsi;
4132         struct ixl_vf   *vf;
4133
4134         struct i40e_hw_port_stats *nsd = &pf->stats;
4135         struct i40e_hw_port_stats *osd = &pf->stats_offsets;
4136
4137         /* Update hw stats */
4138         ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
4139                            pf->stat_offsets_loaded,
4140                            &osd->crc_errors, &nsd->crc_errors);
4141         ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
4142                            pf->stat_offsets_loaded,
4143                            &osd->illegal_bytes, &nsd->illegal_bytes);
4144         ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
4145                            I40E_GLPRT_GORCL(hw->port),
4146                            pf->stat_offsets_loaded,
4147                            &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
4148         ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
4149                            I40E_GLPRT_GOTCL(hw->port),
4150                            pf->stat_offsets_loaded,
4151                            &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
4152         ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
4153                            pf->stat_offsets_loaded,
4154                            &osd->eth.rx_discards,
4155                            &nsd->eth.rx_discards);
4156         ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
4157                            I40E_GLPRT_UPRCL(hw->port),
4158                            pf->stat_offsets_loaded,
4159                            &osd->eth.rx_unicast,
4160                            &nsd->eth.rx_unicast);
4161         ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
4162                            I40E_GLPRT_UPTCL(hw->port),
4163                            pf->stat_offsets_loaded,
4164                            &osd->eth.tx_unicast,
4165                            &nsd->eth.tx_unicast);
4166         ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
4167                            I40E_GLPRT_MPRCL(hw->port),
4168                            pf->stat_offsets_loaded,
4169                            &osd->eth.rx_multicast,
4170                            &nsd->eth.rx_multicast);
4171         ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
4172                            I40E_GLPRT_MPTCL(hw->port),
4173                            pf->stat_offsets_loaded,
4174                            &osd->eth.tx_multicast,
4175                            &nsd->eth.tx_multicast);
4176         ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
4177                            I40E_GLPRT_BPRCL(hw->port),
4178                            pf->stat_offsets_loaded,
4179                            &osd->eth.rx_broadcast,
4180                            &nsd->eth.rx_broadcast);
4181         ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
4182                            I40E_GLPRT_BPTCL(hw->port),
4183                            pf->stat_offsets_loaded,
4184                            &osd->eth.tx_broadcast,
4185                            &nsd->eth.tx_broadcast);
4186
4187         ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
4188                            pf->stat_offsets_loaded,
4189                            &osd->tx_dropped_link_down,
4190                            &nsd->tx_dropped_link_down);
4191         ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
4192                            pf->stat_offsets_loaded,
4193                            &osd->mac_local_faults,
4194                            &nsd->mac_local_faults);
4195         ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
4196                            pf->stat_offsets_loaded,
4197                            &osd->mac_remote_faults,
4198                            &nsd->mac_remote_faults);
4199         ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
4200                            pf->stat_offsets_loaded,
4201                            &osd->rx_length_errors,
4202                            &nsd->rx_length_errors);
4203
4204         /* Flow control (LFC) stats */
4205         ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
4206                            pf->stat_offsets_loaded,
4207                            &osd->link_xon_rx, &nsd->link_xon_rx);
4208         ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
4209                            pf->stat_offsets_loaded,
4210                            &osd->link_xon_tx, &nsd->link_xon_tx);
4211         ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
4212                            pf->stat_offsets_loaded,
4213                            &osd->link_xoff_rx, &nsd->link_xoff_rx);
4214         ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
4215                            pf->stat_offsets_loaded,
4216                            &osd->link_xoff_tx, &nsd->link_xoff_tx);
4217
4218         /* Packet size stats rx */
4219         ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
4220                            I40E_GLPRT_PRC64L(hw->port),
4221                            pf->stat_offsets_loaded,
4222                            &osd->rx_size_64, &nsd->rx_size_64);
4223         ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
4224                            I40E_GLPRT_PRC127L(hw->port),
4225                            pf->stat_offsets_loaded,
4226                            &osd->rx_size_127, &nsd->rx_size_127);
4227         ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
4228                            I40E_GLPRT_PRC255L(hw->port),
4229                            pf->stat_offsets_loaded,
4230                            &osd->rx_size_255, &nsd->rx_size_255);
4231         ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
4232                            I40E_GLPRT_PRC511L(hw->port),
4233                            pf->stat_offsets_loaded,
4234                            &osd->rx_size_511, &nsd->rx_size_511);
4235         ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
4236                            I40E_GLPRT_PRC1023L(hw->port),
4237                            pf->stat_offsets_loaded,
4238                            &osd->rx_size_1023, &nsd->rx_size_1023);
4239         ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
4240                            I40E_GLPRT_PRC1522L(hw->port),
4241                            pf->stat_offsets_loaded,
4242                            &osd->rx_size_1522, &nsd->rx_size_1522);
4243         ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
4244                            I40E_GLPRT_PRC9522L(hw->port),
4245                            pf->stat_offsets_loaded,
4246                            &osd->rx_size_big, &nsd->rx_size_big);
4247
4248         /* Packet size stats tx */
4249         ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
4250                            I40E_GLPRT_PTC64L(hw->port),
4251                            pf->stat_offsets_loaded,
4252                            &osd->tx_size_64, &nsd->tx_size_64);
4253         ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
4254                            I40E_GLPRT_PTC127L(hw->port),
4255                            pf->stat_offsets_loaded,
4256                            &osd->tx_size_127, &nsd->tx_size_127);
4257         ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
4258                            I40E_GLPRT_PTC255L(hw->port),
4259                            pf->stat_offsets_loaded,
4260                            &osd->tx_size_255, &nsd->tx_size_255);
4261         ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
4262                            I40E_GLPRT_PTC511L(hw->port),
4263                            pf->stat_offsets_loaded,
4264                            &osd->tx_size_511, &nsd->tx_size_511);
4265         ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
4266                            I40E_GLPRT_PTC1023L(hw->port),
4267                            pf->stat_offsets_loaded,
4268                            &osd->tx_size_1023, &nsd->tx_size_1023);
4269         ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
4270                            I40E_GLPRT_PTC1522L(hw->port),
4271                            pf->stat_offsets_loaded,
4272                            &osd->tx_size_1522, &nsd->tx_size_1522);
4273         ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
4274                            I40E_GLPRT_PTC9522L(hw->port),
4275                            pf->stat_offsets_loaded,
4276                            &osd->tx_size_big, &nsd->tx_size_big);
4277
4278         ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
4279                            pf->stat_offsets_loaded,
4280                            &osd->rx_undersize, &nsd->rx_undersize);
4281         ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
4282                            pf->stat_offsets_loaded,
4283                            &osd->rx_fragments, &nsd->rx_fragments);
4284         ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
4285                            pf->stat_offsets_loaded,
4286                            &osd->rx_oversize, &nsd->rx_oversize);
4287         ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
4288                            pf->stat_offsets_loaded,
4289                            &osd->rx_jabber, &nsd->rx_jabber);
4290         pf->stat_offsets_loaded = true;
4291         /* End hw stats */
4292
4293         /* Update vsi stats */
4294         ixl_update_vsi_stats(vsi);
4295
4296         for (int i = 0; i < pf->num_vfs; i++) {
4297                 vf = &pf->vfs[i];
4298                 if (vf->vf_flags & VF_FLAG_ENABLED)
4299                         ixl_update_eth_stats(&vf->vsi);
4300         }
4301 }
4302
4303 /*
4304 ** Taskqueue handler for MSIX Adminq interrupts
4305 **  - runs outside interrupt context since it may sleep
4306 */
4307 static void
4308 ixl_do_adminq(void *context, int pending)
4309 {
4310         struct ixl_pf                   *pf = context;
4311         struct i40e_hw                  *hw = &pf->hw;
4312         struct ixl_vsi                  *vsi = &pf->vsi;
4313         struct i40e_arq_event_info      event;
4314         i40e_status                     ret;
4315         u32                             reg, loop = 0;
4316         u16                             opcode, result;
4317
4318         event.buf_len = IXL_AQ_BUF_SZ;
4319         event.msg_buf = malloc(event.buf_len,
4320             M_DEVBUF, M_NOWAIT | M_ZERO);
4321         if (!event.msg_buf) {
4322                 device_printf(pf->dev, "Unable to allocate adminq memory\n");
4323                 return;
4324         }
4325
4326         IXL_PF_LOCK(pf);
4327         /* clean and process any events */
4328         do {
4329                 ret = i40e_clean_arq_element(hw, &event, &result);
4330                 if (ret)
4331                         break;
4332                 opcode = LE16_TO_CPU(event.desc.opcode);
4333                 switch (opcode) {
4334                 case i40e_aqc_opc_get_link_status:
4335                         ixl_link_event(pf, &event);
4336                         ixl_update_link_status(pf);
4337                         break;
4338                 case i40e_aqc_opc_send_msg_to_pf:
4339 #ifdef PCI_IOV
4340                         ixl_handle_vf_msg(pf, &event);
4341 #endif
4342                         break;
4343                 case i40e_aqc_opc_event_lan_overflow:
4344                         break;
4345                 default:
4346 #ifdef IXL_DEBUG
4347                         printf("AdminQ unknown event %x\n", opcode);
4348 #endif
4349                         break;
4350                 }
4351
4352         } while (result && (loop++ < IXL_ADM_LIMIT));
4353
4354         reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4355         reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4356         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4357         free(event.msg_buf, M_DEVBUF);
4358
4359         /*
4360          * If there are still messages to process, reschedule ourselves.
4361          * Otherwise, re-enable our interrupt and go to sleep.
4362          */
4363         if (result > 0)
4364                 taskqueue_enqueue(pf->tq, &pf->adminq);
4365         else
4366                 ixl_enable_intr(vsi);
4367
4368         IXL_PF_UNLOCK(pf);
4369 }
4370
4371 static int
4372 ixl_debug_info(SYSCTL_HANDLER_ARGS)
4373 {
4374         struct ixl_pf   *pf;
4375         int             error, input = 0;
4376
4377         error = sysctl_handle_int(oidp, &input, 0, req);
4378
4379         if (error || !req->newptr)
4380                 return (error);
4381
4382         if (input == 1) {
4383                 pf = (struct ixl_pf *)arg1;
4384                 ixl_print_debug_info(pf);
4385         }
4386
4387         return (error);
4388 }
4389
4390 static void
4391 ixl_print_debug_info(struct ixl_pf *pf)
4392 {
4393         struct i40e_hw          *hw = &pf->hw;
4394         struct ixl_vsi          *vsi = &pf->vsi;
4395         struct ixl_queue        *que = vsi->queues;
4396         struct rx_ring          *rxr = &que->rxr;
4397         struct tx_ring          *txr = &que->txr;
4398         u32                     reg;
4399
4400
4401         printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4402         printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4403         printf("RX next check = %x\n", rxr->next_check);
4404         printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4405         printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4406         printf("TX desc avail = %x\n", txr->avail);
4407
4408         reg = rd32(hw, I40E_GLV_GORCL(0xc));
4409         printf("RX Bytes = %x\n", reg);
4410         reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4411         printf("Port RX Bytes = %x\n", reg);
4412         reg = rd32(hw, I40E_GLV_RDPC(0xc));
4413         printf("RX discard = %x\n", reg);
4414         reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4415         printf("Port RX discard = %x\n", reg);
4416
4417         reg = rd32(hw, I40E_GLV_TEPC(0xc));
4418         printf("TX errors = %x\n", reg);
4419         reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4420         printf("TX Bytes = %x\n", reg);
4421
4422         reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4423         printf("RX undersize = %x\n", reg);
4424         reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4425         printf("RX fragments = %x\n", reg);
4426         reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4427         printf("RX oversize = %x\n", reg);
4428         reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4429         printf("RX length error = %x\n", reg);
4430         reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4431         printf("mac remote fault = %x\n", reg);
4432         reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4433         printf("mac local fault = %x\n", reg);
4434 }
4435
4436 /**
4437  * Update VSI-specific ethernet statistics counters.
4438  **/
4439 void ixl_update_eth_stats(struct ixl_vsi *vsi)
4440 {
4441         struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4442         struct i40e_hw *hw = &pf->hw;
4443         struct i40e_eth_stats *es;
4444         struct i40e_eth_stats *oes;
4445         struct i40e_hw_port_stats *nsd;
4446         u16 stat_idx = vsi->info.stat_counter_idx;
4447
4448         es = &vsi->eth_stats;
4449         oes = &vsi->eth_stats_offsets;
4450         nsd = &pf->stats;
4451
4452         /* Gather up the stats that the hw collects */
4453         ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4454                            vsi->stat_offsets_loaded,
4455                            &oes->tx_errors, &es->tx_errors);
4456         ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4457                            vsi->stat_offsets_loaded,
4458                            &oes->rx_discards, &es->rx_discards);
4459
4460         ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4461                            I40E_GLV_GORCL(stat_idx),
4462                            vsi->stat_offsets_loaded,
4463                            &oes->rx_bytes, &es->rx_bytes);
4464         ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4465                            I40E_GLV_UPRCL(stat_idx),
4466                            vsi->stat_offsets_loaded,
4467                            &oes->rx_unicast, &es->rx_unicast);
4468         ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4469                            I40E_GLV_MPRCL(stat_idx),
4470                            vsi->stat_offsets_loaded,
4471                            &oes->rx_multicast, &es->rx_multicast);
4472         ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4473                            I40E_GLV_BPRCL(stat_idx),
4474                            vsi->stat_offsets_loaded,
4475                            &oes->rx_broadcast, &es->rx_broadcast);
4476
4477         ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4478                            I40E_GLV_GOTCL(stat_idx),
4479                            vsi->stat_offsets_loaded,
4480                            &oes->tx_bytes, &es->tx_bytes);
4481         ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4482                            I40E_GLV_UPTCL(stat_idx),
4483                            vsi->stat_offsets_loaded,
4484                            &oes->tx_unicast, &es->tx_unicast);
4485         ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4486                            I40E_GLV_MPTCL(stat_idx),
4487                            vsi->stat_offsets_loaded,
4488                            &oes->tx_multicast, &es->tx_multicast);
4489         ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4490                            I40E_GLV_BPTCL(stat_idx),
4491                            vsi->stat_offsets_loaded,
4492                            &oes->tx_broadcast, &es->tx_broadcast);
4493         vsi->stat_offsets_loaded = true;
4494 }
4495
4496 static void
4497 ixl_update_vsi_stats(struct ixl_vsi *vsi)
4498 {
4499         struct ixl_pf           *pf;
4500         struct ifnet            *ifp;
4501         struct i40e_eth_stats   *es;
4502         u64                     tx_discards;
4503
4504         struct i40e_hw_port_stats *nsd;
4505
4506         pf = vsi->back;
4507         ifp = vsi->ifp;
4508         es = &vsi->eth_stats;
4509         nsd = &pf->stats;
4510
4511         ixl_update_eth_stats(vsi);
4512
4513         tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4514         for (int i = 0; i < vsi->num_queues; i++)
4515                 tx_discards += vsi->queues[i].txr.br->br_drops;
4516
4517         /* Update ifnet stats */
4518         IXL_SET_IPACKETS(vsi, es->rx_unicast +
4519                            es->rx_multicast +
4520                            es->rx_broadcast);
4521         IXL_SET_OPACKETS(vsi, es->tx_unicast +
4522                            es->tx_multicast +
4523                            es->tx_broadcast);
4524         IXL_SET_IBYTES(vsi, es->rx_bytes);
4525         IXL_SET_OBYTES(vsi, es->tx_bytes);
4526         IXL_SET_IMCASTS(vsi, es->rx_multicast);
4527         IXL_SET_OMCASTS(vsi, es->tx_multicast);
4528
4529         IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4530             nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4531             nsd->rx_jabber);
4532         IXL_SET_OERRORS(vsi, es->tx_errors);
4533         IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4534         IXL_SET_OQDROPS(vsi, tx_discards);
4535         IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4536         IXL_SET_COLLISIONS(vsi, 0);
4537 }
4538
4539 /**
4540  * Reset all of the stats for the given pf
4541  **/
4542 void ixl_pf_reset_stats(struct ixl_pf *pf)
4543 {
4544         bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4545         bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4546         pf->stat_offsets_loaded = false;
4547 }
4548
4549 /**
4550  * Resets all stats of the given vsi
4551  **/
4552 void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4553 {
4554         bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4555         bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4556         vsi->stat_offsets_loaded = false;
4557 }
4558
4559 /**
4560  * Read and update a 48 bit stat from the hw
4561  *
4562  * Since the device stats are not reset at PFReset, they likely will not
4563  * be zeroed when the driver starts.  We'll save the first values read
4564  * and use them as offsets to be subtracted from the raw values in order
4565  * to report stats that count from zero.
4566  **/
4567 static void
4568 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4569         bool offset_loaded, u64 *offset, u64 *stat)
4570 {
4571         u64 new_data;
4572
4573 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4574         new_data = rd64(hw, loreg);
4575 #else
4576         /*
4577          * Use two rd32's instead of one rd64: FreeBSD versions before 10
4578          * lack 8-byte bus reads, which are in any case only used on amd64.
4579          */
4580         new_data = rd32(hw, loreg);
4581         new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4582 #endif
4583
4584         if (!offset_loaded)
4585                 *offset = new_data;
4586         if (new_data >= *offset)
4587                 *stat = new_data - *offset;
4588         else
4589                 *stat = (new_data + ((u64)1 << 48)) - *offset;
4590         *stat &= 0xFFFFFFFFFFFFULL;
4591 }
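
     /*
      * Worked example of the wrap handling above (illustrative values
      * only): if the first read after load returns 0x0F0000000000, that
      * value becomes *offset and the reported stat is 0.  If the 48-bit
      * counter later rolls over and a read returns 0x000000001000,
      * new_data is less than *offset, so 2^48 is added back before the
      * subtraction; the final mask keeps the result within 48 bits.
      */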
4592
4593 /**
4594  * Read and update a 32 bit stat from the hw
4595  **/
4596 static void
4597 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4598         bool offset_loaded, u64 *offset, u64 *stat)
4599 {
4600         u32 new_data;
4601
4602         new_data = rd32(hw, reg);
4603         if (!offset_loaded)
4604                 *offset = new_data;
4605         if (new_data >= *offset)
4606                 *stat = (u32)(new_data - *offset);
4607         else
4608                 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4609 }
4610
4611 /*
4612 ** Set flow control using sysctl:
4613 **      0 - off
4614 **      1 - rx pause
4615 **      2 - tx pause
4616 **      3 - full
4617 */
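     /*
     ** Example usage from userland, assuming this handler is attached
     ** as the "fc" node under the device's sysctl tree (node name shown
     ** for illustration; see the driver's sysctl setup):
     **
     **      # sysctl dev.ixl.0.fc=3         (request full flow control)
     **      # sysctl dev.ixl.0.fc           (read back the current mode)
     */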
4618 static int
4619 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4620 {
4621         /*
4622          * TODO: ensure flow control is disabled if
4623          * priority flow control is enabled
4624          *
4625          * TODO: ensure tx CRC insertion by hardware is enabled
4626          * if tx flow control is enabled.
4627          */
4628         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4629         struct i40e_hw *hw = &pf->hw;
4630         device_t dev = pf->dev;
4631         int requested_fc, error = 0;
4632         enum i40e_status_code aq_error = 0;
4633         u8 fc_aq_err = 0;
4634
4635         /* Get request into a local so invalid input can't corrupt pf->fc */
4636         requested_fc = pf->fc;
4637         error = sysctl_handle_int(oidp, &requested_fc, 0, req);
4638         if ((error) || (req->newptr == NULL))
4639                 return (error);
4640         if (requested_fc < 0 || requested_fc > 3) {
4641                 device_printf(dev,
4642                     "Invalid fc mode; valid modes are 0 through 3\n");
4643                 return (EINVAL);
4644         }

4645         /*
4646         ** Changing flow control mode currently does not work on
4647         ** 40GBASE-CR4 PHYs
4648         */
4649         if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4650             || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4651                 device_printf(dev, "Changing flow control mode is unsupported"
4652                     " on 40GBase-CR4 media.\n");
4653                 return (ENODEV);
4654         }
4655
4656         /* Set fc ability for port */
4657         hw->fc.requested_mode = pf->fc = requested_fc;
4658         aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4659         if (aq_error) {
4660                 device_printf(dev,
4661                     "%s: Error setting new fc mode %d; fc_err %#x\n",
4662                     __func__, aq_error, fc_aq_err);
4663                 return (EAGAIN);
4664         }
4665
4666         return (0);
4667 }
4668
4669 static int
4670 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4671 {
4672         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4673         struct i40e_hw *hw = &pf->hw;
4674         int error = 0, index = 0;
4675
4676         char *speeds[] = {
4677                 "Unknown",
4678                 "100M",
4679                 "1G",
4680                 "10G",
4681                 "40G",
4682                 "20G"
4683         };
4684
4685         ixl_update_link_status(pf);
4686
4687         switch (hw->phy.link_info.link_speed) {
4688         case I40E_LINK_SPEED_100MB:
4689                 index = 1;
4690                 break;
4691         case I40E_LINK_SPEED_1GB:
4692                 index = 2;
4693                 break;
4694         case I40E_LINK_SPEED_10GB:
4695                 index = 3;
4696                 break;
4697         case I40E_LINK_SPEED_40GB:
4698                 index = 4;
4699                 break;
4700         case I40E_LINK_SPEED_20GB:
4701                 index = 5;
4702                 break;
4703         case I40E_LINK_SPEED_UNKNOWN:
4704         default:
4705                 index = 0;
4706                 break;
4707         }
4708
4709         error = sysctl_handle_string(oidp, speeds[index],
4710             strlen(speeds[index]), req);
4711         return (error);
4712 }
4713
4714 static int
4715 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4716 {
4717         struct i40e_hw *hw = &pf->hw;
4718         device_t dev = pf->dev;
4719         struct i40e_aq_get_phy_abilities_resp abilities;
4720         struct i40e_aq_set_phy_config config;
4721         enum i40e_status_code aq_error = 0;
4722
4723         /* Get current capability information */
4724         aq_error = i40e_aq_get_phy_capabilities(hw,
4725             FALSE, FALSE, &abilities, NULL);
4726         if (aq_error) {
4727                 device_printf(dev,
4728                     "%s: Error getting phy capabilities %d,"
4729                     " aq error: %d\n", __func__, aq_error,
4730                     hw->aq.asq_last_status);
4731                 return (EAGAIN);
4732         }
4733
4734         /* Prepare new config */
4735         bzero(&config, sizeof(config));
4736         config.phy_type = abilities.phy_type;
4737         config.abilities = abilities.abilities
4738             | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4739         config.eee_capability = abilities.eee_capability;
4740         config.eeer = abilities.eeer_val;
4741         config.low_power_ctrl = abilities.d3_lpan;
4742         /* Translate into aq cmd link_speed */
4743         if (speeds & 0x8)
4744                 config.link_speed |= I40E_LINK_SPEED_20GB;
4745         if (speeds & 0x4)
4746                 config.link_speed |= I40E_LINK_SPEED_10GB;
4747         if (speeds & 0x2)
4748                 config.link_speed |= I40E_LINK_SPEED_1GB;
4749         if (speeds & 0x1)
4750                 config.link_speed |= I40E_LINK_SPEED_100MB;
4751
4752         /* Do aq command & restart link */
4753         aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4754         if (aq_error) {
4755                 device_printf(dev,
4756                     "%s: Error setting new phy config %d,"
4757                     " aq error: %d\n", __func__, aq_error,
4758                     hw->aq.asq_last_status);
4759                 return (EAGAIN);
4760         }
4761
4762         /*
4763         ** This seems a bit heavy handed, but we
4764         ** need to get a reinit on some devices
4765         */
4766         IXL_PF_LOCK(pf);
4767         ixl_stop(pf);
4768         ixl_init_locked(pf);
4769         IXL_PF_UNLOCK(pf);
4770
4771         return (0);
4772 }
4773
4774 /*
4775 ** Control link advertise speed:
4776 **      Flags:
4777 **      0x1 - advertise 100 Mb
4778 **      0x2 - advertise 1G
4779 **      0x4 - advertise 10G
4780 **      0x8 - advertise 20G
4781 **
4782 ** Does not work on 40G devices.
4783 */
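     /*
     ** Example usage, assuming this handler is attached as the
     ** "advertise_speed" sysctl node (node name shown for illustration):
     **
     **      # sysctl dev.ixl.0.advertise_speed=0x6  (advertise 1G + 10G)
     */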
4784 static int
4785 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4786 {
4787         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4788         struct i40e_hw *hw = &pf->hw;
4789         device_t dev = pf->dev;
4790         int requested_ls = 0;
4791         int error = 0;
4792
4793         /*
4794         ** FW doesn't support changing advertised speed
4795         ** for 40G devices; speed is always 40G.
4796         */
4797         if (i40e_is_40G_device(hw->device_id))
4798                 return (ENODEV);
4799
4800         /* Read in new mode */
4801         requested_ls = pf->advertised_speed;
4802         error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4803         if ((error) || (req->newptr == NULL))
4804                 return (error);
4805         /* Check for sane value */
4806         if (requested_ls < 0x1 || requested_ls > 0xE) {
4807                 device_printf(dev, "Invalid advertised speed; "
4808                     "valid modes are 0x1 through 0xE\n");
4809                 return (EINVAL);
4810         }
4811         /* Then check for validity based on adapter type */
4812         switch (hw->device_id) {
4813         case I40E_DEV_ID_10G_BASE_T:
4814                 if (requested_ls & 0x8) {
4815                         device_printf(dev,
4816                             "20Gb/s speed is not supported on this device.\n");
4817                         return (EINVAL);
4818                 }
4819                 break;
4820         case I40E_DEV_ID_20G_KR2:
4821                 if (requested_ls & 0x1) {
4822                         device_printf(dev,
4823                             "100Mb/s speed is not supported on this device.\n");
4824                         return (EINVAL);
4825                 }
4826                 break;
4827         default:
4828                 if (requested_ls & ~0x6) {
4829                         device_printf(dev,
4830                             "Only 1Gb/s and 10Gb/s speeds are supported on this device.\n");
4831                         return (EINVAL);
4832                 }
4833                 break;
4834         }
4835
4836         /* Exit if no change */
4837         if (pf->advertised_speed == requested_ls)
4838                 return (0);
4839
4840         error = ixl_set_advertised_speeds(pf, requested_ls);
4841         if (error)
4842                 return (error);
4843
4844         pf->advertised_speed = requested_ls;
4845         ixl_update_link_status(pf);
4846         return (0);
4847 }
4848
4849 /*
4850 ** Get the width and transaction speed of
4851 ** the bus this adapter is plugged into.
4852 */
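     /*
     ** For example, assuming the standard I40E_PCI_LINK_* field
     ** encodings, a Link Status value of 0x0083 decodes to a x8 link
     ** trained at 8.0GT/s, i.e. a PCIe Gen3 x8 slot (illustrative value).
     */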
4853 static u16
4854 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4855 {
4856         u16                     link;
4857         u32                     offset;
4858
4859
4860         /* Get the PCI Express Capabilities offset */
4861         pci_find_cap(dev, PCIY_EXPRESS, &offset);
4862
4863         /* ...and read the Link Status Register */
4864         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4865
4866         switch (link & I40E_PCI_LINK_WIDTH) {
4867         case I40E_PCI_LINK_WIDTH_1:
4868                 hw->bus.width = i40e_bus_width_pcie_x1;
4869                 break;
4870         case I40E_PCI_LINK_WIDTH_2:
4871                 hw->bus.width = i40e_bus_width_pcie_x2;
4872                 break;
4873         case I40E_PCI_LINK_WIDTH_4:
4874                 hw->bus.width = i40e_bus_width_pcie_x4;
4875                 break;
4876         case I40E_PCI_LINK_WIDTH_8:
4877                 hw->bus.width = i40e_bus_width_pcie_x8;
4878                 break;
4879         default:
4880                 hw->bus.width = i40e_bus_width_unknown;
4881                 break;
4882         }
4883
4884         switch (link & I40E_PCI_LINK_SPEED) {
4885         case I40E_PCI_LINK_SPEED_2500:
4886                 hw->bus.speed = i40e_bus_speed_2500;
4887                 break;
4888         case I40E_PCI_LINK_SPEED_5000:
4889                 hw->bus.speed = i40e_bus_speed_5000;
4890                 break;
4891         case I40E_PCI_LINK_SPEED_8000:
4892                 hw->bus.speed = i40e_bus_speed_8000;
4893                 break;
4894         default:
4895                 hw->bus.speed = i40e_bus_speed_unknown;
4896                 break;
4897         }
4898
4899
4900         device_printf(dev, "PCI Express Bus: Speed %s %s\n",
4901             ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4902             (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4903             (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4904             (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4905             (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4906             (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4907             ("Unknown"));
4908
4909         if ((hw->bus.width < i40e_bus_width_pcie_x8) ||
4910             (hw->bus.speed < i40e_bus_speed_8000)) {
4911                 device_printf(dev, "PCI-Express bandwidth available"
4912                     " for this device may be insufficient for"
4913                     " optimal performance.\n");
4914                 device_printf(dev, "For expected performance a x8 "
4915                     "PCIE Gen3 slot is required.\n");
4916         }
4917
4918         return (link);
4919 }
4920
4921 static int
4922 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4923 {
4924         struct ixl_pf   *pf = (struct ixl_pf *)arg1;
4925         struct i40e_hw  *hw = &pf->hw;
4926         char            buf[32];
4927
4928         snprintf(buf, sizeof(buf),
4929             "f%d.%d a%d.%d n%02x.%02x e%08x",
4930             hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
4931             hw->aq.api_maj_ver, hw->aq.api_min_ver,
4932             (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
4933             IXL_NVM_VERSION_HI_SHIFT,
4934             (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
4935             IXL_NVM_VERSION_LO_SHIFT,
4936             hw->nvm.eetrack);
4937         return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4938 }
4939
4940
4941 #ifdef IXL_DEBUG_SYSCTL
4942 static int
4943 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4944 {
4945         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4946         struct i40e_hw *hw = &pf->hw;
4947         struct i40e_link_status link_status;
4948         char buf[512];
4949
4950         enum i40e_status_code aq_error = 0;
4951
4952         aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4953         if (aq_error) {
4954                 printf("i40e_aq_get_link_info() error %d\n", aq_error);
4955                 return (EPERM);
4956         }
4957
4958         snprintf(buf, sizeof(buf), "\n"
4959             "PHY Type : %#04x\n"
4960             "Speed    : %#04x\n"
4961             "Link info: %#04x\n"
4962             "AN info  : %#04x\n"
4963             "Ext info : %#04x",
4964             link_status.phy_type, link_status.link_speed,
4965             link_status.link_info, link_status.an_info,
4966             link_status.ext_info);
4967
4968         return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4969 }
4970
4971 static int
4972 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4973 {
4974         struct ixl_pf           *pf = (struct ixl_pf *)arg1;
4975         struct i40e_hw          *hw = &pf->hw;
4976         char                    buf[512];
4977         enum i40e_status_code   aq_error = 0;
4978
4979         struct i40e_aq_get_phy_abilities_resp abilities;
4980
4981         aq_error = i40e_aq_get_phy_capabilities(hw,
4982             TRUE, FALSE, &abilities, NULL);
4983         if (aq_error) {
4984                 printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4985                 return (EPERM);
4986         }
4987
4988         snprintf(buf, sizeof(buf), "\n"
4989             "PHY Type : %#010x\n"
4990             "Speed    : %#04x\n"
4991             "Abilities: %#04x\n"
4992             "EEE cap  : %#06x\n"
4993             "EEER reg : %#010x\n"
4994             "D3 Lpan  : %#04x",
4995             abilities.phy_type, abilities.link_speed,
4996             abilities.abilities, abilities.eee_capability,
4997             abilities.eeer_val, abilities.d3_lpan);
4998
4999         return (sysctl_handle_string(oidp, buf, strlen(buf), req));
5000 }
5001
5002 static int
5003 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5004 {
5005         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5006         struct ixl_vsi *vsi = &pf->vsi;
5007         struct ixl_mac_filter *f;
5008         char *buf, *buf_i;
5009
5010         int error = 0;
5011         int ftl_len = 0;
5012         int ftl_counter = 0;
5013         int buf_len = 0;
5014         int entry_len = 42;     /* MAC(17) + ", vlan %4d"(11) + ", flags %#06x"(14) */
5015
5016         SLIST_FOREACH(f, &vsi->ftl, next) {
5017                 ftl_len++;
5018         }
5019
5020         if (ftl_len < 1) {
5021                 sysctl_handle_string(oidp, "(none)", 6, req);
5022                 return (0);
5023         }
5024
5025         buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5026         buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
5027         if (buf == NULL)
                     return (ENOMEM);

5028         sprintf(buf_i++, "\n");
5029         SLIST_FOREACH(f, &vsi->ftl, next) {
5030                 sprintf(buf_i,
5031                     MAC_FORMAT ", vlan %4d, flags %#06x",
5032                     MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5033                 buf_i += entry_len;
5034                 /* don't print '\n' for last entry */
5035                 if (++ftl_counter != ftl_len) {
5036                         sprintf(buf_i, "\n");
5037                         buf_i++;
5038                 }
5039         }
5040
5041         error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5042         if (error)
5043                 device_printf(pf->dev, "sysctl error: %d\n", error);
5044         free(buf, M_DEVBUF);
5045         return (error);
5046 }
5047
5048 #define IXL_SW_RES_SIZE 0x14
5049 static int
5050 ixl_res_alloc_cmp(const void *a, const void *b)
5051 {
5052         const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5053         one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5054         two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5055
5056         return ((int)one->resource_type - (int)two->resource_type);
5057 }
5058
5059 static int
5060 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5061 {
5062         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5063         struct i40e_hw *hw = &pf->hw;
5064         device_t dev = pf->dev;
5065         struct sbuf *buf;
5066         int error = 0;
5067
5068         u8 num_entries;
5069         struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5070
5071         buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
5072         if (!buf) {
5073                 device_printf(dev, "Could not allocate sbuf for output.\n");
5074                 return (ENOMEM);
5075         }
5076
5077         bzero(resp, sizeof(resp));
5078         error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5079                                 resp,
5080                                 IXL_SW_RES_SIZE,
5081                                 NULL);
5082         if (error) {
5083                 device_printf(dev,
5084                     "%s: get_switch_resource_alloc() error %d, aq error %d\n",
5085                     __func__, error, hw->aq.asq_last_status);
5086                 sbuf_delete(buf);
5087                 return (error);
5088         }
5089
5090         /* Sort entries by type for display */
5091         qsort(resp, num_entries,
5092             sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5093             &ixl_res_alloc_cmp);
5094
5095         sbuf_cat(buf, "\n");
5096         sbuf_printf(buf, "# of entries: %d\n", num_entries);
5097         sbuf_printf(buf,
5098             "Type | Guaranteed | Total | Used   | Un-allocated\n"
5099             "     | (this)     | (all) | (this) | (all)       \n");
5100         for (int i = 0; i < num_entries; i++) {
5101                 sbuf_printf(buf,
5102                     "%#4x | %10d   %5d   %6d   %12d",
5103                     resp[i].resource_type,
5104                     resp[i].guaranteed,
5105                     resp[i].total,
5106                     resp[i].used,
5107                     resp[i].total_unalloced);
5108                 if (i < num_entries - 1)
5109                         sbuf_cat(buf, "\n");
5110         }
5111
5112         error = sbuf_finish(buf);
5113         if (error) {
5114                 device_printf(dev, "Error finishing sbuf: %d\n", error);
5115                 sbuf_delete(buf);
5116                 return (error);
5117         }
5118
5119         error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
5120         if (error)
5121                 device_printf(dev, "sysctl error: %d\n", error);
5122         sbuf_delete(buf);
5123
5124         return (error);
5125 }
5126
5127 /*
5128 ** Caller must init and delete sbuf; this function will clear and
5129 ** finish it for caller.
5130 */
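     /*
     ** For example, SEID 16 renders as "PF 0" and SEID 512 as "VSI 0"
     ** under the ranges handled below (illustrative values).
     */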
5131 static char *
5132 ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
5133 {
5134         sbuf_clear(s);
5135
5136         if (seid == 0 && uplink)
5137                 sbuf_cat(s, "Network");
5138         else if (seid == 0)
5139                 sbuf_cat(s, "Host");
5140         else if (seid == 1)
5141                 sbuf_cat(s, "EMP");
5142         else if (seid <= 5)
5143                 sbuf_printf(s, "MAC %d", seid - 2);
5144         else if (seid <= 15)
5145                 sbuf_cat(s, "Reserved");
5146         else if (seid <= 31)
5147                 sbuf_printf(s, "PF %d", seid - 16);
5148         else if (seid <= 159)
5149                 sbuf_printf(s, "VF %d", seid - 32);
5150         else if (seid <= 287)
5151                 sbuf_cat(s, "Reserved");
5152         else if (seid <= 511)
5153                 sbuf_cat(s, "Other"); // for other structures
5154         else if (seid <= 895)
5155                 sbuf_printf(s, "VSI %d", seid - 512);
5156         else if (seid <= 1023)
5157                 sbuf_cat(s, "Reserved");
5158         else
5159                 sbuf_cat(s, "Invalid");
5160
5161         sbuf_finish(s);
5162         return sbuf_data(s);
5163 }
5164
5165 static int
5166 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5167 {
5168         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5169         struct i40e_hw *hw = &pf->hw;
5170         device_t dev = pf->dev;
5171         struct sbuf *buf;
5172         struct sbuf *nmbuf;
5173         int error = 0;
5174         u8 aq_buf[I40E_AQ_LARGE_BUF];
5175
5176         u16 next = 0;
5177         struct i40e_aqc_get_switch_config_resp *sw_config;
5178         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5179
5180         buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
5181         if (!buf) {
5182                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5183                 return (ENOMEM);
5184         }
5185
5186         error = i40e_aq_get_switch_config(hw, sw_config,
5187             sizeof(aq_buf), &next, NULL);
5188         if (error) {
5189                 device_printf(dev,
5190                     "%s: aq_get_switch_config() error %d, aq error %d\n",
5191                     __func__, error, hw->aq.asq_last_status);
5192                 sbuf_delete(buf);
5193                 return (error);
5194         }
5195
5196         nmbuf = sbuf_new_auto();
5197         if (!nmbuf) {
5198                 device_printf(dev, "Could not allocate sbuf for name output.\n");
                     sbuf_delete(buf);
5199                 return (ENOMEM);
5200         }
5201
5202         sbuf_cat(buf, "\n");
5203         // Assuming <= 255 elements in switch
5204         sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
5205         /* Exclude:
5206         ** Revision -- all elements are revision 1 for now
5207         */
5208         sbuf_printf(buf,
5209             "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
5210             "                |          |          | (uplink)\n");
5211         for (int i = 0; i < sw_config->header.num_reported; i++) {
5212                 // "%4d (%8s) | %8s   %8s   %#8x",
5213                 sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5214                 sbuf_cat(buf, " ");
5215                 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5216                     sw_config->element[i].seid, false));
5217                 sbuf_cat(buf, " | ");
5218                 sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5219                     sw_config->element[i].uplink_seid, true));
5220                 sbuf_cat(buf, "   ");
5221                 sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5222                     sw_config->element[i].downlink_seid, false));
5223                 sbuf_cat(buf, "   ");
5224                 sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
5225                 if (i < sw_config->header.num_reported - 1)
5226                         sbuf_cat(buf, "\n");
5227         }
5228         sbuf_delete(nmbuf);
5229
5230         error = sbuf_finish(buf);
5231         if (error) {
5232                 device_printf(dev, "Error finishing sbuf: %d\n", error);
5233                 sbuf_delete(buf);
5234                 return (error);
5235         }
5236
5237         error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
5238         if (error)
5239                 device_printf(dev, "sysctl error: %d\n", error);
5240         sbuf_delete(buf);
5241
5242         return (error);
5243 }
5244 #endif /* IXL_DEBUG_SYSCTL */
5245
5246
5247 #ifdef PCI_IOV
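     /*
      * Allocate and configure a VSI for the given VF: attach it to the
      * PF's VEB uplink, optionally enable MAC anti-spoof checking, map
      * its queues non-contiguously starting at first_queue, and disable
      * any BW limit on it.
      */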
5248 static int
5249 ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5250 {
5251         struct i40e_hw *hw;
5252         struct ixl_vsi *vsi;
5253         struct i40e_vsi_context vsi_ctx;
5254         int i;
5255         uint16_t first_queue;
5256         enum i40e_status_code code;
5257
5258         hw = &pf->hw;
5259         vsi = &pf->vsi;
5260
5261         vsi_ctx.pf_num = hw->pf_id;
5262         vsi_ctx.uplink_seid = pf->veb_seid;
5263         vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
5264         vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5265         vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
5266
5267         bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
5268
5269         vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5270         vsi_ctx.info.switch_id = htole16(0);
5271
5272         vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
5273         vsi_ctx.info.sec_flags = 0;
5274         if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
5275                 vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
5276
5277         vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
5278         vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5279             I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5280
5281         vsi_ctx.info.valid_sections |=
5282             htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5283         vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5284         first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
5285         for (i = 0; i < IXLV_MAX_QUEUES; i++)
5286                 vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
5287         for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
5288                 vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
5289
5290         vsi_ctx.info.tc_mapping[0] = htole16(
5291             (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5292             (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5293
5294         code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
5295         if (code != I40E_SUCCESS)
5296                 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5297         vf->vsi.seid = vsi_ctx.seid;
5298         vf->vsi.vsi_num = vsi_ctx.vsi_number;
5299         vf->vsi.first_queue = first_queue;
5300         vf->vsi.num_queues = IXLV_MAX_QUEUES;
5301
5302         code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
5303         if (code != I40E_SUCCESS)
5304                 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5305
5306         code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
5307         if (code != I40E_SUCCESS) {
5308                 device_printf(pf->dev, "Failed to disable BW limit: %d\n",
5309                     ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5310                 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5311         }
5312
5313         memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
5314         return (0);
5315 }
5316
5317 static int
5318 ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5319 {
5320         struct i40e_hw *hw;
5321         int error;
5322
5323         hw = &pf->hw;
5324
5325         error = ixl_vf_alloc_vsi(pf, vf);
5326         if (error != 0)
5327                 return (error);
5328
5329         vf->vsi.hw_filters_add = 0;
5330         vf->vsi.hw_filters_del = 0;
5331         ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
5332         ixl_reconfigure_filters(&vf->vsi);
5333
5334         return (0);
5335 }
5336
5337 static void
5338 ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
5339     uint32_t val)
5340 {
5341         uint32_t qtable;
5342         int index, shift;
5343
5344         /*
5345          * Two queues are mapped in a single register, so we have to do some
5346          * gymnastics to convert the queue number into a register index and
5347          * shift.
5348          */
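             /*
              * For example, qnum 5 maps to register index 2 (5 / 2) with
              * the odd-queue shift, while qnum 4 shares the same register
              * at shift 0 (illustrative values).
              */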
5349         index = qnum / 2;
5350         shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
5351
5352         qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
5353         qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
5354         qtable |= val << shift;
5355         wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
5356 }
5357
5358 static void
5359 ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
5360 {
5361         struct i40e_hw *hw;
5362         uint32_t qtable;
5363         int i;
5364
5365         hw = &pf->hw;
5366
5367         /*
5368          * Contiguous mappings aren't actually supported by the hardware,
5369          * so we have to use non-contiguous mappings.
5370          */
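             /*
              * For example, with a PF VSI using 8 queues and an
              * IXLV_MAX_QUEUES of 4 (illustrative values), VF 1's
              * first_queue would be 12, so its queue pairs map to global
              * queues 12-15; see ixl_vf_alloc_vsi().
              */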
5371         wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
5372              I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
5373
5374         wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
5375             I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
5376
5377         for (i = 0; i < vf->vsi.num_queues; i++) {
5378                 qtable = (vf->vsi.first_queue + i) <<
5379                     I40E_VPLAN_QTABLE_QINDEX_SHIFT;
5380
5381                 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
5382         }
5383
5384         /* Map queues allocated to VF to its VSI. */
5385         for (i = 0; i < vf->vsi.num_queues; i++)
5386                 ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);
5387
5388         /* Set rest of VSI queues as unused. */
5389         for (; i < IXL_MAX_VSI_QUEUES; i++)
5390                 ixl_vf_map_vsi_queue(hw, vf, i,
5391                     I40E_VSILAN_QTABLE_QINDEX_0_MASK);
5392
5393         ixl_flush(hw);
5394 }
5395
5396 static void
5397 ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
5398 {
5399         struct i40e_hw *hw;
5400
5401         hw = &pf->hw;
5402
5403         if (vsi->seid == 0)
5404                 return;
5405
5406         i40e_aq_delete_element(hw, vsi->seid, NULL);
5407 }
5408
5409 static void
5410 ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
5411 {
5412
5413         wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
5414         ixl_flush(hw);
5415 }
5416
5417 static void
5418 ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
5419 {
5420
5421         wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
5422             I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
5423         ixl_flush(hw);
5424 }
5425
5426 static void
5427 ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
5428 {
5429         struct i40e_hw *hw;
5430         uint32_t vfint_reg, vpint_reg;
5431         int i;
5432
5433         hw = &pf->hw;
5434
5435         ixl_vf_vsi_release(pf, &vf->vsi);
5436
5437         /* Index 0 has a special register. */
5438         ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
5439
5440         for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5441                 vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
5442                 ixl_vf_disable_queue_intr(hw, vfint_reg);
5443         }
5444
5445         /* Index 0 has a special register. */
5446         ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
5447
5448         for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5449                 vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
5450                 ixl_vf_unregister_intr(hw, vpint_reg);
5451         }
5452
5453         vf->vsi.num_queues = 0;
5454 }
5455
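     /*
      * Wait for any pending PCIe transactions from the VF to complete by
      * selecting the VF's device status through the PF_PCI_CIAA/CIAD
      * indirect config-space window and polling the transactions-pending
      * bit.  Returns ETIMEDOUT if still pending after
      * IXL_VF_RESET_TIMEOUT polls.
      */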
5456 static int
5457 ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
5458 {
5459         struct i40e_hw *hw;
5460         int i;
5461         uint16_t global_vf_num;
5462         uint32_t ciad;
5463
5464         hw = &pf->hw;
5465         global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5466
5467         wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
5468              (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
5469         for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5470                 ciad = rd32(hw, I40E_PF_PCI_CIAD);
5471                 if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
5472                         return (0);
5473                 DELAY(1);
5474         }
5475
5476         return (ETIMEDOUT);
5477 }
5478
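     /*
      * Trigger a software reset of the given VF (via the VFSWR bit in
      * VPGEN_VFRTRIG), then rebuild its resources via ixl_reinit_vf().
      */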
5479 static void
5480 ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5481 {
5482         struct i40e_hw *hw;
5483         uint32_t vfrtrig;
5484
5485         hw = &pf->hw;
5486
5487         vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5488         vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5489         wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5490         ixl_flush(hw);
5491
5492         ixl_reinit_vf(pf, vf);
5493 }
5494
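     /*
      * Complete a VF reset: wait for pending PCIe activity to stop and
      * for the hardware to signal reset-done, then rebuild the VF's VSI
      * and queue mappings before marking the VF active again.
      */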
5495 static void
5496 ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5497 {
5498         struct i40e_hw *hw;
5499         uint32_t vfrstat, vfrtrig;
5500         int i, error;
5501
5502         hw = &pf->hw;
5503
5504         error = ixl_flush_pcie(pf, vf);
5505         if (error != 0)
5506                 device_printf(pf->dev,
5507                     "Timed out waiting for PCIe activity to stop on VF-%d\n",
5508                     vf->vf_num);
5509
5510         for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5511                 DELAY(10);
5512
5513                 vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
5514                 if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
5515                         break;
5516         }
5517
5518         if (i == IXL_VF_RESET_TIMEOUT)
5519                 device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
5520
5521         wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
5522
5523         vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5524         vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5525         wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5526
5527         if (vf->vsi.seid != 0)
5528                 ixl_disable_rings(&vf->vsi);
5529
5530         ixl_vf_release_resources(pf, vf);
5531         ixl_vf_setup_vsi(pf, vf);
5532         ixl_vf_map_queues(pf, vf);
5533
5534         wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
5535         ixl_flush(hw);
5536 }
5537
5538 static const char *
5539 ixl_vc_opcode_str(uint16_t op)
5540 {
5541
5542         switch (op) {
5543         case I40E_VIRTCHNL_OP_VERSION:
5544                 return ("VERSION");
5545         case I40E_VIRTCHNL_OP_RESET_VF:
5546                 return ("RESET_VF");
5547         case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
5548                 return ("GET_VF_RESOURCES");
5549         case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
5550                 return ("CONFIG_TX_QUEUE");
5551         case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
5552                 return ("CONFIG_RX_QUEUE");
5553         case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
5554                 return ("CONFIG_VSI_QUEUES");
5555         case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
5556                 return ("CONFIG_IRQ_MAP");
5557         case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
5558                 return ("ENABLE_QUEUES");
5559         case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
5560                 return ("DISABLE_QUEUES");
5561         case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
5562                 return ("ADD_ETHER_ADDRESS");
5563         case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
5564                 return ("DEL_ETHER_ADDRESS");
5565         case I40E_VIRTCHNL_OP_ADD_VLAN:
5566                 return ("ADD_VLAN");
5567         case I40E_VIRTCHNL_OP_DEL_VLAN:
5568                 return ("DEL_VLAN");
5569         case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
5570                 return ("CONFIG_PROMISCUOUS_MODE");
5571         case I40E_VIRTCHNL_OP_GET_STATS:
5572                 return ("GET_STATS");
5573         case I40E_VIRTCHNL_OP_FCOE:
5574                 return ("FCOE");
5575         case I40E_VIRTCHNL_OP_EVENT:
5576                 return ("EVENT");
5577         default:
5578                 return ("UNKNOWN");
5579         }
5580 }
5581
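     /*
      * Debug verbosity required to log a given virtchnl opcode.
      * GET_STATS is requested periodically by VF drivers, so it is only
      * logged at a much higher debug level than the rarer control
      * operations.
      */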
5582 static int
5583 ixl_vc_opcode_level(uint16_t opcode)
5584 {
5585
5586         switch (opcode) {
5587         case I40E_VIRTCHNL_OP_GET_STATS:
5588                 return (10);
5589         default:
5590                 return (5);
5591         }
5592 }
5593
5594 static void
5595 ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5596     enum i40e_status_code status, void *msg, uint16_t len)
5597 {
5598         struct i40e_hw *hw;
5599         int global_vf_id;
5600
5601         hw = &pf->hw;
5602         global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
5603
5604         I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
5605             "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
5606             ixl_vc_opcode_str(op), op, status, vf->vf_num);
5607
5608         i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
5609 }
5610
5611 static void
5612 ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
5613 {
5614
5615         ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
5616 }
5617
5618 static void
5619 ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5620     enum i40e_status_code status, const char *file, int line)
5621 {
5622
5623         I40E_VC_DEBUG(pf, 1,
5624             "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
5625             ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
5626         ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
5627 }
5628
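     /* Reply to a VERSION request with the virtchnl version the PF speaks. */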
5629 static void
5630 ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5631     uint16_t msg_size)
5632 {
5633         struct i40e_virtchnl_version_info reply;
5634
5635         if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
5636                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
5637                     I40E_ERR_PARAM);
5638                 return;
5639         }
5640
5641         reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
5642         reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
5643         ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
5644             sizeof(reply));
5645 }
5646
5647 static void
5648 ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5649     uint16_t msg_size)
5650 {
5651
5652         if (msg_size != 0) {
5653                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
5654                     I40E_ERR_PARAM);
5655                 return;
5656         }
5657
5658         ixl_reset_vf(pf, vf);
5659
5660         /* No response to a reset message. */
5661 }
5662
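     /*
      * Reply to GET_VF_RESOURCES with the VF's single SR-IOV VSI, its queue
      * count, its MSI-X vector limit and its default MAC address.
      */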
5663 static void
5664 ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5665     uint16_t msg_size)
5666 {
5667         struct i40e_virtchnl_vf_resource reply;
5668
5669         if (msg_size != 0) {
5670                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5671                     I40E_ERR_PARAM);
5672                 return;
5673         }
5674
5675         bzero(&reply, sizeof(reply));
5676
5677         reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
5678
5679         reply.num_vsis = 1;
5680         reply.num_queue_pairs = vf->vsi.num_queues;
5681         reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
5682         reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
5683         reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
5684         reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
5685         memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
5686
5687         ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5688             I40E_SUCCESS, &reply, sizeof(reply));
5689 }
5690
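     /*
      * Program the HMC TX queue context for one VF queue and bind the
      * global queue to the VF through the QTX_CTL register.
      */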
5691 static int
5692 ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5693     struct i40e_virtchnl_txq_info *info)
5694 {
5695         struct i40e_hw *hw;
5696         struct i40e_hmc_obj_txq txq;
5697         uint16_t global_queue_num, global_vf_num;
5698         enum i40e_status_code status;
5699         uint32_t qtx_ctl;
5700
5701         hw = &pf->hw;
5702         global_queue_num = vf->vsi.first_queue + info->queue_id;
5703         global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5704         bzero(&txq, sizeof(txq));
5705
5706         status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
5707         if (status != I40E_SUCCESS)
5708                 return (EINVAL);
5709
5710         txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
5711
5712         txq.head_wb_ena = info->headwb_enabled;
5713         txq.head_wb_addr = info->dma_headwb_addr;
5714         txq.qlen = info->ring_len;
5715         txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
5716         txq.rdylist_act = 0;
5717
5718         status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
5719         if (status != I40E_SUCCESS)
5720                 return (EINVAL);
5721
5722         qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
5723             (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
5724             (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
5725         wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
5726         ixl_flush(hw);
5727
5728         return (0);
5729 }
5730
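     /*
      * Validate the VF's RX queue parameters (buffer sizes, maximum frame
      * size, optional header split) and program the HMC RX queue context.
      */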
5731 static int
5732 ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5733     struct i40e_virtchnl_rxq_info *info)
5734 {
5735         struct i40e_hw *hw;
5736         struct i40e_hmc_obj_rxq rxq;
5737         uint16_t global_queue_num;
5738         enum i40e_status_code status;
5739
5740         hw = &pf->hw;
5741         global_queue_num = vf->vsi.first_queue + info->queue_id;
5742         bzero(&rxq, sizeof(rxq));
5743
5744         if (info->databuffer_size > IXL_VF_MAX_BUFFER)
5745                 return (EINVAL);
5746
5747         if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
5748             info->max_pkt_size < ETHER_MIN_LEN)
5749                 return (EINVAL);
5750
5751         if (info->splithdr_enabled) {
5752                 if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
5753                         return (EINVAL);
5754
5755                 rxq.hsplit_0 = info->rx_split_pos &
5756                     (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
5757                      I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
5758                      I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
5759                      I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
5760                 rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
5761
5762                 rxq.dtype = 2;
5763         }
5764
5765         status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
5766         if (status != I40E_SUCCESS)
5767                 return (EINVAL);
5768
5769         rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
5770         rxq.qlen = info->ring_len;
5771
5772         rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
5773
5774         rxq.dsize = 1;
5775         rxq.crcstrip = 1;
5776         rxq.l2tsel = 1;
5777
5778         rxq.rxmax = info->max_pkt_size;
5779         rxq.tphrdesc_ena = 1;
5780         rxq.tphwdesc_ena = 1;
5781         rxq.tphdata_ena = 1;
5782         rxq.tphhead_ena = 1;
5783         rxq.lrxqthresh = 2;
5784         rxq.prefena = 1;
5785
5786         status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
5787         if (status != I40E_SUCCESS)
5788                 return (EINVAL);
5789
5790         return (0);
5791 }
5792
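     /*
      * Handle CONFIG_VSI_QUEUES: check the message layout and every queue
      * pair before programming the corresponding TX and RX queue contexts.
      */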
5793 static void
5794 ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5795     uint16_t msg_size)
5796 {
5797         struct i40e_virtchnl_vsi_queue_config_info *info;
5798         struct i40e_virtchnl_queue_pair_info *pair;
5799         int i;
5800
5801         if (msg_size < sizeof(*info)) {
5802                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5803                     I40E_ERR_PARAM);
5804                 return;
5805         }
5806
5807         info = msg;
5808         if (info->num_queue_pairs == 0) {
5809                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5810                     I40E_ERR_PARAM);
5811                 return;
5812         }
5813
5814         if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
5815                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5816                     I40E_ERR_PARAM);
5817                 return;
5818         }
5819
5820         if (info->vsi_id != vf->vsi.vsi_num) {
5821                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5822                     I40E_ERR_PARAM);
5823                 return;
5824         }
5825
5826         for (i = 0; i < info->num_queue_pairs; i++) {
5827                 pair = &info->qpair[i];
5828
5829                 if (pair->txq.vsi_id != vf->vsi.vsi_num ||
5830                     pair->rxq.vsi_id != vf->vsi.vsi_num ||
5831                     pair->txq.queue_id != pair->rxq.queue_id ||
5832                     pair->txq.queue_id >= vf->vsi.num_queues) {
5833
5834                         i40e_send_vf_nack(pf, vf,
5835                             I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5836                         return;
5837                 }
5838
5839                 if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
5840                         i40e_send_vf_nack(pf, vf,
5841                             I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5842                         return;
5843                 }
5844
5845                 if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
5846                         i40e_send_vf_nack(pf, vf,
5847                             I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5848                         return;
5849                 }
5850         }
5851
5852         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
5853 }
5854
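     /*
      * Write one entry of a VF interrupt linked list: point this queue's
      * QINT_RQCTL/TQCTL register at the previously written queue and
      * record this queue as the new head.  The RQCTL field macros are used
      * for both register types; this relies on the two sharing a layout.
      */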
5855 static void
5856 ixl_vf_set_qctl(struct ixl_pf *pf,
5857     const struct i40e_virtchnl_vector_map *vector,
5858     enum i40e_queue_type cur_type, uint16_t cur_queue,
5859     enum i40e_queue_type *last_type, uint16_t *last_queue)
5860 {
5861         uint32_t offset, qctl;
5862         uint16_t itr_indx;
5863
5864         if (cur_type == I40E_QUEUE_TYPE_RX) {
5865                 offset = I40E_QINT_RQCTL(cur_queue);
5866                 itr_indx = vector->rxitr_idx;
5867         } else {
5868                 offset = I40E_QINT_TQCTL(cur_queue);
5869                 itr_indx = vector->txitr_idx;
5870         }
5871
5872         qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5873             (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5874             (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5875             I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5876             (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
5877
5878         wr32(&pf->hw, offset, qctl);
5879
5880         *last_type = cur_type;
5881         *last_queue = cur_queue;
5882 }
5883
5884 static void
5885 ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
5886     const struct i40e_virtchnl_vector_map *vector)
5887 {
5888         struct i40e_hw *hw;
5889         u_int qindex;
5890         enum i40e_queue_type type, last_type;
5891         uint32_t lnklst_reg;
5892         uint16_t rxq_map, txq_map, cur_queue, last_queue;
5893
5894         hw = &pf->hw;
5895
5896         rxq_map = vector->rxq_map;
5897         txq_map = vector->txq_map;
5898
5899         last_queue = IXL_END_OF_INTR_LNKLST;
5900         last_type = I40E_QUEUE_TYPE_RX;
5901
5902         /*
5903          * The datasheet says that, to optimize performance, RX and TX
5904          * queues should be interleaved in the interrupt linked list, so
5905          * we process both at once here.
5906          */
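             /*
              * For example, with TX/RX queues 0 and 1 mapped to one vector,
              * the writes below produce the chain
              *   head -> RX1 -> TX1 -> RX0 -> TX0 -> end-of-list,
              * since each entry written points back at the previous one.
              */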
5907         while ((rxq_map != 0) || (txq_map != 0)) {
5908                 if (txq_map != 0) {
5909                         qindex = ffs(txq_map) - 1;
5910                         type = I40E_QUEUE_TYPE_TX;
5911                         cur_queue = vf->vsi.first_queue + qindex;
5912                         ixl_vf_set_qctl(pf, vector, type, cur_queue,
5913                             &last_type, &last_queue);
5914                         txq_map &= ~(1 << qindex);
5915                 }
5916
5917                 if (rxq_map != 0) {
5918                         qindex = ffs(rxq_map) - 1;
5919                         type = I40E_QUEUE_TYPE_RX;
5920                         cur_queue = vf->vsi.first_queue + qindex;
5921                         ixl_vf_set_qctl(pf, vector, type, cur_queue,
5922                             &last_type, &last_queue);
5923                         rxq_map &= ~(1 << qindex);
5924                 }
5925         }
5926
5927         if (vector->vector_id == 0)
5928                 lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
5929         else
5930                 lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
5931                     vf->vf_num);
5932         wr32(hw, lnklst_reg,
5933             (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5934             (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
5935
5936         ixl_flush(hw);
5937 }
5938
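     /*
      * Handle CONFIG_IRQ_MAP: bounds-check every vector mapping sent by
      * the VF, then program the interrupt linked list for each vector.
      */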
5939 static void
5940 ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5941     uint16_t msg_size)
5942 {
5943         struct i40e_virtchnl_irq_map_info *map;
5944         struct i40e_virtchnl_vector_map *vector;
5945         struct i40e_hw *hw;
5946         int i, largest_txq, largest_rxq;
5947
5948         hw = &pf->hw;
5949
5950         if (msg_size < sizeof(*map)) {
5951                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5952                     I40E_ERR_PARAM);
5953                 return;
5954         }
5955
5956         map = msg;
5957         if (map->num_vectors == 0) {
5958                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5959                     I40E_ERR_PARAM);
5960                 return;
5961         }
5962
5963         if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
5964                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5965                     I40E_ERR_PARAM);
5966                 return;
5967         }
5968
5969         for (i = 0; i < map->num_vectors; i++) {
5970                 vector = &map->vecmap[i];
5971
5972                 if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
5973                     vector->vsi_id != vf->vsi.vsi_num) {
5974                         i40e_send_vf_nack(pf, vf,
5975                             I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
5976                         return;
5977                 }
5978
5979                 if (vector->rxq_map != 0) {
5980                         largest_rxq = fls(vector->rxq_map) - 1;
5981                         if (largest_rxq >= vf->vsi.num_queues) {
5982                                 i40e_send_vf_nack(pf, vf,
5983                                     I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5984                                     I40E_ERR_PARAM);
5985                                 return;
5986                         }
5987                 }
5988
5989                 if (vector->txq_map != 0) {
5990                         largest_txq = fls(vector->txq_map) - 1;
5991                         if (largest_txq >= vf->vsi.num_queues) {
5992                                 i40e_send_vf_nack(pf, vf,
5993                                     I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5994                                     I40E_ERR_PARAM);
5995                                 return;
5996                         }
5997                 }
5998
5999                 if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
6000                     vector->txitr_idx > IXL_MAX_ITR_IDX) {
6001                         i40e_send_vf_nack(pf, vf,
6002                             I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6003                             I40E_ERR_PARAM);
6004                         return;
6005                 }
6006
6007                 ixl_vf_config_vector(pf, vf, vector);
6008         }
6009
6010         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
6011 }
6012
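     /*
      * Handle ENABLE_QUEUES.  The individual queue masks are only checked
      * for being nonzero; ixl_enable_rings() acts on the whole VSI.
      */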
6013 static void
6014 ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6015     uint16_t msg_size)
6016 {
6017         struct i40e_virtchnl_queue_select *select;
6018         int error;
6019
6020         if (msg_size != sizeof(*select)) {
6021                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6022                     I40E_ERR_PARAM);
6023                 return;
6024         }
6025
6026         select = msg;
6027         if (select->vsi_id != vf->vsi.vsi_num ||
6028             select->rx_queues == 0 || select->tx_queues == 0) {
6029                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6030                     I40E_ERR_PARAM);
6031                 return;
6032         }
6033
6034         error = ixl_enable_rings(&vf->vsi);
6035         if (error) {
6036                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6037                     I40E_ERR_TIMEOUT);
6038                 return;
6039         }
6040
6041         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
6042 }
6043
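     /*
      * Handle DISABLE_QUEUES; as with enabling, ixl_disable_rings() acts
      * on the whole VSI.
      */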
6044 static void
6045 ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6046     void *msg, uint16_t msg_size)
6047 {
6048         struct i40e_virtchnl_queue_select *select;
6049         int error;
6050
6051         if (msg_size != sizeof(*select)) {
6052                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6053                     I40E_ERR_PARAM);
6054                 return;
6055         }
6056
6057         select = msg;
6058         if (select->vsi_id != vf->vsi.vsi_num ||
6059             select->rx_queues == 0 || select->tx_queues == 0) {
6060                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6061                     I40E_ERR_PARAM);
6062                 return;
6063         }
6064
6065         error = ixl_disable_rings(&vf->vsi);
6066         if (error) {
6067                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6068                     I40E_ERR_TIMEOUT);
6069                 return;
6070         }
6071
6072         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
6073 }
6074
6075 static boolean_t
6076 ixl_zero_mac(const uint8_t *addr)
6077 {
6078         uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
6079
6080         return (cmp_etheraddr(addr, zero));
6081 }
6082
6083 static boolean_t
6084 ixl_bcast_mac(const uint8_t *addr)
6085 {
6086
6087         return (cmp_etheraddr(addr, ixl_bcast_addr));
6088 }
6089
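     /*
      * Check whether the VF may install a filter for the given MAC
      * address; returns 0 if allowed, or an errno value if not.
      */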
6090 static int
6091 ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
6092 {
6093
6094         if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
6095                 return (EINVAL);
6096
6097         /*
6098          * If the VF is not allowed to change its MAC address, don't let it
6099          * set a MAC filter for an address that is not a multicast address and
6100          * is not its assigned MAC.
6101          */
6102         if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
6103             !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
6104                 return (EPERM);
6105
6106         return (0);
6107 }
6108
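     /*
      * Handle ADD_ETHER_ADDRESS: validate every requested address before
      * installing any filter, so the operation is all-or-nothing.
      */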
6109 static void
6110 ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6111     uint16_t msg_size)
6112 {
6113         struct i40e_virtchnl_ether_addr_list *addr_list;
6114         struct i40e_virtchnl_ether_addr *addr;
6115         struct ixl_vsi *vsi;
6116         int i;
6117         size_t expected_size;
6118
6119         vsi = &vf->vsi;
6120
6121         if (msg_size < sizeof(*addr_list)) {
6122                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6123                     I40E_ERR_PARAM);
6124                 return;
6125         }
6126
6127         addr_list = msg;
6128         expected_size = sizeof(*addr_list) +
6129             addr_list->num_elements * sizeof(*addr);
6130
6131         if (addr_list->num_elements == 0 ||
6132             addr_list->vsi_id != vsi->vsi_num ||
6133             msg_size != expected_size) {
6134                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6135                     I40E_ERR_PARAM);
6136                 return;
6137         }
6138
6139         for (i = 0; i < addr_list->num_elements; i++) {
6140                 if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
6141                         i40e_send_vf_nack(pf, vf,
6142                             I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6143                         return;
6144                 }
6145         }
6146
6147         for (i = 0; i < addr_list->num_elements; i++) {
6148                 addr = &addr_list->list[i];
6149                 ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
6150         }
6151
6152         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
6153 }
6154
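     /* Handle DEL_ETHER_ADDRESS, with the same all-or-nothing validation. */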
6155 static void
6156 ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6157     uint16_t msg_size)
6158 {
6159         struct i40e_virtchnl_ether_addr_list *addr_list;
6160         struct i40e_virtchnl_ether_addr *addr;
6161         size_t expected_size;
6162         int i;
6163
6164         if (msg_size < sizeof(*addr_list)) {
6165                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
6166                     I40E_ERR_PARAM);
6167                 return;
6168         }
6169
6170         addr_list = msg;
6171         expected_size = sizeof(*addr_list) +
6172             addr_list->num_elements * sizeof(*addr);
6173
6174         if (addr_list->num_elements == 0 ||
6175             addr_list->vsi_id != vf->vsi.vsi_num ||
6176             msg_size != expected_size) {
6177                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
6178                     I40E_ERR_PARAM);
6179                 return;
6180         }
6181
6182         for (i = 0; i < addr_list->num_elements; i++) {
6183                 addr = &addr_list->list[i];
6184                 if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
6185                         i40e_send_vf_nack(pf, vf,
6186                             I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, I40E_ERR_PARAM);
6187                         return;
6188                 }
6189         }
6190
6191         for (i = 0; i < addr_list->num_elements; i++) {
6192                 addr = &addr_list->list[i];
6193                 ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
6194         }
6195
6196         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
6197 }
6198
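     /*
      * Enable VLAN tag stripping on the VF's VSI via an
      * update-VSI-parameters admin queue command.
      */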
6199 static enum i40e_status_code
6200 ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
6201 {
6202         struct i40e_vsi_context vsi_ctx;
6203
6204         vsi_ctx.seid = vf->vsi.seid;
6205
6206         bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
6207         vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
6208         vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
6209             I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6210         return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
6211 }
6212
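     /*
      * Handle ADD_VLAN: validate the filter list, enable VLAN stripping on
      * the VSI, then install a MAC/VLAN filter for each requested VLAN id.
      */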
6213 static void
6214 ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6215     uint16_t msg_size)
6216 {
6217         struct i40e_virtchnl_vlan_filter_list *filter_list;
6218         enum i40e_status_code code;
6219         size_t expected_size;
6220         int i;
6221
6222         if (msg_size < sizeof(*filter_list)) {
6223                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6224                     I40E_ERR_PARAM);
6225                 return;
6226         }
6227
6228         filter_list = msg;
6229         expected_size = sizeof(*filter_list) +
6230             filter_list->num_elements * sizeof(uint16_t);
6231         if (filter_list->num_elements == 0 ||
6232             filter_list->vsi_id != vf->vsi.vsi_num ||
6233             msg_size != expected_size) {
6234                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6235                     I40E_ERR_PARAM);
6236                 return;
6237         }
6238
6239         if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6240                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6241                     I40E_ERR_PARAM);
6242                 return;
6243         }
6244
6245         for (i = 0; i < filter_list->num_elements; i++) {
6246                 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6247                         i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6248                             I40E_ERR_PARAM);
6249                         return;
6250                 }
6251         }
6252
6253         code = ixl_vf_enable_vlan_strip(pf, vf);
6254         if (code != I40E_SUCCESS) {
6255                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6256                     I40E_ERR_PARAM);
                     return;         /* Don't also send an ACK below. */
6257         }
6258
6259         for (i = 0; i < filter_list->num_elements; i++)
6260                 ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6261
6262         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
6263 }
6264
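     /* Handle DEL_VLAN by removing the matching MAC/VLAN filters. */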
6265 static void
6266 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6267     uint16_t msg_size)
6268 {
6269         struct i40e_virtchnl_vlan_filter_list *filter_list;
6270         int i;
6271         size_t expected_size;
6272
6273         if (msg_size < sizeof(*filter_list)) {
6274                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6275                     I40E_ERR_PARAM);
6276                 return;
6277         }
6278
6279         filter_list = msg;
6280         expected_size = sizeof(*filter_list) +
6281             filter_list->num_elements * sizeof(uint16_t);
6282         if (filter_list->num_elements == 0 ||
6283             filter_list->vsi_id != vf->vsi.vsi_num ||
6284             msg_size != expected_size) {
6285                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6286                     I40E_ERR_PARAM);
6287                 return;
6288         }
6289
6290         for (i = 0; i < filter_list->num_elements; i++) {
6291                 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6292                         i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6293                             I40E_ERR_PARAM);
6294                         return;
6295                 }
6296         }
6297
6298         if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6299                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6300                     I40E_ERR_PARAM);
6301                 return;
6302         }
6303
6304         for (i = 0; i < filter_list->num_elements; i++)
6305                 ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6306
6307         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
6308 }
6309
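     /*
      * Handle CONFIG_PROMISCUOUS_MODE: if this VF is allowed to use
      * promiscuous mode, apply its requested unicast and multicast
      * settings to the VSI.
      */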
6310 static void
6311 ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6312     void *msg, uint16_t msg_size)
6313 {
6314         struct i40e_virtchnl_promisc_info *info;
6315         enum i40e_status_code code;
6316
6317         if (msg_size != sizeof(*info)) {
6318                 i40e_send_vf_nack(pf, vf,
6319                     I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6320                 return;
6321         }
6322
6323         if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
6324                 i40e_send_vf_nack(pf, vf,
6325                     I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6326                 return;
6327         }
6328
6329         info = msg;
6330         if (info->vsi_id != vf->vsi.vsi_num) {
6331                 i40e_send_vf_nack(pf, vf,
6332                     I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6333                 return;
6334         }
6335
6336         code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
6337             info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
6338         if (code != I40E_SUCCESS) {
6339                 i40e_send_vf_nack(pf, vf,
6340                     I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6341                 return;
6342         }
6343
6344         code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
6345             info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
6346         if (code != I40E_SUCCESS) {
6347                 i40e_send_vf_nack(pf, vf,
6348                     I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6349                 return;
6350         }
6351
6352         ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
6353 }
6354
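     /* Handle GET_STATS by returning the VSI's current ethernet stats. */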
6355 static void
6356 ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6357     uint16_t msg_size)
6358 {
6359         struct i40e_virtchnl_queue_select *queue;
6360
6361         if (msg_size != sizeof(*queue)) {
6362                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6363                     I40E_ERR_PARAM);
6364                 return;
6365         }
6366
6367         queue = msg;
6368         if (queue->vsi_id != vf->vsi.vsi_num) {
6369                 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6370                     I40E_ERR_PARAM);
6371                 return;
6372         }
6373
6374         ixl_update_eth_stats(&vf->vsi);
6375
6376         ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6377             I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
6378 }
6379
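     /*
      * Decode one admin queue event sent by a VF and dispatch it to the
      * handler for its virtchnl opcode.
      */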
6380 static void
6381 ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
6382 {
6383         struct ixl_vf *vf;
6384         void *msg;
6385         uint16_t vf_num, msg_size;
6386         uint32_t opcode;
6387
6388         vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
6389         opcode = le32toh(event->desc.cookie_high);
6390
6391         if (vf_num >= pf->num_vfs) {
6392                 device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
6393                 return;
6394         }
6395
6396         vf = &pf->vfs[vf_num];
6397         msg = event->msg_buf;
6398         msg_size = event->msg_len;
6399
6400         I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
6401             "Got msg %s(%d) from VF-%d of size %d\n",
6402             ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);
6403
6404         switch (opcode) {
6405         case I40E_VIRTCHNL_OP_VERSION:
6406                 ixl_vf_version_msg(pf, vf, msg, msg_size);
6407                 break;
6408         case I40E_VIRTCHNL_OP_RESET_VF:
6409                 ixl_vf_reset_msg(pf, vf, msg, msg_size);
6410                 break;
6411         case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
6412                 ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
6413                 break;
6414         case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
6415                 ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
6416                 break;
6417         case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
6418                 ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
6419                 break;
6420         case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
6421                 ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
6422                 break;
6423         case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
6424                 ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
6425                 break;
6426         case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
6427                 ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
6428                 break;
6429         case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
6430                 ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
6431                 break;
6432         case I40E_VIRTCHNL_OP_ADD_VLAN:
6433                 ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
6434                 break;
6435         case I40E_VIRTCHNL_OP_DEL_VLAN:
6436                 ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
6437                 break;
6438         case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
6439                 ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
6440                 break;
6441         case I40E_VIRTCHNL_OP_GET_STATS:
6442                 ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
6443                 break;
6444
6445         /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
6446         case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
6447         case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
6448         default:
6449                 i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
6450                 break;
6451         }
6452 }
6453
6454 /* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
6455 static void
6456 ixl_handle_vflr(void *arg, int pending)
6457 {
6458         struct ixl_pf *pf;
6459         struct i40e_hw *hw;
6460         uint16_t global_vf_num;
6461         uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
6462         int i;
6463
6464         pf = arg;
6465         hw = &pf->hw;
6466
6467         IXL_PF_LOCK(pf);
6468         for (i = 0; i < pf->num_vfs; i++) {
6469                 global_vf_num = hw->func_caps.vf_base_id + i;
6470
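                     /*
                      * GLGEN_VFLRSTAT holds one pending-FLR bit per VF,
                      * spread across several 32-bit registers; the
                      * INDEX/MASK macros pick the register and bit for
                      * this VF.  Writing the bit back acknowledges it
                      * (write-1-to-clear).
                      */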
6471                 vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
6472                 vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
6473                 vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
6474                 if (vflrstat & vflrstat_mask) {
6475                         wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
6476                             vflrstat_mask);
6477
6478                         ixl_reinit_vf(pf, &pf->vfs[i]);
6479                 }
6480         }
6481
6482         icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
6483         icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
6484         wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
6485         ixl_flush(hw);
6486
6487         IXL_PF_UNLOCK(pf);
6488 }
6489
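     /* Map an admin queue error code onto the closest errno(2) value. */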
6490 static int
6491 ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
6492 {
6493
6494         switch (err) {
6495         case I40E_AQ_RC_EPERM:
6496                 return (EPERM);
6497         case I40E_AQ_RC_ENOENT:
6498                 return (ENOENT);
6499         case I40E_AQ_RC_ESRCH:
6500                 return (ESRCH);
6501         case I40E_AQ_RC_EINTR:
6502                 return (EINTR);
6503         case I40E_AQ_RC_EIO:
6504                 return (EIO);
6505         case I40E_AQ_RC_ENXIO:
6506                 return (ENXIO);
6507         case I40E_AQ_RC_E2BIG:
6508                 return (E2BIG);
6509         case I40E_AQ_RC_EAGAIN:
6510                 return (EAGAIN);
6511         case I40E_AQ_RC_ENOMEM:
6512                 return (ENOMEM);
6513         case I40E_AQ_RC_EACCES:
6514                 return (EACCES);
6515         case I40E_AQ_RC_EFAULT:
6516                 return (EFAULT);
6517         case I40E_AQ_RC_EBUSY:
6518                 return (EBUSY);
6519         case I40E_AQ_RC_EEXIST:
6520                 return (EEXIST);
6521         case I40E_AQ_RC_EINVAL:
6522                 return (EINVAL);
6523         case I40E_AQ_RC_ENOTTY:
6524                 return (ENOTTY);
6525         case I40E_AQ_RC_ENOSPC:
6526                 return (ENOSPC);
6527         case I40E_AQ_RC_ENOSYS:
6528                 return (ENOSYS);
6529         case I40E_AQ_RC_ERANGE:
6530                 return (ERANGE);
6531         case I40E_AQ_RC_EFLUSHED:
6532                 return (EINVAL);        /* No exact equivalent in errno.h */
6533         case I40E_AQ_RC_BAD_ADDR:
6534                 return (EFAULT);
6535         case I40E_AQ_RC_EMODE:
6536                 return (EPERM);
6537         case I40E_AQ_RC_EFBIG:
6538                 return (EFBIG);
6539         default:
6540                 return (EINVAL);
6541         }
6542 }
6543
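     /*
      * iov_init method: allocate per-VF state and create the VEB that will
      * switch traffic between the PF and its VFs.
      */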
6544 static int
6545 ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t *params)
6546 {
6547         struct ixl_pf *pf;
6548         struct i40e_hw *hw;
6549         struct ixl_vsi *pf_vsi;
6550         enum i40e_status_code ret;
6551         int i, error;
6552
6553         pf = device_get_softc(dev);
6554         hw = &pf->hw;
6555         pf_vsi = &pf->vsi;
6556
6557         IXL_PF_LOCK(pf);
6558         pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
6559             M_ZERO);
6560
6561         if (pf->vfs == NULL) {
6562                 error = ENOMEM;
6563                 goto fail;
6564         }
6565
6566         for (i = 0; i < num_vfs; i++)
6567                 sysctl_ctx_init(&pf->vfs[i].ctx);
6568
6569         ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
6570             1, FALSE, FALSE, &pf->veb_seid, NULL);
6571         if (ret != I40E_SUCCESS) {
6572                 error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
6573                 device_printf(dev, "add_veb failed; code=%d error=%d\n",
6574                     ret, error);
6575                 goto fail;
6576         }
6577
6578         ixl_configure_msix(pf);
6579         ixl_enable_adminq(hw);
6580
6581         pf->num_vfs = num_vfs;
6582         IXL_PF_UNLOCK(pf);
6583         return (0);
6584
6585 fail:
6586         free(pf->vfs, M_IXL);
6587         pf->vfs = NULL;
6588         IXL_PF_UNLOCK(pf);
6589         return (error);
6590 }
6591
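     /*
      * iov_uninit method: delete every VF VSI and the VEB, then free the
      * per-VF state once the PF lock has been dropped.
      */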
6592 static void
6593 ixl_uninit_iov(device_t dev)
6594 {
6595         struct ixl_pf *pf;
6596         struct i40e_hw *hw;
6597         struct ixl_vsi *vsi;
6598         struct ifnet *ifp;
6599         struct ixl_vf *vfs;
6600         int i, num_vfs;
6601
6602         pf = device_get_softc(dev);
6603         hw = &pf->hw;
6604         vsi = &pf->vsi;
6605         ifp = vsi->ifp;
6606
6607         IXL_PF_LOCK(pf);
6608         for (i = 0; i < pf->num_vfs; i++) {
6609                 if (pf->vfs[i].vsi.seid != 0)
6610                         i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
6611         }
6612
6613         if (pf->veb_seid != 0) {
6614                 i40e_aq_delete_element(hw, pf->veb_seid, NULL);
6615                 pf->veb_seid = 0;
6616         }
6617
6618         if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
6619                 ixl_disable_intr(vsi);
6620
6621         vfs = pf->vfs;
6622         num_vfs = pf->num_vfs;
6623
6624         pf->vfs = NULL;
6625         pf->num_vfs = 0;
6626         IXL_PF_UNLOCK(pf);
6627
6628         /* Do this after the unlock as sysctl_ctx_free might sleep. */
6629         for (i = 0; i < num_vfs; i++)
6630                 sysctl_ctx_free(&vfs[i].ctx);
6631         free(vfs, M_IXL);
6632 }
6633
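     /*
      * iov_add_vf method: set up the VSI for a new VF and apply the
      * administrator-supplied parameters (MAC address, anti-spoofing,
      * promiscuous capability) before resetting the VF into a clean state.
      */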
6634 static int
6635 ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
6636 {
6637         char sysctl_name[QUEUE_NAME_LEN];
6638         struct ixl_pf *pf;
6639         struct ixl_vf *vf;
6640         const void *mac;
6641         size_t size;
6642         int error;
6643
6644         pf = device_get_softc(dev);
6645         vf = &pf->vfs[vfnum];
6646
6647         IXL_PF_LOCK(pf);
6648         vf->vf_num = vfnum;
6649
6650         vf->vsi.back = pf;
6651         vf->vf_flags = VF_FLAG_ENABLED;
6652         SLIST_INIT(&vf->vsi.ftl);
6653
6654         error = ixl_vf_setup_vsi(pf, vf);
6655         if (error != 0)
6656                 goto out;
6657
6658         if (nvlist_exists_binary(params, "mac-addr")) {
6659                 mac = nvlist_get_binary(params, "mac-addr", &size);
6660                 bcopy(mac, vf->mac, ETHER_ADDR_LEN);
6661
6662                 if (nvlist_get_bool(params, "allow-set-mac"))
6663                         vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6664         } else
6665                 /*
6666                  * If the administrator has not specified a MAC address then
6667                  * we must allow the VF to choose one.
6668                  */
6669                 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6670
6671         if (nvlist_get_bool(params, "mac-anti-spoof"))
6672                 vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
6673
6674         if (nvlist_get_bool(params, "allow-promisc"))
6675                 vf->vf_flags |= VF_FLAG_PROMISC_CAP;
6676
6677         vf->vf_flags |= VF_FLAG_VLAN_CAP;
6678
6679         ixl_reset_vf(pf, vf);
6680 out:
6681         IXL_PF_UNLOCK(pf);
6682         if (error == 0) {
6683                 snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
6684                 ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
6685         }
6686
6687         return (error);
6688 }
6689 #endif /* PCI_IOV */