1 /******************************************************************************
3 Copyright (c) 2013-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifndef IXL_STANDALONE_BUILD
37 #include "opt_inet6.h"
44 #include <net/rss_config.h>
/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixl_driver_version[] = "1.4.3";
52 /*********************************************************************
55 * Used by probe to select devices to load on
56 * Last field stores an index into ixl_strings
57 * Last entry must be all 0s
59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
60 *********************************************************************/
62 static ixl_vendor_info_t ixl_vendor_info_array[] =
64 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
65 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
66 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
67 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
68 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
69 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
70 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
71 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
72 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0},
73 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2, 0, 0, 0},
74 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A, 0, 0, 0},
76 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, 0, 0, 0},
77 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 0},
78 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, 0, 0, 0},
80 /* required last entry */
/*********************************************************************
 *  Table of branding strings
 *  (indexed by the String Index field of ixl_vendor_info_array)
 *********************************************************************/

static char    *ixl_strings[] = {
	"Intel(R) Ethernet Connection XL710 Driver"
};
93 /*********************************************************************
95 *********************************************************************/
96 static int ixl_probe(device_t);
97 static int ixl_attach(device_t);
98 static int ixl_detach(device_t);
99 static int ixl_shutdown(device_t);
100 static int ixl_get_hw_capabilities(struct ixl_pf *);
101 static void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
102 static int ixl_ioctl(struct ifnet *, u_long, caddr_t);
103 static void ixl_init(void *);
104 static void ixl_init_locked(struct ixl_pf *);
105 static void ixl_stop(struct ixl_pf *);
106 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
107 static int ixl_media_change(struct ifnet *);
108 static void ixl_update_link_status(struct ixl_pf *);
109 static int ixl_allocate_pci_resources(struct ixl_pf *);
110 static u16 ixl_get_bus_info(struct i40e_hw *, device_t);
111 static int ixl_setup_stations(struct ixl_pf *);
112 static int ixl_switch_config(struct ixl_pf *);
113 static int ixl_initialize_vsi(struct ixl_vsi *);
114 static int ixl_assign_vsi_msix(struct ixl_pf *);
115 static int ixl_assign_vsi_legacy(struct ixl_pf *);
116 static int ixl_init_msix(struct ixl_pf *);
117 static void ixl_configure_msix(struct ixl_pf *);
118 static void ixl_configure_itr(struct ixl_pf *);
119 static void ixl_configure_legacy(struct ixl_pf *);
120 static void ixl_free_pci_resources(struct ixl_pf *);
121 static void ixl_local_timer(void *);
122 static int ixl_setup_interface(device_t, struct ixl_vsi *);
123 static void ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
124 static void ixl_config_rss(struct ixl_vsi *);
125 static void ixl_set_queue_rx_itr(struct ixl_queue *);
126 static void ixl_set_queue_tx_itr(struct ixl_queue *);
127 static int ixl_set_advertised_speeds(struct ixl_pf *, int);
129 static int ixl_enable_rings(struct ixl_vsi *);
130 static int ixl_disable_rings(struct ixl_vsi *);
131 static void ixl_enable_intr(struct ixl_vsi *);
132 static void ixl_disable_intr(struct ixl_vsi *);
133 static void ixl_disable_rings_intr(struct ixl_vsi *);
135 static void ixl_enable_adminq(struct i40e_hw *);
136 static void ixl_disable_adminq(struct i40e_hw *);
137 static void ixl_enable_queue(struct i40e_hw *, int);
138 static void ixl_disable_queue(struct i40e_hw *, int);
139 static void ixl_enable_legacy(struct i40e_hw *);
140 static void ixl_disable_legacy(struct i40e_hw *);
142 static void ixl_set_promisc(struct ixl_vsi *);
143 static void ixl_add_multi(struct ixl_vsi *);
144 static void ixl_del_multi(struct ixl_vsi *);
145 static void ixl_register_vlan(void *, struct ifnet *, u16);
146 static void ixl_unregister_vlan(void *, struct ifnet *, u16);
147 static void ixl_setup_vlan_filters(struct ixl_vsi *);
149 static void ixl_init_filters(struct ixl_vsi *);
150 static void ixl_reconfigure_filters(struct ixl_vsi *vsi);
151 static void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
152 static void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
153 static void ixl_add_hw_filters(struct ixl_vsi *, int, int);
154 static void ixl_del_hw_filters(struct ixl_vsi *, int);
155 static struct ixl_mac_filter *
156 ixl_find_filter(struct ixl_vsi *, u8 *, s16);
157 static void ixl_add_mc_filter(struct ixl_vsi *, u8 *);
158 static void ixl_free_mac_filters(struct ixl_vsi *vsi);
161 /* Sysctl debug interface */
162 static int ixl_debug_info(SYSCTL_HANDLER_ARGS);
163 static void ixl_print_debug_info(struct ixl_pf *);
165 /* The MSI/X Interrupt handlers */
166 static void ixl_intr(void *);
167 static void ixl_msix_que(void *);
168 static void ixl_msix_adminq(void *);
169 static void ixl_handle_mdd_event(struct ixl_pf *);
171 /* Deferred interrupt tasklets */
172 static void ixl_do_adminq(void *, int);
174 /* Sysctl handlers */
175 static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
176 static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
177 static int ixl_current_speed(SYSCTL_HANDLER_ARGS);
178 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
181 static void ixl_add_hw_stats(struct ixl_pf *);
182 static void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
183 struct sysctl_oid_list *, struct i40e_hw_port_stats *);
184 static void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
185 struct sysctl_oid_list *,
186 struct i40e_eth_stats *);
187 static void ixl_update_stats_counters(struct ixl_pf *);
188 static void ixl_update_eth_stats(struct ixl_vsi *);
189 static void ixl_update_vsi_stats(struct ixl_vsi *);
190 static void ixl_pf_reset_stats(struct ixl_pf *);
191 static void ixl_vsi_reset_stats(struct ixl_vsi *);
192 static void ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
194 static void ixl_stat_update32(struct i40e_hw *, u32, bool,
197 #ifdef IXL_DEBUG_SYSCTL
198 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
199 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
200 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
201 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
202 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
206 static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
208 static int ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t*);
209 static void ixl_uninit_iov(device_t dev);
210 static int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);
212 static void ixl_handle_vf_msg(struct ixl_pf *,
213 struct i40e_arq_event_info *);
214 static void ixl_handle_vflr(void *arg, int pending);
216 static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
217 static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
220 /*********************************************************************
221 * FreeBSD Device Interface Entry Points
222 *********************************************************************/
224 static device_method_t ixl_methods[] = {
225 /* Device interface */
226 DEVMETHOD(device_probe, ixl_probe),
227 DEVMETHOD(device_attach, ixl_attach),
228 DEVMETHOD(device_detach, ixl_detach),
229 DEVMETHOD(device_shutdown, ixl_shutdown),
231 DEVMETHOD(pci_init_iov, ixl_init_iov),
232 DEVMETHOD(pci_uninit_iov, ixl_uninit_iov),
233 DEVMETHOD(pci_add_vf, ixl_add_vf),
238 static driver_t ixl_driver = {
239 "ixl", ixl_methods, sizeof(struct ixl_pf),
242 devclass_t ixl_devclass;
243 DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
245 MODULE_DEPEND(ixl, pci, 1, 1, 1);
246 MODULE_DEPEND(ixl, ether, 1, 1, 1);
248 MODULE_DEPEND(ixl, netmap, 1, 1, 1);
249 #endif /* DEV_NETMAP */
252 ** Global reset mutex
254 static struct mtx ixl_reset_mtx;
257 ** TUNEABLE PARAMETERS:
260 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
261 "IXL driver parameters");
264 * MSIX should be the default for best performance,
265 * but this allows it to be forced off for testing.
267 static int ixl_enable_msix = 1;
268 TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
269 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
270 "Enable MSI-X interrupts");
273 ** Number of descriptors per ring:
274 ** - TX and RX are the same size
276 static int ixl_ringsz = DEFAULT_RING;
277 TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
278 SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
279 &ixl_ringsz, 0, "Descriptor Ring Size");
282 ** This can be set manually, if left as 0 the
283 ** number of queues will be calculated based
284 ** on cpus and msix vectors available.
286 int ixl_max_queues = 0;
287 TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
288 SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
289 &ixl_max_queues, 0, "Number of Queues");
292 ** Controls for Interrupt Throttling
293 ** - true/false for dynamic adjustment
294 ** - default values for static ITR
296 int ixl_dynamic_rx_itr = 0;
297 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
298 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
299 &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
301 int ixl_dynamic_tx_itr = 0;
302 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
303 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
304 &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
306 int ixl_rx_itr = IXL_ITR_8K;
307 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
308 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
309 &ixl_rx_itr, 0, "RX Interrupt Rate");
311 int ixl_tx_itr = IXL_ITR_4K;
312 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
313 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
314 &ixl_tx_itr, 0, "TX Interrupt Rate");
317 static int ixl_enable_fdir = 1;
318 TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
319 /* Rate at which we sample */
320 int ixl_atr_rate = 20;
321 TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
325 #define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
326 #include <dev/netmap/if_ixl_netmap.h>
327 #endif /* DEV_NETMAP */
329 static char *ixl_fc_string[6] = {
338 static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
340 static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
341 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
343 /*********************************************************************
344 * Device identification routine
346 * ixl_probe determines if the driver should be loaded on
347 * the hardware based on PCI vendor/device id of the device.
349 * return BUS_PROBE_DEFAULT on success, positive on failure
350 *********************************************************************/
353 ixl_probe(device_t dev)
355 ixl_vendor_info_t *ent;
357 u16 pci_vendor_id, pci_device_id;
358 u16 pci_subvendor_id, pci_subdevice_id;
359 char device_name[256];
360 static bool lock_init = FALSE;
362 INIT_DEBUGOUT("ixl_probe: begin");
364 pci_vendor_id = pci_get_vendor(dev);
365 if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
368 pci_device_id = pci_get_device(dev);
369 pci_subvendor_id = pci_get_subvendor(dev);
370 pci_subdevice_id = pci_get_subdevice(dev);
372 ent = ixl_vendor_info_array;
373 while (ent->vendor_id != 0) {
374 if ((pci_vendor_id == ent->vendor_id) &&
375 (pci_device_id == ent->device_id) &&
377 ((pci_subvendor_id == ent->subvendor_id) ||
378 (ent->subvendor_id == 0)) &&
380 ((pci_subdevice_id == ent->subdevice_id) ||
381 (ent->subdevice_id == 0))) {
382 sprintf(device_name, "%s, Version - %s",
383 ixl_strings[ent->index],
385 device_set_desc_copy(dev, device_name);
386 /* One shot mutex init */
387 if (lock_init == FALSE) {
389 mtx_init(&ixl_reset_mtx,
391 "IXL RESET Lock", MTX_DEF);
393 return (BUS_PROBE_DEFAULT);
400 /*********************************************************************
401 * Device initialization routine
403 * The attach entry point is called when the driver is being loaded.
404 * This routine identifies the type of hardware, allocates all resources
405 * and initializes the hardware.
407 * return 0 on success, positive on failure
408 *********************************************************************/
411 ixl_attach(device_t dev)
419 nvlist_t *pf_schema, *vf_schema;
423 INIT_DEBUGOUT("ixl_attach: begin");
425 /* Allocate, clear, and link in our primary soft structure */
426 pf = device_get_softc(dev);
427 pf->dev = pf->osdep.dev = dev;
431 ** Note this assumes we have a single embedded VSI,
432 ** this could be enhanced later to allocate multiple
438 IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
440 /* Set up the timer callout */
441 callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
444 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
445 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
446 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
447 pf, 0, ixl_set_flowcntl, "I", "Flow Control");
449 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
450 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
451 OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
452 pf, 0, ixl_set_advertise, "I", "Advertised Speed");
454 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
455 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
456 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
457 pf, 0, ixl_current_speed, "A", "Current Port Speed");
459 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
460 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
461 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
462 pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
464 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
465 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
466 OID_AUTO, "rx_itr", CTLFLAG_RW,
467 &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
469 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
470 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
471 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
472 &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
474 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
475 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
476 OID_AUTO, "tx_itr", CTLFLAG_RW,
477 &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
479 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
480 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
481 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
482 &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
484 #ifdef IXL_DEBUG_SYSCTL
485 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
486 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
487 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
488 ixl_debug_info, "I", "Debug Information");
490 /* Debug shared-code message level */
491 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
492 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
493 OID_AUTO, "debug_mask", CTLFLAG_RW,
494 &pf->hw.debug_mask, 0, "Debug Message Level");
496 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
497 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
498 OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
499 0, "PF/VF Virtual Channel debug level");
501 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
502 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
503 OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
504 pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");
506 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
507 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
508 OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
509 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
511 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
512 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
513 OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
514 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
516 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
517 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
518 OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
519 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
521 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
522 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
523 OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
524 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
527 /* Save off the PCI information */
528 hw->vendor_id = pci_get_vendor(dev);
529 hw->device_id = pci_get_device(dev);
530 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
531 hw->subsystem_vendor_id =
532 pci_read_config(dev, PCIR_SUBVEND_0, 2);
533 hw->subsystem_device_id =
534 pci_read_config(dev, PCIR_SUBDEV_0, 2);
536 hw->bus.device = pci_get_slot(dev);
537 hw->bus.func = pci_get_function(dev);
539 pf->vc_debug_lvl = 1;
541 /* Do PCI setup - map BAR0, etc */
542 if (ixl_allocate_pci_resources(pf)) {
543 device_printf(dev, "Allocation of PCI resources failed\n");
548 /* Establish a clean starting point */
550 error = i40e_pf_reset(hw);
552 device_printf(dev,"PF reset failure %x\n", error);
557 /* Set admin queue parameters */
558 hw->aq.num_arq_entries = IXL_AQ_LEN;
559 hw->aq.num_asq_entries = IXL_AQ_LEN;
560 hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
561 hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
563 /* Initialize the shared code */
564 error = i40e_init_shared_code(hw);
566 device_printf(dev,"Unable to initialize the shared code\n");
571 /* Set up the admin queue */
572 error = i40e_init_adminq(hw);
574 device_printf(dev, "The driver for the device stopped "
575 "because the NVM image is newer than expected.\n"
576 "You must install the most recent version of "
577 " the network driver.\n");
580 device_printf(dev, "%s\n", ixl_fw_version_str(hw));
582 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
583 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
584 device_printf(dev, "The driver for the device detected "
585 "a newer version of the NVM image than expected.\n"
586 "Please install the most recent version of the network driver.\n");
587 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
588 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
589 device_printf(dev, "The driver for the device detected "
590 "an older version of the NVM image than expected.\n"
591 "Please update the NVM image.\n");
594 i40e_clear_pxe_mode(hw);
596 /* Get capabilities from the device */
597 error = ixl_get_hw_capabilities(pf);
599 device_printf(dev, "HW capabilities failure!\n");
603 /* Set up host memory cache */
604 error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
605 hw->func_caps.num_rx_qp, 0, 0);
607 device_printf(dev, "init_lan_hmc failed: %d\n", error);
611 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
613 device_printf(dev, "configure_lan_hmc failed: %d\n", error);
617 /* Disable LLDP from the firmware */
618 i40e_aq_stop_lldp(hw, TRUE, NULL);
620 i40e_get_mac_addr(hw, hw->mac.addr);
621 error = i40e_validate_mac_addr(hw->mac.addr);
623 device_printf(dev, "validate_mac_addr failed: %d\n", error);
626 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
627 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
629 /* Set up VSI and queues */
630 if (ixl_setup_stations(pf) != 0) {
631 device_printf(dev, "setup stations failed!\n");
636 /* Initialize mac filter list for VSI */
637 SLIST_INIT(&vsi->ftl);
639 /* Set up interrupt routing here */
641 error = ixl_assign_vsi_msix(pf);
643 error = ixl_assign_vsi_legacy(pf);
647 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
648 (hw->aq.fw_maj_ver < 4)) {
650 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
652 device_printf(dev, "link restart failed, aq_err=%d\n",
653 pf->hw.aq.asq_last_status);
656 /* Determine link state */
657 i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
658 i40e_get_link_status(hw, &pf->link_up);
660 /* Setup OS specific network interface */
661 if (ixl_setup_interface(dev, vsi) != 0) {
662 device_printf(dev, "interface setup failed!\n");
667 error = ixl_switch_config(pf);
669 device_printf(dev, "Initial switch config failed: %d\n", error);
673 /* Limit phy interrupts to link and modules failure */
674 error = i40e_aq_set_phy_int_mask(hw,
675 I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
677 device_printf(dev, "set phy mask failed: %d\n", error);
679 /* Get the bus configuration and set the shared code */
680 bus = ixl_get_bus_info(hw, dev);
681 i40e_set_pci_config_data(hw, bus);
683 /* Initialize statistics */
684 ixl_pf_reset_stats(pf);
685 ixl_update_stats_counters(pf);
686 ixl_add_hw_stats(pf);
688 /* Register for VLAN events */
689 vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
690 ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
691 vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
692 ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
695 /* SR-IOV is only supported when MSI-X is in use. */
697 pf_schema = pci_iov_schema_alloc_node();
698 vf_schema = pci_iov_schema_alloc_node();
699 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
700 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
701 IOV_SCHEMA_HASDEFAULT, TRUE);
702 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
703 IOV_SCHEMA_HASDEFAULT, FALSE);
704 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
705 IOV_SCHEMA_HASDEFAULT, FALSE);
707 iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
710 "Failed to initialize SR-IOV (error=%d)\n",
716 ixl_netmap_attach(vsi);
717 #endif /* DEV_NETMAP */
718 INIT_DEBUGOUT("ixl_attach: end");
722 if (vsi->ifp != NULL)
725 i40e_shutdown_lan_hmc(hw);
727 i40e_shutdown_adminq(hw);
729 ixl_free_pci_resources(pf);
731 IXL_PF_LOCK_DESTROY(pf);
735 /*********************************************************************
736 * Device removal routine
738 * The detach entry point is called when the driver is being removed.
739 * This routine stops the adapter and deallocates all the resources
740 * that were allocated for driver operation.
742 * return 0 on success, positive on failure
743 *********************************************************************/
746 ixl_detach(device_t dev)
748 struct ixl_pf *pf = device_get_softc(dev);
749 struct i40e_hw *hw = &pf->hw;
750 struct ixl_vsi *vsi = &pf->vsi;
751 struct ixl_queue *que = vsi->queues;
757 INIT_DEBUGOUT("ixl_detach: begin");
759 /* Make sure VLANS are not using driver */
760 if (vsi->ifp->if_vlantrunk != NULL) {
761 device_printf(dev,"Vlan in use, detach first\n");
766 error = pci_iov_detach(dev);
768 device_printf(dev, "SR-IOV in use; detach first.\n");
773 ether_ifdetach(vsi->ifp);
774 if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
780 for (int i = 0; i < vsi->num_queues; i++, que++) {
782 taskqueue_drain(que->tq, &que->task);
783 taskqueue_drain(que->tq, &que->tx_task);
784 taskqueue_free(que->tq);
788 /* Shutdown LAN HMC */
789 status = i40e_shutdown_lan_hmc(hw);
792 "Shutdown LAN HMC failed with code %d\n", status);
794 /* Shutdown admin queue */
795 status = i40e_shutdown_adminq(hw);
798 "Shutdown Admin queue failed with code %d\n", status);
800 /* Unregister VLAN events */
801 if (vsi->vlan_attach != NULL)
802 EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
803 if (vsi->vlan_detach != NULL)
804 EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
806 callout_drain(&pf->timer);
808 netmap_detach(vsi->ifp);
809 #endif /* DEV_NETMAP */
810 ixl_free_pci_resources(pf);
811 bus_generic_detach(dev);
814 IXL_PF_LOCK_DESTROY(pf);
818 /*********************************************************************
820 * Shutdown entry point
822 **********************************************************************/
825 ixl_shutdown(device_t dev)
827 struct ixl_pf *pf = device_get_softc(dev);
835 /*********************************************************************
837 * Get the hardware capabilities
839 **********************************************************************/
842 ixl_get_hw_capabilities(struct ixl_pf *pf)
844 struct i40e_aqc_list_capabilities_element_resp *buf;
845 struct i40e_hw *hw = &pf->hw;
846 device_t dev = pf->dev;
851 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
853 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
854 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
855 device_printf(dev, "Unable to allocate cap memory\n");
859 /* This populates the hw struct */
860 error = i40e_aq_discover_capabilities(hw, buf, len,
861 &needed, i40e_aqc_opc_list_func_capabilities, NULL);
863 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
865 /* retry once with a larger buffer */
869 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
870 device_printf(dev, "capability discovery failed: %d\n",
871 pf->hw.aq.asq_last_status);
875 /* Capture this PF's starting queue pair */
876 pf->qbase = hw->func_caps.base_queue;
879 device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
880 "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
881 hw->pf_id, hw->func_caps.num_vfs,
882 hw->func_caps.num_msix_vectors,
883 hw->func_caps.num_msix_vectors_vf,
884 hw->func_caps.fd_filters_guaranteed,
885 hw->func_caps.fd_filters_best_effort,
886 hw->func_caps.num_tx_qp,
887 hw->func_caps.num_rx_qp,
888 hw->func_caps.base_queue);
894 ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
896 device_t dev = vsi->dev;
898 /* Enable/disable TXCSUM/TSO4 */
899 if (!(ifp->if_capenable & IFCAP_TXCSUM)
900 && !(ifp->if_capenable & IFCAP_TSO4)) {
901 if (mask & IFCAP_TXCSUM) {
902 ifp->if_capenable |= IFCAP_TXCSUM;
903 /* enable TXCSUM, restore TSO if previously enabled */
904 if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
905 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
906 ifp->if_capenable |= IFCAP_TSO4;
909 else if (mask & IFCAP_TSO4) {
910 ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
911 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
913 "TSO4 requires txcsum, enabling both...\n");
915 } else if((ifp->if_capenable & IFCAP_TXCSUM)
916 && !(ifp->if_capenable & IFCAP_TSO4)) {
917 if (mask & IFCAP_TXCSUM)
918 ifp->if_capenable &= ~IFCAP_TXCSUM;
919 else if (mask & IFCAP_TSO4)
920 ifp->if_capenable |= IFCAP_TSO4;
921 } else if((ifp->if_capenable & IFCAP_TXCSUM)
922 && (ifp->if_capenable & IFCAP_TSO4)) {
923 if (mask & IFCAP_TXCSUM) {
924 vsi->flags |= IXL_FLAGS_KEEP_TSO4;
925 ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
927 "TSO4 requires txcsum, disabling both...\n");
928 } else if (mask & IFCAP_TSO4)
929 ifp->if_capenable &= ~IFCAP_TSO4;
932 /* Enable/disable TXCSUM_IPV6/TSO6 */
933 if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
934 && !(ifp->if_capenable & IFCAP_TSO6)) {
935 if (mask & IFCAP_TXCSUM_IPV6) {
936 ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
937 if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
938 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
939 ifp->if_capenable |= IFCAP_TSO6;
941 } else if (mask & IFCAP_TSO6) {
942 ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
943 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
945 "TSO6 requires txcsum6, enabling both...\n");
947 } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
948 && !(ifp->if_capenable & IFCAP_TSO6)) {
949 if (mask & IFCAP_TXCSUM_IPV6)
950 ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
951 else if (mask & IFCAP_TSO6)
952 ifp->if_capenable |= IFCAP_TSO6;
953 } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
954 && (ifp->if_capenable & IFCAP_TSO6)) {
955 if (mask & IFCAP_TXCSUM_IPV6) {
956 vsi->flags |= IXL_FLAGS_KEEP_TSO6;
957 ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
959 "TSO6 requires txcsum6, disabling both...\n");
960 } else if (mask & IFCAP_TSO6)
961 ifp->if_capenable &= ~IFCAP_TSO6;
965 /*********************************************************************
968 * ixl_ioctl is called when the user wants to configure the
971 * return 0 on success, positive on failure
972 **********************************************************************/
975 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
977 struct ixl_vsi *vsi = ifp->if_softc;
978 struct ixl_pf *pf = vsi->back;
979 struct ifreq *ifr = (struct ifreq *) data;
980 #if defined(INET) || defined(INET6)
981 struct ifaddr *ifa = (struct ifaddr *)data;
982 bool avoid_reset = FALSE;
990 if (ifa->ifa_addr->sa_family == AF_INET)
994 if (ifa->ifa_addr->sa_family == AF_INET6)
997 #if defined(INET) || defined(INET6)
999 ** Calling init results in link renegotiation,
1000 ** so we avoid doing it when possible.
1003 ifp->if_flags |= IFF_UP;
1004 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1007 if (!(ifp->if_flags & IFF_NOARP))
1008 arp_ifinit(ifp, ifa);
1011 error = ether_ioctl(ifp, command, data);
1015 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1016 if (ifr->ifr_mtu > IXL_MAX_FRAME -
1017 ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
1021 ifp->if_mtu = ifr->ifr_mtu;
1022 vsi->max_frame_size =
1023 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1024 + ETHER_VLAN_ENCAP_LEN;
1025 ixl_init_locked(pf);
1030 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
1032 if (ifp->if_flags & IFF_UP) {
1033 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1034 if ((ifp->if_flags ^ pf->if_flags) &
1035 (IFF_PROMISC | IFF_ALLMULTI)) {
1036 ixl_set_promisc(vsi);
1039 ixl_init_locked(pf);
1041 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1043 pf->if_flags = ifp->if_flags;
1047 IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
1048 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1050 ixl_disable_intr(vsi);
1052 ixl_enable_intr(vsi);
1057 IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
1058 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1060 ixl_disable_intr(vsi);
1062 ixl_enable_intr(vsi);
1068 #ifdef IFM_ETH_XTYPE
1071 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
1072 error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
1076 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1077 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
1079 ixl_cap_txcsum_tso(vsi, ifp, mask);
1081 if (mask & IFCAP_RXCSUM)
1082 ifp->if_capenable ^= IFCAP_RXCSUM;
1083 if (mask & IFCAP_RXCSUM_IPV6)
1084 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1085 if (mask & IFCAP_LRO)
1086 ifp->if_capenable ^= IFCAP_LRO;
1087 if (mask & IFCAP_VLAN_HWTAGGING)
1088 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1089 if (mask & IFCAP_VLAN_HWFILTER)
1090 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1091 if (mask & IFCAP_VLAN_HWTSO)
1092 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1093 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1095 ixl_init_locked(pf);
1098 VLAN_CAPABILITIES(ifp);
1104 IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
1105 error = ether_ioctl(ifp, command, data);
1113 /*********************************************************************
1116 * This routine is used in two ways. It is used by the stack as
1117 * init entry point in network interface structure. It is also used
1118 * by the driver as a hw/sw initialization routine to get to a
1121 * return 0 on success, positive on failure
1122 **********************************************************************/
/*
 * ixl_init_locked - hw/sw initialization routine for the PF.
 * Refreshes the MAC filter (the user may have set an LAA), programs
 * checksum/TSO offload assist flags, filter control, RSS, rings,
 * VLAN filters, the local timer, interrupt routing and finally marks
 * the interface RUNNING.  Caller must hold pf->pf_mtx (asserted).
 * NOTE(review): this listing is an excerpt; braces and some
 * declarations (e.g. 'ret') are elided between the visible lines.
 */
ixl_init_locked(struct ixl_pf *pf)
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ifnet *ifp = vsi->ifp;
	device_t dev = pf->dev;
	struct i40e_filter_control_settings filter;
	u8 tmpaddr[ETHER_ADDR_LEN];

	mtx_assert(&pf->pf_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixl_init: begin");

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
	    I40E_ETH_LENGTH_OF_ADDRESS);
	/* Only rewrite the HW address if it changed and is valid */
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		bcopy(tmpaddr, hw->mac.addr,
		    I40E_ETH_LENGTH_OF_ADDRESS);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		/*
		 * NOTE(review): adjacent string literals concatenate to
		 * "LLA addresschange failed!!" -- missing space between
		 * "address" and "change".
		 */
		device_printf(dev, "LLA address"
		    "change failed!!\n");
		ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = TRUE;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "set_filter_control() failed\n");

	ixl_config_rss(vsi);

	/*
	** Prepare the VSI: rings, hmc contexts, etc...
	*/
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Setup vlan's if needed */
	ixl_setup_vlan_filters(vsi);

	/* Start the local timer */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);

	/* Set up MSI/X routing and the ITR settings */
	if (ixl_enable_msix) {
		ixl_configure_msix(pf);
		ixl_configure_itr(pf);
		ixl_configure_legacy(pf);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	ixl_reconfigure_filters(vsi);

	/* Set MTU in hardware*/
	int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
		device_printf(vsi->dev,
		    "aq_set_mac_config in init error, code %d\n",

	/* And now turn on interrupts */
	ixl_enable_intr(vsi);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	/*
	 * Body of the stack's if_init entry point (signature elided in
	 * this excerpt): recover the PF from the opaque arg and run the
	 * locked initializer -- presumably under pf->pf_mtx; the
	 * lock/unlock lines are not visible here (TODO confirm).
	 */
	struct ixl_pf *pf = arg;

	ixl_init_locked(pf);
1242 ** MSIX Interrupt Handlers and Tasklets
/*
 * ixl_handle_que - deferred (taskqueue) per-queue service routine.
 * Drains RX, restarts TX if the buf ring has work, and re-queues
 * itself while more work remains; finally re-enables the queue IRQ.
 * NOTE(review): excerpt -- braces and TX-clean lines are elided.
 */
ixl_handle_que(void *context, int pending)
	struct ixl_queue *que = context;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct tx_ring *txr = &que->txr;
	struct ifnet *ifp = vsi->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);
		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		/* Still more to do: reschedule ourselves */
		taskqueue_enqueue(que->tq, &que->task);

	/* Reenable this interrupt - hmmm */
	ixl_enable_queue(hw, que->me);
1274 /*********************************************************************
1276 * Legacy Interrupt Service routine
1278 **********************************************************************/
	/*
	 * Legacy/MSI interrupt service routine (signature elided in this
	 * excerpt).  Reads and acks ICR0, dispatches VFLR and AdminQ
	 * causes to the PF taskqueue, services queue 0 RX/TX inline,
	 * then re-enables the cause bits and the legacy interrupt.
	 */
	struct ixl_pf *pf = arg;
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_queue *que = vsi->queues;
	struct ifnet *ifp = vsi->ifp;
	struct tx_ring *txr = &que->txr;
	u32 reg, icr0, mask;
	bool more_tx, more_rx;

	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)

	icr0 = rd32(hw, I40E_PFINT_ICR0);

	/* Ack the interrupt: set CLEARPBA in DYN_CTL0 */
	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
		taskqueue_enqueue(pf->tq, &pf->vflr_task);

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		taskqueue_enqueue(pf->tq, &pf->adminq);

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	more_tx = ixl_txeof(que);
	if (!drbr_empty(vsi->ifp, txr->br))

	/* re-enable other interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, mask);

	/* And now the queues */
	reg = rd32(hw, I40E_QINT_RQCTL(0));
	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = rd32(hw, I40E_QINT_TQCTL(0));
	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
	wr32(hw, I40E_QINT_TQCTL(0), reg);

	ixl_enable_legacy(hw);
1342 /*********************************************************************
1344 * MSIX VSI Interrupt Service routine
1346 **********************************************************************/
/*
 * ixl_msix_que - MSI-X per-queue interrupt service routine.
 * Cleans RX and TX for one queue, updates adaptive ITR, and either
 * defers remaining work to the queue task or re-enables the vector.
 * NOTE(review): excerpt -- braces/returns elided between lines.
 */
ixl_msix_que(void *arg)
	struct ixl_queue *que = arg;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct tx_ring *txr = &que->txr;
	bool more_tx, more_rx;

	/* Protect against spurious interrupts */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))

	/* Re-tune interrupt moderation from observed traffic */
	ixl_set_queue_rx_itr(que);
	ixl_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
		ixl_enable_queue(hw, que->me);
1387 /*********************************************************************
1389 * MSIX Admin Queue Interrupt Service routine
1391 **********************************************************************/
/*
 * ixl_msix_adminq - MSI-X Admin Queue ("other" causes) ISR.
 * Reads ICR0, masks causes that will be handled out of line (AdminQ,
 * malicious-driver detect, VFLR), acks the interrupt, and defers the
 * AdminQ processing to the PF taskqueue.
 */
ixl_msix_adminq(void *arg)
	struct ixl_pf *pf = arg;
	struct i40e_hw *hw = &pf->hw;

	reg = rd32(hw, I40E_PFINT_ICR0);
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ixl_handle_mdd_event(pf);
		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;

	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		taskqueue_enqueue(pf->tq, &pf->vflr_task);

	/* Ack: set CLEARPBA so the hardware rearms the vector */
	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	taskqueue_enqueue(pf->tq, &pf->adminq);
1428 /*********************************************************************
1430 * Media Ioctl callback
1432 * This routine is called whenever the user queries the status of
1433 * the interface using ifconfig.
1435 **********************************************************************/
/*
 * ixl_media_status - ifmedia status callback (ifconfig queries).
 * Refreshes link state from the hardware, then translates the PHY
 * type reported by firmware into the corresponding IFM_* media word,
 * including flow-control pause flags.
 * NOTE(review): excerpt -- the per-case 'break's and the link-up
 * guard around the ACTIVE block are elided between visible lines.
 */
ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ixl_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");
	/* Force a fresh link query from firmware */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
/*
 * Older ifmedia headers lack the extended types; map backplane/KR
 * PHYs onto the nearest classic media words instead.
 */
#ifndef IFM_ETH_XTYPE
		case I40E_PHY_TYPE_1000BASE_KX:
			ifmr->ifm_active |= IFM_1000_CX;
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_CR1:
			ifmr->ifm_active |= IFM_10G_TWINAX;
		case I40E_PHY_TYPE_10GBASE_KX4:
			ifmr->ifm_active |= IFM_10G_CX4;
		case I40E_PHY_TYPE_10GBASE_KR:
			ifmr->ifm_active |= IFM_10G_SR;
		case I40E_PHY_TYPE_40GBASE_KR4:
		case I40E_PHY_TYPE_XLPPI:
			ifmr->ifm_active |= IFM_40G_SR4;
		case I40E_PHY_TYPE_1000BASE_KX:
			ifmr->ifm_active |= IFM_1000_KX;
		/* ERJ: What's the difference between these? */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_CR1:
			ifmr->ifm_active |= IFM_10G_CR1;
		case I40E_PHY_TYPE_10GBASE_KX4:
			ifmr->ifm_active |= IFM_10G_KX4;
		case I40E_PHY_TYPE_10GBASE_KR:
			ifmr->ifm_active |= IFM_10G_KR;
		case I40E_PHY_TYPE_20GBASE_KR2:
			ifmr->ifm_active |= IFM_20G_KR2;
		case I40E_PHY_TYPE_40GBASE_KR4:
			ifmr->ifm_active |= IFM_40G_KR4;
		case I40E_PHY_TYPE_XLPPI:
			ifmr->ifm_active |= IFM_40G_XLPPI;
			/* Unrecognized PHY type */
			ifmr->ifm_active |= IFM_UNKNOWN;

	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1559 /*********************************************************************
1561 * Media Ioctl callback
1563 * This routine is called when the user changes speed/duplex using
** media/mediaopt option with ifconfig.
1566 **********************************************************************/
/*
 * ixl_media_change - ifmedia change callback.
 * Validates the request is for Ethernet media and reports that
 * manual media changes are not supported by this driver.
 */
ixl_media_change(struct ifnet * ifp)
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ifmedia *ifm = &vsi->media;

	INIT_DEBUGOUT("ixl_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)

	if_printf(ifp, "Media change is currently not supported.\n");
** ATR: Application Targeted Receive - creates a filter
1587 ** based on TX flow info that will keep the receive
1588 ** portion of the flow on the same queue. Based on the
1589 ** implementation this is only available for TCP connections
/*
 * ixl_atr - Application Targeted Receive filter programming.
 * Builds a Flow Director filter-program descriptor on the TX ring so
 * the receive side of a TCP flow lands on the same queue.  Sampling:
 * every SYN/FIN, or every atr_rate-th packet.  TH_FIN removes the
 * filter; anything else adds/updates it.
 * NOTE(review): excerpt -- declarations of idx/ptype/dtype and the
 * early 'return's after the guard conditions are elided.
 */
ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
	struct ixl_vsi *vsi = que->vsi;
	struct tx_ring *txr = &que->txr;
	struct i40e_filter_program_desc *FDIR;

	/* check if ATR is enabled and sample rate */
	if ((!ixl_enable_fdir) || (!txr->atr_rate))
	/*
	** We sample all TCP SYN/FIN packets,
	** or at the selected sample rate
	*/
	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
	    (txr->atr_count < txr->atr_rate))

	/* Get a descriptor to use */
	idx = txr->next_avail;
	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
	/* Advance the ring tail, wrapping at num_desc */
	if (++idx == que->num_desc)
	txr->next_avail = idx;

	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	    I40E_TXD_FLTR_QW0_QINDEX_MASK;

	/* Select IPv4 vs IPv6 TCP packet classifier type */
	ptype |= (etype == ETHERTYPE_IP) ?
	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;

	/*
	** We use the TCP TH_FIN as a trigger to remove
	** the filter, otherwise its an update.
	*/
	dtype |= (th->th_flags & TH_FIN) ?
	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
	    I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	/* Descriptor fields are little-endian on the wire */
	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
	FDIR->dtype_cmd_cntindex = htole32(dtype);
/*
 * ixl_set_promisc - program unicast/multicast promiscuous mode.
 * Multicast promiscuity is forced on when IFF_ALLMULTI is set or the
 * multicast address count reaches MAX_MULTICAST_ADDR; unicast
 * promiscuity follows IFF_PROMISC.
 * NOTE(review): both AQ return values land in the same 'err' and the
 * first is overwritten by the second -- errors from the unicast call
 * are lost (declaration/use of err elided in this excerpt).
 */
ixl_set_promisc(struct ixl_vsi *vsi)
	struct ifnet *ifp = vsi->ifp;
	struct i40e_hw *hw = vsi->hw;
	bool uni = FALSE, multi = FALSE;

	if (ifp->if_flags & IFF_ALLMULTI)
	else { /* Need to count the multicast addresses */
		struct ifmultiaddr *ifma;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			/* Only link-layer multicast entries count */
			if (ifma->ifma_addr->sa_family != AF_LINK)
			if (mcnt == MAX_MULTICAST_ADDR)
		if_maddr_runlock(ifp);

	if (mcnt >= MAX_MULTICAST_ADDR)
	if (ifp->if_flags & IFF_PROMISC)

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
1692 /*********************************************************************
1695 * Routines for multicast and vlan filter management.
1697 *********************************************************************/
/*
 * ixl_add_multi - sync the interface multicast list into HW filters.
 * First pass counts the addresses; if the list is too large, existing
 * MC filters are dropped and multicast promiscuous is enabled
 * instead.  Otherwise each address is added to the software list and
 * pushed to the hardware in one batch.
 */
ixl_add_multi(struct ixl_vsi *vsi)
	struct ifmultiaddr *ifma;
	struct ifnet *ifp = vsi->ifp;
	struct i40e_hw *hw = vsi->hw;
	int mcnt = 0, flags;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	if_maddr_rlock(ifp);
	/*
	** First just get a count, to decide if
	** we simply use multicast promiscuous.
	*/
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
	if_maddr_runlock(ifp);

	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete existing MC filters */
		ixl_del_hw_filters(vsi, mcnt);
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);

	/* Second pass: record each multicast MAC in the filter list */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
		ixl_add_mc_filter(vsi,
		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
	if_maddr_runlock(ifp);

	flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
	ixl_add_hw_filters(vsi, flags, mcnt);

	IOCTL_DEBUGOUT("ixl_add_multi: end");
/*
 * ixl_del_multi - remove HW multicast filters no longer in the
 * interface multicast list.  Marks each stale software filter with
 * IXL_FILTER_DEL, then pushes the deletions to hardware.
 * NOTE(review): excerpt -- declarations of mcnt/match and the inner
 * match/break lines are elided.
 */
ixl_del_multi(struct ixl_vsi *vsi)
	struct ifnet *ifp = vsi->ifp;
	struct ifmultiaddr *ifma;
	struct ixl_mac_filter *f;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, &vsi->ftl, next) {
		/* Only consider active multicast filters */
		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
			/* Not found in the current list: schedule delete */
			if (match == FALSE) {
				f->flags |= IXL_FILTER_DEL;
	if_maddr_runlock(ifp);

	ixl_del_hw_filters(vsi, mcnt);
1785 /*********************************************************************
* This routine checks for link status, updates statistics,
1789 * and runs the watchdog check.
1791 **********************************************************************/
/*
 * ixl_local_timer - periodic (1 Hz) watchdog callout.
 * Kicks the AdminQ task, updates statistics, fires a software
 * interrupt at each active queue, and tracks per-queue 'busy' counts
 * to detect hung TX queues; reinitializes the device only when every
 * queue appears hung.  Caller must hold pf->pf_mtx (asserted).
 * NOTE(review): excerpt -- the 'hung' counter declaration, goto/label
 * for the hung path, and several braces are elided.
 */
ixl_local_timer(void *arg)
	struct ixl_pf *pf = arg;
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_queue *que = vsi->queues;
	device_t dev = pf->dev;

	mtx_assert(&pf->pf_mtx, MA_OWNED);

	/* Fire off the adminq task */
	taskqueue_enqueue(pf->tq, &pf->adminq);

	ixl_update_stats_counters(pf);

	/*
	** Check status of the queues
	*/
	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
	    I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);

	for (int i = 0; i < vsi->num_queues; i++,que++) {
		/* Any queues with outstanding work get a sw irq */
		wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to 5 we declare it hung.
		*/
		if (que->busy == IXL_QUEUE_HUNG) {
			/* Mark the queue as inactive */
			vsi->active_queues &= ~((u64)1 << que->me);
		/* Check if we've come back from hung */
		if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
			vsi->active_queues |= ((u64)1 << que->me);
		if (que->busy >= IXL_MAX_TX_BUSY) {
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
			que->busy = IXL_QUEUE_HUNG;

	/* Only reinit if all queues show hung */
	if (hung == vsi->num_queues)

	callout_reset(&pf->timer, hz, ixl_local_timer, pf);

	/* Hung path (label elided): full reinit of the device */
	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
	ixl_init_locked(pf);
1859 ** Note: this routine updates the OS on the link state
1860 ** the real check of the hardware only happens with
1861 ** a link interrupt.
/*
 * ixl_update_link_status - push cached link state to the OS.
 * The hardware is only queried on a link interrupt; this routine just
 * reconciles vsi->link_active with pf->link_up and notifies the
 * stack via if_link_state_change().
 */
ixl_update_link_status(struct ixl_pf *pf)
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = vsi->ifp;
	device_t dev = pf->dev;

	if (vsi->link_active == FALSE) {
		pf->fc = hw->fc.current_mode;
		device_printf(dev,"Link is up %d Gbps %s,"
		    " Flow Control: %s\n",
		    I40E_LINK_SPEED_40GB)? 40:10),
		    "Full Duplex", ixl_fc_string[pf->fc]);
		vsi->link_active = TRUE;
		/*
		** Warn user if link speed on NPAR enabled
		** partition is not at least 10GB
		*/
		if (hw->func_caps.npar_enable &&
		    (hw->phy.link_info.link_speed ==
		    I40E_LINK_SPEED_1GB ||
		    hw->phy.link_info.link_speed ==
		    I40E_LINK_SPEED_100MB))
			/*
			 * NOTE(review): adjacent literals concatenate
			 * to "detectedlink" -- missing space.
			 */
			device_printf(dev, "The partition detected"
			    "link speed that is less than 10Gbps\n");
		if_link_state_change(ifp, LINK_STATE_UP);
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			device_printf(dev,"Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			vsi->link_active = FALSE;
1907 /*********************************************************************
1909 * This routine disables all traffic on the adapter by issuing a
1910 * global reset on the MAC and deallocates TX/RX buffers.
1912 **********************************************************************/
/*
 * ixl_stop - quiesce the adapter.
 * Disables interrupts and rings, clears RUNNING/OACTIVE so the stack
 * stops handing us packets, and stops the watchdog callout.
 * Caller must hold pf->pf_mtx (asserted).
 */
ixl_stop(struct ixl_pf *pf)
	struct ixl_vsi *vsi = &pf->vsi;
	struct ifnet *ifp = vsi->ifp;

	mtx_assert(&pf->pf_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixl_stop: begin\n");
	/* With active VFs only ring interrupts may be disabled */
	if (pf->num_vfs == 0)
		ixl_disable_intr(vsi);
		ixl_disable_rings_intr(vsi);
	ixl_disable_rings(vsi);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Stop the local timer */
	callout_stop(&pf->timer);
1939 /*********************************************************************
1941 * Setup MSIX Interrupt resources and handlers for the VSI
1943 **********************************************************************/
/*
 * ixl_assign_vsi_legacy - set up the single legacy/MSI interrupt.
 * Allocates the IRQ resource, installs ixl_intr as the handler, and
 * creates the per-queue and AdminQ taskqueues used for deferred work.
 * NOTE(review): excerpt -- 'rid'/'error' declarations and error
 * returns are elided between visible lines.
 */
ixl_assign_vsi_legacy(struct ixl_pf *pf)
	device_t dev = pf->dev;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_queue *que = vsi->queues;

	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (pf->res == NULL) {
		device_printf(dev,"Unable to allocate"
		    " bus resource: vsi legacy/msi interrupt\n");

	/* Set the handler function */
	error = bus_setup_intr(dev, pf->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixl_intr, pf, &pf->tag);
		device_printf(dev, "Failed to register legacy/msi handler");
	bus_describe_intr(dev, pf->res, pf->tag, "irq0");

	/* Per-queue deferred work */
	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
	TASK_INIT(&que->task, 0, ixl_handle_que, que);
	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
	    taskqueue_thread_enqueue, &que->tq);
	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
	    device_get_nameunit(dev));

	/* AdminQ (and SR-IOV VFLR) deferred work */
	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);

	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);

	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
	    taskqueue_thread_enqueue, &pf->tq);
	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
	    device_get_nameunit(dev));
1993 /*********************************************************************
1995 * Setup MSIX Interrupt resources and handlers for the VSI
1997 **********************************************************************/
/*
 * ixl_assign_vsi_msix - allocate and wire up MSI-X vectors.
 * Vector 0 handles the Admin Queue; one vector per queue follows.
 * Each queue also gets its own fast taskqueue, optionally pinned to
 * the RSS bucket's CPU when RSS is compiled in.
 * NOTE(review): excerpt -- rid assignments, error returns, que->msix
 * bookkeeping and the RSS #ifdef lines are elided.
 */
ixl_assign_vsi_msix(struct ixl_pf *pf)
	device_t dev = pf->dev;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_queue *que = vsi->queues;
	struct tx_ring *txr;
	int error, rid, vector = 0;

	/* Admin Que is vector 0*/
	pf->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
		device_printf(dev,"Unable to allocate"
		    " bus resource: Adminq interrupt [%d]\n", rid);
	/* Set the adminq vector and handler */
	error = bus_setup_intr(dev, pf->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixl_msix_adminq, pf, &pf->tag);
		device_printf(dev, "Failed to register Admin que handler");
	bus_describe_intr(dev, pf->res, pf->tag, "aq");
	pf->admvec = vector;
	/* Tasklet for Admin Queue */
	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);

	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);

	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
	    taskqueue_thread_enqueue, &pf->tq);
	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
	    device_get_nameunit(pf->dev));

	/* Now set up the stations */
	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev,"Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixl_msix_que, que, &que->tag);
			device_printf(dev, "Failed to register que handler");
		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
		/* Bind the vector to a CPU */
		/* RSS path: pin to the CPU serving this RSS bucket */
		cpu_id = rss_getcpu(i % rss_getnumbuckets());
		bus_bind_intr(dev, que->res, cpu_id);

		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
		TASK_INIT(&que->task, 0, ixl_handle_que, que);
		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
		taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
		    cpu_id, "%s (bucket %d)",
		    device_get_nameunit(dev), cpu_id);
		/* Non-RSS path: unpinned queue thread */
		taskqueue_start_threads(&que->tq, 1, PI_NET,
		    "%s que", device_get_nameunit(dev));
2087 * Allocate MSI/X vectors
/*
 * ixl_init_msix - probe and allocate MSI-X (or MSI) vectors.
 * Returns the number of vectors obtained; 0 sends the caller down the
 * legacy path.  Queue count is min(ncpus, available-1), optionally
 * capped by the ixl_max_queues tunable and the RSS bucket count.
 * NOTE(review): excerpt -- early returns, the virtualization #ifdef,
 * and pci_cmd_word/msix_ctrl declarations are elided.
 */
ixl_init_msix(struct ixl_pf *pf)
	device_t dev = pf->dev;
	int rid, want, vectors, queues, available;

	/* Override by tuneable */
	if (ixl_enable_msix == 0)

	/*
	** When used in a virtualized environment
	** PCI BUSMASTER capability may not be set
	** so explicitly set it here and rewrite
	** the ENABLE in the MSIX control register
	** at this point to cause the host to
	** successfully initialize us.
	*/
	pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
	pci_find_cap(dev, PCIY_MSIX, &rid);
	rid += PCIR_MSIX_CTRL;
	msix_ctrl = pci_read_config(dev, rid, 2);
	msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(dev, rid, msix_ctrl, 2);

	/* First try MSI/X */
	rid = PCIR_BAR(IXL_BAR);
	pf->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!pf->msix_mem) {
		/* May not be enabled */
		device_printf(pf->dev,
		    "Unable to map MSIX table \n");

	available = pci_msix_count(dev);
	if (available == 0) { /* system has msix disabled */
		bus_release_resource(dev, SYS_RES_MEMORY,
		pf->msix_mem = NULL;

	/* Figure out a reasonable auto config value */
	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;

	/* Override with hardcoded value if sane */
	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
		queues = ixl_max_queues;

	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for the admin queue.
	*/
	if (want <= available)	/* Have enough */
	device_printf(pf->dev,
	    "MSIX Configuration Problem, "
	    "%d vectors available but %d wanted!\n",
	return (0); /* Will go to Legacy setup */

	if (pci_alloc_msix(dev, &vectors) == 0) {
		device_printf(pf->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		pf->vsi.num_queues = queues;
	/*
	 * If we're doing RSS, the number of queues needs to
	 * match the number of RSS buckets that are configured.
	 *
	 * + If there's more queues than RSS buckets, we'll end
	 * up with queues that get no traffic.
	 *
	 * + If there's more RSS buckets than queues, we'll end
	 * up having multiple RSS buckets map to the same queue,
	 * so there'll be some contention.
	 */
	if (queues != rss_getnumbuckets()) {
		    "%s: queues (%d) != RSS buckets (%d)"
		    "; performance will be impacted.\n",
		    __func__, queues, rss_getnumbuckets());

	/* MSI-X failed: fall back to MSI, then legacy */
	vectors = pci_msi_count(dev);
	pf->vsi.num_queues = 1;
	ixl_enable_msix = 0;
	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
		device_printf(pf->dev,"Using an MSI interrupt\n");
		device_printf(pf->dev,"Using a Legacy interrupt\n");
2210 * Plumb MSI/X vectors
/*
 * ixl_configure_msix - program interrupt routing registers for MSI-X.
 * Vector 0 gets the "other" causes (AdminQ, errors, VFLR); each queue
 * pair is then linked RX -> TX on its own vector via the
 * QINT_RQCTL/TQCTL linked-list registers, terminated with
 * IXL_QUEUE_EOL on the last queue.
 * NOTE(review): excerpt -- reg/vector declarations elided.
 */
ixl_configure_msix(struct ixl_pf *pf)
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/* 0x7FF = no queue list attached to vector 0 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);

	/* Next configure the queues */
	for (int i = 0; i < vsi->num_queues; i++, vector++) {
		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
		wr32(hw, I40E_PFINT_LNKLSTN(i), i);

		/* RX cause: chain to this queue's TX next */
		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
		    (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
		    (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		    (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_RQCTL(i), reg);

		/* TX cause: chain to next queue's RX, or EOL on last */
		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		    (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		    (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		    ((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
		if (i == (vsi->num_queues - 1))
			reg |= (IXL_QUEUE_EOL
			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
		wr32(hw, I40E_QINT_TQCTL(i), reg);
2267 * Configure for MSI single vector operation
/*
 * ixl_configure_legacy - interrupt routing for single-vector (MSI or
 * INTx) operation.  All "other" causes plus queue pair 0 share the
 * one vector; the queue pair is then enabled.
 * NOTE(review): excerpt -- the 'reg' declaration is elided.
 */
ixl_configure_legacy(struct ixl_pf *pf)
	struct i40e_hw *hw = &pf->hw;

	wr32(hw, I40E_PFINT_ITR0(0), 0);
	wr32(hw, I40E_PFINT_ITR0(1), 0);

	/* Setup "other" causes */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
	    | I40E_PFINT_ICR0_ENA_GRST_MASK
	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);

	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
	wr32(hw, I40E_PFINT_STAT_CTL0, 0);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the q int */
	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
	wr32(hw, I40E_QINT_TQCTL(0), reg);

	/* Next enable the queue pair */
	reg = rd32(hw, I40E_QTX_ENA(0));
	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QTX_ENA(0), reg);

	reg = rd32(hw, I40E_QRX_ENA(0));
	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QRX_ENA(0), reg);
2327 * Set the Initial ITR state
/*
 * ixl_configure_itr - seed the initial interrupt throttling state.
 * Takes the RX/TX ITR tunables (with optional dynamic moderation via
 * IXL_ITR_DYNAMIC) and writes them to each queue's ITR registers,
 * caching the values on the rings.
 */
ixl_configure_itr(struct ixl_pf *pf)
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_queue *que = vsi->queues;

	vsi->rx_itr_setting = ixl_rx_itr;
	if (ixl_dynamic_rx_itr)
		vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
	vsi->tx_itr_setting = ixl_tx_itr;
	if (ixl_dynamic_tx_itr)
		vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		struct rx_ring *rxr = &que->rxr;

		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
		    vsi->rx_itr_setting);
		rxr->itr = vsi->rx_itr_setting;
		rxr->latency = IXL_AVE_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
		    vsi->tx_itr_setting);
		txr->itr = vsi->tx_itr_setting;
		txr->latency = IXL_AVE_LATENCY;
/*
 * ixl_allocate_pci_resources - map BAR0 registers and probe vectors.
 * Fills in the osdep bus-space handles used by the shared code's
 * register accessors, then delegates vector allocation to
 * ixl_init_msix() (returns 1 for MSI, 0 for legacy).
 */
ixl_allocate_pci_resources(struct ixl_pf *pf)
	device_t dev = pf->dev;

	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	if (!(pf->pci_mem)) {
		device_printf(dev,"Unable to allocate bus resource: memory\n");

	/* Save off register access info for the shared code */
	pf->osdep.mem_bus_space_tag =
	    rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
	    rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;

	pf->hw.back = &pf->osdep;

	/*
	** Now setup MSI or MSI/X, should
	** return us the number of supported
	** vectors. (Will be 1 for MSI)
	*/
	pf->msix = ixl_init_msix(pf);
2394 ixl_free_pci_resources(struct ixl_pf * pf)
2396 struct ixl_vsi *vsi = &pf->vsi;
2397 struct ixl_queue *que = vsi->queues;
2398 device_t dev = pf->dev;
2401 memrid = PCIR_BAR(IXL_BAR);
2403 /* We may get here before stations are setup */
2404 if ((!ixl_enable_msix) || (que == NULL))
2408 ** Release all msix VSI resources:
2410 for (int i = 0; i < vsi->num_queues; i++, que++) {
2411 rid = que->msix + 1;
2412 if (que->tag != NULL) {
2413 bus_teardown_intr(dev, que->res, que->tag);
2416 if (que->res != NULL)
2417 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2421 /* Clean the AdminQ interrupt last */
2422 if (pf->admvec) /* we are doing MSIX */
2423 rid = pf->admvec + 1;
2425 (pf->msix != 0) ? (rid = 1):(rid = 0);
2427 if (pf->tag != NULL) {
2428 bus_teardown_intr(dev, pf->res, pf->tag);
2431 if (pf->res != NULL)
2432 bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2435 pci_release_msi(dev);
2437 if (pf->msix_mem != NULL)
2438 bus_release_resource(dev, SYS_RES_MEMORY,
2439 memrid, pf->msix_mem);
2441 if (pf->pci_mem != NULL)
2442 bus_release_resource(dev, SYS_RES_MEMORY,
2443 PCIR_BAR(0), pf->pci_mem);
/*
 * ixl_add_ifmedia
 *
 * Translate the PHY capability bitmask reported by the firmware into
 * ifmedia(4) entries on the VSI's media list, one ifmedia_add() per
 * supported media type.
 */
2449 ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2451 /* Display supported media types */
2452 if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2453 ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2455 if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2456 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2457 if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
2458 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2459 if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
2460 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2462 if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
2463 phy_type & (1 << I40E_PHY_TYPE_XFI) ||
2464 phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2465 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2467 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2468 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2469 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2470 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2471 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2472 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2474 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
2475 phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2476 phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
2477 phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
2478 phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2479 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2480 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2481 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2482 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2483 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
/* Older ifmedia without extended types: map KX/KR/etc. onto the
   nearest legacy media words */
2485 #ifndef IFM_ETH_XTYPE
2486 if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
2487 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2489 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2490 phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1) ||
2491 phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
2492 phy_type & (1 << I40E_PHY_TYPE_SFI))
2493 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2494 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
2495 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2496 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
2497 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2499 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2500 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2501 if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
2502 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
/* NOTE(review): the #else separating the legacy and extended-type
   branches appears to be missing from this extract -- confirm */
2504 if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
2505 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
2507 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
2508 || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
2509 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
2510 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
2511 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
2512 if (phy_type & (1 << I40E_PHY_TYPE_SFI))
2513 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
2514 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
2515 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
2516 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
2517 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
2519 if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
2520 ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
2522 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2523 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
2524 if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
2525 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
2529 /*********************************************************************
2531 * Setup networking device structure and register an interface.
2533 **********************************************************************/
/*
 * ixl_setup_interface
 *
 * Allocate and initialize the ifnet structure: set driver entry
 * points, capabilities, supported media list, and finally attach the
 * Ethernet interface.
 *
 * NOTE(review): this extract is missing lines (the ifp declaration,
 * some error returns, #endif markers and the closing brace).
 */
2535 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2538 struct i40e_hw *hw = vsi->hw;
2539 struct ixl_queue *que = vsi->queues;
2540 struct i40e_aq_get_phy_abilities_resp abilities;
2541 enum i40e_status_code aq_error = 0;
2543 INIT_DEBUGOUT("ixl_setup_interface: begin");
2545 ifp = vsi->ifp = if_alloc(IFT_ETHER);
2547 device_printf(dev, "can not allocate ifnet structure\n");
2550 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2551 ifp->if_mtu = ETHERMTU;
2552 if_initbaudrate(ifp, IF_Gbps(40));
2553 ifp->if_init = ixl_init;
2554 ifp->if_softc = vsi;
2555 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2556 ifp->if_ioctl = ixl_ioctl;
2558 #if __FreeBSD_version >= 1100036
2559 if_setgetcounterfn(ifp, ixl_get_counter);
2562 ifp->if_transmit = ixl_mq_start;
2564 ifp->if_qflush = ixl_qflush;
/* Leave a little slack below the ring size for the send queue */
2566 ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2568 vsi->max_frame_size =
2569 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2570 + ETHER_VLAN_ENCAP_LEN;
2573 * Tell the upper layer(s) we support long frames.
2575 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2577 ifp->if_capabilities |= IFCAP_HWCSUM;
2578 ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2579 ifp->if_capabilities |= IFCAP_TSO;
2580 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2581 ifp->if_capabilities |= IFCAP_LRO;
2583 /* VLAN capabilities */
2584 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2587 | IFCAP_VLAN_HWCSUM;
2588 ifp->if_capenable = ifp->if_capabilities;
2591 ** Don't turn this on by default, if vlans are
2592 ** created on another pseudo device (eg. lagg)
2593 ** then vlan events are not passed thru, breaking
2594 ** operation, but with HW FILTER off it works. If
2595 ** using vlans directly on the ixl driver you can
2596 ** enable this and get full hardware tag filtering.
2598 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2601 * Specify the media types supported by this adapter and register
2602 * callbacks to update media and link information
2604 ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2607 aq_error = i40e_aq_get_phy_capabilities(hw,
2608 FALSE, TRUE, &abilities, NULL);
2609 /* May need delay to detect fiber correctly */
2610 if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2611 i40e_msec_delay(200);
2612 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2613 TRUE, &abilities, NULL);
2616 if (aq_error == I40E_ERR_UNKNOWN_PHY)
2617 device_printf(dev, "Unknown PHY type detected!\n");
2620 "Error getting supported media types, err %d,"
2621 " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2625 ixl_add_ifmedia(vsi, abilities.phy_type);
2627 /* Use autoselect media by default */
2628 ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2629 ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2631 ether_ifattach(ifp, hw->mac.addr);
2637 ** Run when the Admin Queue gets a
2638 ** link transition interrupt.
/*
 * ixl_link_event
 *
 * Admin Queue link-transition handler: refresh the cached link state
 * from the firmware and warn if link failed because an unqualified
 * SFP/QSFP module was detected.
 */
2641 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
2643 struct i40e_hw *hw = &pf->hw;
2644 struct i40e_aqc_get_link_status *status =
2645 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
/* Force the shared code to re-query the PHY rather than use cache */
2648 hw->phy.get_link_info = TRUE;
2649 i40e_get_link_status(hw, &check);
2650 pf->link_up = check;
2652 printf("Link is %s\n", check ? "up":"down");
2654 /* Report if Unqualified modules are found */
2655 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2656 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
2657 (!(status->link_info & I40E_AQ_LINK_UP)))
2658 device_printf(pf->dev, "Link failed because "
2659 "an unqualified module was detected\n");
2664 /*********************************************************************
2666 * Get Firmware Switch configuration
2667 * - this will need to be more robust when more complex
2668 * switch configurations are enabled.
2670 **********************************************************************/
/*
 * ixl_switch_config
 *
 * Query the firmware switch configuration and record the uplink,
 * downlink and VSI SEIDs.  Only the first reported element is used
 * (single-VSI assumption -- see the comment in the original header).
 */
2672 ixl_switch_config(struct ixl_pf *pf)
2674 struct i40e_hw *hw = &pf->hw;
2675 struct ixl_vsi *vsi = &pf->vsi;
2676 device_t dev = vsi->dev;
2677 struct i40e_aqc_get_switch_config_resp *sw_config;
2678 u8 aq_buf[I40E_AQ_LARGE_BUF];
2682 memset(&aq_buf, 0, sizeof(aq_buf));
2683 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2684 ret = i40e_aq_get_switch_config(hw, sw_config,
2685 sizeof(aq_buf), &next, NULL);
2687 device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n",
2693 "Switch config: header reported: %d in structure, %d total\n",
2694 sw_config->header.num_reported, sw_config->header.num_total);
2695 for (int i = 0; i < sw_config->header.num_reported; i++) {
2697 "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2698 sw_config->element[i].element_type,
2699 sw_config->element[i].seid,
2700 sw_config->element[i].uplink_seid,
2701 sw_config->element[i].downlink_seid);
2704 /* Simplified due to a single VSI at the moment */
2705 vsi->uplink_seid = sw_config->element[0].uplink_seid;
2706 vsi->downlink_seid = sw_config->element[0].downlink_seid;
2707 vsi->seid = sw_config->element[0].seid;
2711 /*********************************************************************
2713 * Initialize the VSI: this handles contexts, which means things
2714 * like the number of descriptors, buffer size,
2715 * plus we init the rings thru this function.
2717 **********************************************************************/
/*
 * ixl_initialize_vsi
 *
 * Program the VSI context in firmware (queue mapping, VLAN stripping
 * mode), then for each queue pair build and install the HMC TX/RX
 * contexts and (re)initialize the software rings.
 *
 * NOTE(review): this extract is missing lines (error returns, loop
 * braces and the netmap #ifdef opener) -- verify against full source.
 */
2719 ixl_initialize_vsi(struct ixl_vsi *vsi)
2721 struct ixl_pf *pf = vsi->back;
2722 struct ixl_queue *que = vsi->queues;
2723 device_t dev = vsi->dev;
2724 struct i40e_hw *hw = vsi->hw;
2725 struct i40e_vsi_context ctxt;
2728 memset(&ctxt, 0, sizeof(ctxt));
2729 ctxt.seid = vsi->seid;
2730 if (pf->veb_seid != 0)
2731 ctxt.uplink_seid = pf->veb_seid;
2732 ctxt.pf_num = hw->pf_id;
2733 err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2735 device_printf(dev,"get vsi params failed %x!!\n", err);
2739 printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2740 "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2741 "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2742 ctxt.uplink_seid, ctxt.vsi_number,
2743 ctxt.vsis_allocated, ctxt.vsis_unallocated,
2744 ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2745 ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2748 ** Set the queue and traffic class bits
2749 ** - when multiple traffic classes are supported
2750 ** this will need to be more robust.
2752 ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2753 ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2754 ctxt.info.queue_mapping[0] = 0;
2755 ctxt.info.tc_mapping[0] = 0x0800;
2757 /* Set VLAN receive stripping mode */
2758 ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2759 ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2760 if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2761 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2763 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2765 /* Keep copy of VSI info in VSI for statistic counters */
2766 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2768 /* Reset VSI statistics */
2769 ixl_vsi_reset_stats(vsi);
2770 vsi->hw_filters_add = 0;
2771 vsi->hw_filters_del = 0;
2773 ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
2775 err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2777 device_printf(dev,"update vsi params failed %x!!\n",
2778 hw->aq.asq_last_status);
/* Per-queue HMC context setup and ring init */
2782 for (int i = 0; i < vsi->num_queues; i++, que++) {
2783 struct tx_ring *txr = &que->txr;
2784 struct rx_ring *rxr = &que->rxr;
2785 struct i40e_hmc_obj_txq tctx;
2786 struct i40e_hmc_obj_rxq rctx;
2791 /* Setup the HMC TX Context */
2792 size = que->num_desc * sizeof(struct i40e_tx_desc);
2793 memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2794 tctx.new_context = 1;
2795 tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2796 tctx.qlen = que->num_desc;
2798 tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2799 /* Enable HEAD writeback */
2800 tctx.head_wb_ena = 1;
/* Head writeback area lives just past the descriptor ring */
2801 tctx.head_wb_addr = txr->dma.pa +
2802 (que->num_desc * sizeof(struct i40e_tx_desc));
2803 tctx.rdylist_act = 0;
2804 err = i40e_clear_lan_tx_queue_context(hw, i);
2806 device_printf(dev, "Unable to clear TX context\n");
2809 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2811 device_printf(dev, "Unable to set TX context\n");
2814 /* Associate the ring with this PF */
2815 txctl = I40E_QTX_CTL_PF_QUEUE;
2816 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2817 I40E_QTX_CTL_PF_INDX_MASK);
2818 wr32(hw, I40E_QTX_CTL(i), txctl);
2821 /* Do ring (re)init */
2822 ixl_init_tx_ring(que);
2824 /* Next setup the HMC RX Context */
2825 if (vsi->max_frame_size <= MCLBYTES)
2826 rxr->mbuf_sz = MCLBYTES;
2828 rxr->mbuf_sz = MJUMPAGESIZE;
2830 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2832 /* Set up an RX context for the HMC */
2833 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2834 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2835 /* ignore header split for now */
2836 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2837 rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2838 vsi->max_frame_size : max_rxmax;
2840 rctx.dsize = 1; /* do 32byte descriptors */
2841 rctx.hsplit_0 = 0; /* no HDR split initially */
2842 rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
2843 rctx.qlen = que->num_desc;
2844 rctx.tphrdesc_ena = 1;
2845 rctx.tphwdesc_ena = 1;
2846 rctx.tphdata_ena = 0;
2847 rctx.tphhead_ena = 0;
2848 rctx.lrxqthresh = 2;
2855 err = i40e_clear_lan_rx_queue_context(hw, i);
2858 "Unable to clear RX context %d\n", i);
2861 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2863 device_printf(dev, "Unable to set RX context %d\n", i);
2866 err = ixl_init_rx_ring(que);
2868 device_printf(dev, "Fail in init_rx_ring %d\n", i);
2871 wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
/* NOTE(review): the matching "#ifdef DEV_NETMAP" opener for the
   #endif below appears to be missing from this extract */
2873 /* preserve queue */
2874 if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2875 struct netmap_adapter *na = NA(vsi->ifp);
2876 struct netmap_kring *kring = &na->rx_rings[i];
2877 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2878 wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2880 #endif /* DEV_NETMAP */
2881 wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2887 /*********************************************************************
2889 * Free all VSI structs.
2891 **********************************************************************/
/*
 * ixl_free_vsi
 *
 * Release every per-queue resource (TX/RX soft structures, DMA
 * descriptor memory, locks), the queue array itself, and finally the
 * VSI's MAC filter list.  Queues whose mutex was never initialized
 * are skipped (partial-attach cleanup).
 */
2893 ixl_free_vsi(struct ixl_vsi *vsi)
2895 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2896 struct ixl_queue *que = vsi->queues;
2898 /* Free station queues */
2899 for (int i = 0; i < vsi->num_queues; i++, que++) {
2900 struct tx_ring *txr = &que->txr;
2901 struct rx_ring *rxr = &que->rxr;
2903 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2906 ixl_free_que_tx(que);
2908 i40e_free_dma_mem(&pf->hw, &txr->dma);
2910 IXL_TX_LOCK_DESTROY(txr);
2912 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2915 ixl_free_que_rx(que);
2917 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2919 IXL_RX_LOCK_DESTROY(rxr);
2922 free(vsi->queues, M_DEVBUF);
2924 /* Free VSI filter list */
2925 ixl_free_mac_filters(vsi);
/*
 * ixl_free_mac_filters
 *
 * Drain and free every entry on the VSI's MAC/VLAN filter list.
 * NOTE(review): the free() of each entry is on a line missing from
 * this extract -- confirm in the full source.
 */
2929 ixl_free_mac_filters(struct ixl_vsi *vsi)
2931 struct ixl_mac_filter *f;
2933 while (!SLIST_EMPTY(&vsi->ftl)) {
2934 f = SLIST_FIRST(&vsi->ftl);
2935 SLIST_REMOVE_HEAD(&vsi->ftl, next);
2941 /*********************************************************************
2943 * Allocate memory for the VSI (virtual station interface) and their
2944 * associated queues, rings and the descriptors associated with each,
2945 * called only once at attach.
2947 **********************************************************************/
/*
 * ixl_setup_stations
 *
 * Allocate the queue array and, for every queue, the TX/RX locks,
 * DMA descriptor rings, soft state and the TX buf_ring.  On failure
 * the trailing loop tears down whatever was allocated so far.
 *
 * NOTE(review): this extract is missing lines (local declarations,
 * error labels, returns and loop braces).
 */
2949 ixl_setup_stations(struct ixl_pf *pf)
2951 device_t dev = pf->dev;
2952 struct ixl_vsi *vsi;
2953 struct ixl_queue *que;
2954 struct tx_ring *txr;
2955 struct rx_ring *rxr;
2957 int error = I40E_SUCCESS;
2960 vsi->back = (void *)pf;
2966 /* Get memory for the station queues */
2968 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2969 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2970 device_printf(dev, "Unable to allocate queue memory\n");
2975 for (int i = 0; i < vsi->num_queues; i++) {
2976 que = &vsi->queues[i];
2977 que->num_desc = ixl_ringsz;
2980 /* mark the queue as active */
2981 vsi->active_queues |= (u64)1 << que->me;
2984 txr->tail = I40E_QTX_TAIL(que->me);
2986 /* Initialize the TX lock */
2987 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2988 device_get_nameunit(dev), que->me);
2989 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2990 /* Create the TX descriptor ring */
/* Ring size plus a u32 for the head-writeback word, DBA aligned */
2991 tsize = roundup2((que->num_desc *
2992 sizeof(struct i40e_tx_desc)) +
2993 sizeof(u32), DBA_ALIGN);
2994 if (i40e_allocate_dma_mem(&pf->hw,
2995 &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2997 "Unable to allocate TX Descriptor memory\n");
3001 txr->base = (struct i40e_tx_desc *)txr->dma.va;
3002 bzero((void *)txr->base, tsize);
3003 /* Now allocate transmit soft structs for the ring */
3004 if (ixl_allocate_tx_data(que)) {
3006 "Critical Failure setting up TX structures\n");
3010 /* Allocate a buf ring */
3011 txr->br = buf_ring_alloc(4096, M_DEVBUF,
3012 M_WAITOK, &txr->mtx);
3013 if (txr->br == NULL) {
3015 "Critical Failure setting up TX buf ring\n");
3021 * Next the RX queues...
3023 rsize = roundup2(que->num_desc *
3024 sizeof(union i40e_rx_desc), DBA_ALIGN);
3027 rxr->tail = I40E_QRX_TAIL(que->me);
3029 /* Initialize the RX side lock */
3030 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
3031 device_get_nameunit(dev), que->me);
3032 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
3034 if (i40e_allocate_dma_mem(&pf->hw,
3035 &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
3037 "Unable to allocate RX Descriptor memory\n");
3041 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
3042 bzero((void *)rxr->base, rsize);
3044 /* Allocate receive soft structs for the ring*/
3045 if (ixl_allocate_rx_data(que)) {
3047 "Critical Failure setting up receive structs\n");
/* Error unwind: free the DMA memory of every queue built so far */
3056 for (int i = 0; i < vsi->num_queues; i++) {
3057 que = &vsi->queues[i];
3061 i40e_free_dma_mem(&pf->hw, &rxr->dma);
3063 i40e_free_dma_mem(&pf->hw, &txr->dma);
3071 ** Provide a update to the queue RX
3072 ** interrupt moderation value.
/*
 * ixl_set_queue_rx_itr
 *
 * Update this queue's RX interrupt throttling rate.  In dynamic mode
 * the per-interrupt byte count moves the queue between latency bands
 * (low/average/bulk) and the new ITR is exponentially smoothed before
 * being written to hardware; otherwise the static sysctl value is
 * applied when it differs from the current one.
 */
3075 ixl_set_queue_rx_itr(struct ixl_queue *que)
3077 struct ixl_vsi *vsi = que->vsi;
3078 struct i40e_hw *hw = vsi->hw;
3079 struct rx_ring *rxr = &que->rxr;
3085 /* Idle, do nothing */
3086 if (rxr->bytes == 0)
3089 if (ixl_dynamic_rx_itr) {
/* bytes per interrupt interval, used to pick the latency band */
3090 rx_bytes = rxr->bytes/rxr->itr;
3093 /* Adjust latency range */
3094 switch (rxr->latency) {
3095 case IXL_LOW_LATENCY:
3096 if (rx_bytes > 10) {
3097 rx_latency = IXL_AVE_LATENCY;
3098 rx_itr = IXL_ITR_20K;
3101 case IXL_AVE_LATENCY:
3102 if (rx_bytes > 20) {
3103 rx_latency = IXL_BULK_LATENCY;
3104 rx_itr = IXL_ITR_8K;
3105 } else if (rx_bytes <= 10) {
3106 rx_latency = IXL_LOW_LATENCY;
3107 rx_itr = IXL_ITR_100K;
3110 case IXL_BULK_LATENCY:
3111 if (rx_bytes <= 20) {
3112 rx_latency = IXL_AVE_LATENCY;
3113 rx_itr = IXL_ITR_20K;
3118 rxr->latency = rx_latency;
3120 if (rx_itr != rxr->itr) {
3121 /* do an exponential smoothing */
3122 rx_itr = (10 * rx_itr * rxr->itr) /
3123 ((9 * rx_itr) + rxr->itr);
3124 rxr->itr = rx_itr & IXL_MAX_ITR;
3125 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3126 que->me), rxr->itr);
3128 } else { /* We may have toggled to non-dynamic */
3129 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
3130 vsi->rx_itr_setting = ixl_rx_itr;
3131 /* Update the hardware if needed */
3132 if (rxr->itr != vsi->rx_itr_setting) {
3133 rxr->itr = vsi->rx_itr_setting;
3134 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3135 que->me), rxr->itr);
3145 ** Provide a update to the queue TX
3146 ** interrupt moderation value.
/*
 * ixl_set_queue_tx_itr
 *
 * TX counterpart of ixl_set_queue_rx_itr(): adjust this queue's TX
 * interrupt throttling rate, either dynamically from observed traffic
 * or from the static sysctl setting.
 */
3149 ixl_set_queue_tx_itr(struct ixl_queue *que)
3151 struct ixl_vsi *vsi = que->vsi;
3152 struct i40e_hw *hw = vsi->hw;
3153 struct tx_ring *txr = &que->txr;
3159 /* Idle, do nothing */
3160 if (txr->bytes == 0)
3163 if (ixl_dynamic_tx_itr) {
/* bytes per interrupt interval, used to pick the latency band */
3164 tx_bytes = txr->bytes/txr->itr;
3167 switch (txr->latency) {
3168 case IXL_LOW_LATENCY:
3169 if (tx_bytes > 10) {
3170 tx_latency = IXL_AVE_LATENCY;
3171 tx_itr = IXL_ITR_20K;
3174 case IXL_AVE_LATENCY:
3175 if (tx_bytes > 20) {
3176 tx_latency = IXL_BULK_LATENCY;
3177 tx_itr = IXL_ITR_8K;
3178 } else if (tx_bytes <= 10) {
3179 tx_latency = IXL_LOW_LATENCY;
3180 tx_itr = IXL_ITR_100K;
3183 case IXL_BULK_LATENCY:
3184 if (tx_bytes <= 20) {
3185 tx_latency = IXL_AVE_LATENCY;
3186 tx_itr = IXL_ITR_20K;
3191 txr->latency = tx_latency;
3193 if (tx_itr != txr->itr) {
3194 /* do an exponential smoothing */
3195 tx_itr = (10 * tx_itr * txr->itr) /
3196 ((9 * tx_itr) + txr->itr);
3197 txr->itr = tx_itr & IXL_MAX_ITR;
3198 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3199 que->me), txr->itr);
3202 } else { /* We may have toggled to non-dynamic */
3203 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
3204 vsi->tx_itr_setting = ixl_tx_itr;
3205 /* Update the hardware if needed */
3206 if (txr->itr != vsi->tx_itr_setting) {
3207 txr->itr = vsi->tx_itr_setting;
3208 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3209 que->me), txr->itr);
3217 #define QUEUE_NAME_LEN 32
/*
 * ixl_add_vsi_sysctls
 *
 * Create a sysctl node named sysctl_name under the device's tree and
 * attach the VSI's Ethernet statistics beneath it.
 */
3220 ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
3221 struct sysctl_ctx_list *ctx, const char *sysctl_name)
3223 struct sysctl_oid *tree;
3224 struct sysctl_oid_list *child;
3225 struct sysctl_oid_list *vsi_list;
3227 tree = device_get_sysctl_tree(pf->dev);
3228 child = SYSCTL_CHILDREN(tree);
3229 vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
3230 CTLFLAG_RD, NULL, "VSI Number")
3231 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
3233 ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
/*
 * ixl_add_hw_stats
 *
 * Build the driver's sysctl statistics tree: driver-level counters,
 * a "pf" VSI node, one "queN" node per queue with TX/RX counters,
 * and the MAC-level port statistics.
 */
3237 ixl_add_hw_stats(struct ixl_pf *pf)
3239 device_t dev = pf->dev;
3240 struct ixl_vsi *vsi = &pf->vsi;
3241 struct ixl_queue *queues = vsi->queues;
3242 struct i40e_hw_port_stats *pf_stats = &pf->stats;
3244 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3245 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3246 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3247 struct sysctl_oid_list *vsi_list;
3249 struct sysctl_oid *queue_node;
3250 struct sysctl_oid_list *queue_list;
3252 struct tx_ring *txr;
3253 struct rx_ring *rxr;
3254 char queue_namebuf[QUEUE_NAME_LEN];
3256 /* Driver statistics */
3257 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3258 CTLFLAG_RD, &pf->watchdog_events,
3259 "Watchdog timeouts");
3260 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3261 CTLFLAG_RD, &pf->admin_irq,
3262 "Admin Queue IRQ Handled");
3264 ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
3265 vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
3267 /* Queue statistics */
3268 for (int q = 0; q < vsi->num_queues; q++) {
3269 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3270 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
3271 OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
3272 queue_list = SYSCTL_CHILDREN(queue_node);
3274 txr = &(queues[q].txr);
3275 rxr = &(queues[q].rxr);
3277 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3278 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3279 "m_defrag() failed");
3280 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3281 CTLFLAG_RD, &(queues[q].dropped_pkts),
3282 "Driver dropped packets");
3283 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3284 CTLFLAG_RD, &(queues[q].irqs),
3285 "irqs on this queue");
3286 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3287 CTLFLAG_RD, &(queues[q].tso),
3289 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3290 CTLFLAG_RD, &(queues[q].tx_dma_setup),
3291 "Driver tx dma failure in xmit");
3292 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3293 CTLFLAG_RD, &(txr->no_desc),
3294 "Queue No Descriptor Available");
3295 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3296 CTLFLAG_RD, &(txr->total_packets),
3297 "Queue Packets Transmitted");
3298 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3299 CTLFLAG_RD, &(txr->tx_bytes),
3300 "Queue Bytes Transmitted");
3301 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3302 CTLFLAG_RD, &(rxr->rx_packets),
3303 "Queue Packets Received");
3304 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3305 CTLFLAG_RD, &(rxr->rx_bytes),
3306 "Queue Bytes Received");
3310 ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
/*
 * ixl_add_sysctls_eth_stats
 *
 * Register read-only sysctl leaves for a block of i40e Ethernet
 * statistics using a sentinel-terminated name/description table.
 */
3314 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3315 struct sysctl_oid_list *child,
3316 struct i40e_eth_stats *eth_stats)
3318 struct ixl_sysctl_info ctls[] =
3320 {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3321 {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3322 "Unicast Packets Received"},
3323 {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3324 "Multicast Packets Received"},
3325 {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3326 "Broadcast Packets Received"},
3327 {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3328 {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3329 {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3330 {&eth_stats->tx_multicast, "mcast_pkts_txd",
3331 "Multicast Packets Transmitted"},
3332 {&eth_stats->tx_broadcast, "bcast_pkts_txd",
3333 "Broadcast Packets Transmitted"},
/* Table is walked until the NULL stat sentinel (terminator entry is
   on a line missing from this extract) */
3338 struct ixl_sysctl_info *entry = ctls;
3339 while (entry->stat != 0)
3341 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3342 CTLFLAG_RD, entry->stat,
3343 entry->description);
/*
 * ixl_add_sysctls_mac_stats
 *
 * Create the "mac" sysctl node and populate it with the port-level
 * Ethernet statistics plus the MAC/port hardware counters, driven by
 * a sentinel-terminated name/description table.
 */
3349 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3350 struct sysctl_oid_list *child,
3351 struct i40e_hw_port_stats *stats)
3353 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3354 CTLFLAG_RD, NULL, "Mac Statistics");
3355 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3357 struct i40e_eth_stats *eth_stats = &stats->eth;
3358 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3360 struct ixl_sysctl_info ctls[] =
3362 {&stats->crc_errors, "crc_errors", "CRC Errors"},
3363 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3364 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3365 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3366 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3367 /* Packet Reception Stats */
3368 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3369 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3370 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3371 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3372 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3373 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3374 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3375 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3376 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3377 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3378 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3379 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3380 /* Packet Transmission Stats */
3381 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3382 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3383 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3384 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3385 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3386 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3387 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3389 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3390 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3391 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3392 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
/* Walk the table until the NULL stat sentinel (terminator entry is
   on a line missing from this extract) */
3397 struct ixl_sysctl_info *entry = ctls;
3398 while (entry->stat != 0)
3400 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3401 CTLFLAG_RD, entry->stat,
3402 entry->description);
3409 ** ixl_config_rss - setup RSS
3410 ** - note this is done for the single vsi
/*
 * ixl_config_rss
 *
 * Program the hardware RSS key, hash-enable (HENA) PCTYPE mask and
 * lookup table for the single VSI.  With the RSS kernel option the
 * key and hash config come from the network stack; otherwise a fixed
 * seed and the full default PCTYPE set are used.
 *
 * NOTE(review): the #ifdef RSS / #else / #endif markers around the
 * alternative seed, hena and LUT blocks appear to be missing from
 * this extract.
 */
3412 static void ixl_config_rss(struct ixl_vsi *vsi)
3414 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3415 struct i40e_hw *hw = vsi->hw;
3417 u64 set_hena = 0, hena;
3420 u32 rss_hash_config;
3421 u32 rss_seed[IXL_KEYSZ];
/* Fixed seed used when the kernel RSS option is not compiled in */
3423 u32 rss_seed[IXL_KEYSZ] = {0x41b01687,
3424 0x183cfd8c, 0xce880440, 0x580cbc3c,
3425 0x35897377, 0x328b25e1, 0x4fa98922,
3426 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
3430 /* Fetch the configured RSS key */
3431 rss_getkey((uint8_t *) &rss_seed);
3434 /* Fill out hash function seed */
3435 for (i = 0; i < IXL_KEYSZ; i++)
3436 wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
3438 /* Enable PCTYPES for RSS: */
3440 rss_hash_config = rss_gethashconfig();
3441 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3442 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3443 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3444 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
3445 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3446 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
3447 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3448 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
3449 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3450 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
3451 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3452 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
3453 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3454 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
/* Non-RSS build: enable the full default PCTYPE set */
3457 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3458 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3459 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3460 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3461 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3462 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3463 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3464 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3465 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3466 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3467 ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
/* HENA is a 64-bit mask split across two 32-bit registers */
3469 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3470 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3472 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3473 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3475 /* Populate the LUT with max no. of queues in round robin fashion */
3476 for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3477 if (j == vsi->num_queues)
3481 * Fetch the RSS bucket id for the given indirection entry.
3482 * Cap it at the number of configured buckets (which is
3485 que_id = rss_get_indirection_to_bucket(i);
3486 que_id = que_id % vsi->num_queues;
3490 /* lut = 4-byte sliding window of 4 lut entries */
3491 lut = (lut << 8) | (que_id &
3492 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3493 /* On i = 3, we have 4 entries in lut; write to the register */
3495 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3502 ** This routine is run via a vlan config EVENT,
3503 ** it enables us to use the HW Filter table since
3504 ** we can get the vlan id. This just creates the
3505 ** entry in the soft version of the VFTA, init will
3506 ** repopulate the real table.
/*
 * ixl_register_vlan
 *
 * VLAN-config event handler: validate the tag and add a MAC/VLAN
 * filter for it on this VSI.
 * NOTE(review): lock/unlock and num_vlans bookkeeping lines appear
 * missing from this extract.
 */
3509 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3511 struct ixl_vsi *vsi = ifp->if_softc;
3512 struct i40e_hw *hw = vsi->hw;
3513 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3515 if (ifp->if_softc != arg) /* Not our event */
3518 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3523 ixl_add_filter(vsi, hw->mac.addr, vtag);
3528 ** This routine is run via a vlan
3529 ** unconfig EVENT, remove our entry
3530 ** in the soft vfta.
/*
** Vlan unconfig EVENT callback.  Mirror image of ixl_register_vlan():
** validates the event/tag and removes the MAC+vlan filter.
** NOTE(review): early returns and locking are in elided lines.
*/
3533 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3535 struct ixl_vsi *vsi = ifp->if_softc;
3536 struct i40e_hw *hw = vsi->hw;
3537 struct ixl_pf *pf = (struct ixl_pf *)vsi->back; /* presumably used for IXL_PF_LOCK in elided lines — confirm */
3539 if (ifp->if_softc != arg)
3542 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3547 ixl_del_filter(vsi, hw->mac.addr, vtag);
3552 ** This routine updates vlan filters, called by init
3553 ** it scans the filter table and then updates the hw
3554 ** after a soft reset.
/*
** Re-program vlan filters into the hardware after a soft reset:
** scan the soft filter list for entries carrying IXL_FILTER_VLAN,
** mark them for addition (in elided lines), then push them to the
** hardware in one AQ call via ixl_add_hw_filters().
** No-op when no vlans are configured.
*/
3557 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3559 struct ixl_mac_filter *f;
3562 if (vsi->num_vlans == 0)
3565 ** Scan the filter list for vlan entries,
3566 ** mark them for addition and then call
3567 ** for the AQ update.
3569 SLIST_FOREACH(f, &vsi->ftl, next) {
3570 if (f->flags & IXL_FILTER_VLAN) {
3578 printf("setup vlan: no filters found!\n");
3581 flags = IXL_FILTER_VLAN;
3582 flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3583 ixl_add_hw_filters(vsi, flags, cnt); /* cnt presumably accumulated in the elided loop body — confirm */
3588 ** Initialize filter list and add filters that the hardware
3589 ** needs to know about.
/*
** Seed the VSI filter list with the filters the hardware must
** always have; currently just the broadcast address (untagged).
*/
3592 ixl_init_filters(struct ixl_vsi *vsi)
3594 /* Add broadcast address */
3595 ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3599 ** This routine adds multicast filters
/*
** Add a multicast MAC filter to the soft filter list (vlan-agnostic:
** IXL_VLAN_ANY).  Idempotent — returns early (elided) if a matching
** filter already exists.  Marks the new entry ADD|USED so a later
** ixl_add_hw_filters() call pushes it to the hardware.
*/
3602 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3604 struct ixl_mac_filter *f;
3606 /* Does one already exist */
3607 f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3611 f = ixl_get_filter(vsi); /* allocate a fresh list entry */
3613 printf("WARNING: no filter available!!\n");
3616 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3617 f->vlan = IXL_VLAN_ANY;
3618 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
/*
** Push every in-use soft filter back into the hardware, e.g. after
** a reset wiped the hardware filter table.
*/
3625 ixl_reconfigure_filters(struct ixl_vsi *vsi)
3628 ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3632 ** This routine adds macvlan filters
/*
** Add a MAC+vlan ("macvlan") filter to the soft list and immediately
** program it into the hardware.  Special case: when the first real
** vlan is registered, the catch-all IXL_VLAN_ANY filter for this MAC
** is swapped for an explicit vlan-0 filter so tagged and untagged
** traffic keep flowing.
** NOTE(review): duplicate-filter early return and error paths are in
** elided lines.
*/
3635 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3637 struct ixl_mac_filter *f, *tmp;
3641 DEBUGOUT("ixl_add_filter: begin");
3646 /* Does one already exist */
3647 f = ixl_find_filter(vsi, macaddr, vlan);
3651 ** Is this the first vlan being registered, if so we
3652 ** need to remove the ANY filter that indicates we are
3653 ** not in a vlan, and replace that with a 0 filter.
3655 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3656 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3658 ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3659 ixl_add_filter(vsi, macaddr, 0); /* recurse once to install untagged (vlan 0) filter */
3663 f = ixl_get_filter(vsi);
3665 device_printf(dev, "WARNING: no filter available!!\n");
3668 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3670 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3671 if (f->vlan != IXL_VLAN_ANY)
3672 f->flags |= IXL_FILTER_VLAN;
3676 ixl_add_hw_filters(vsi, f->flags, 1); /* program just this one filter */
/*
** Remove a MAC+vlan filter: mark the soft entry IXL_FILTER_DEL and
** issue the hardware delete.  Inverse of the special case in
** ixl_add_filter(): when the last vlan goes away, the explicit
** vlan-0 filter is swapped back for the IXL_VLAN_ANY catch-all.
*/
3681 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3683 struct ixl_mac_filter *f;
3685 f = ixl_find_filter(vsi, macaddr, vlan);
3689 f->flags |= IXL_FILTER_DEL;
3690 ixl_del_hw_filters(vsi, 1); /* removes the marked entry from hw and the soft list */
3693 /* Check if this is the last vlan removal */
3694 if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3695 /* Switch back to a non-vlan filter */
3696 ixl_del_filter(vsi, macaddr, 0);
3697 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3703 ** Find the filter with both matching mac addr and vlan id
/*
** Linear search of the VSI soft filter list for an entry matching
** both MAC address and vlan id; returns the entry, or (in elided
** lines) NULL when no match exists.
*/
3705 static struct ixl_mac_filter *
3706 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3708 struct ixl_mac_filter *f;
3711 SLIST_FOREACH(f, &vsi->ftl, next) {
3712 if (!cmp_etheraddr(f->macaddr, macaddr))
3714 if (f->vlan == vlan) {
3726 ** This routine takes additions to the vsi filter
3727 ** table and creates an Admin Queue call to create
3728 ** the filters in the hardware.
/*
** Batch-program filter additions: collect up to 'cnt' soft filters
** whose flags exactly equal 'flags' into an AQ element array, clear
** their ADD flag, and submit a single i40e_aq_add_macvlan command.
** Requires the PF lock to be held.  On AQ success the VSI counter
** hw_filters_add is advanced by the number submitted.
*/
3731 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3733 struct i40e_aqc_add_macvlan_element_data *a, *b;
3734 struct ixl_mac_filter *f;
3743 IXL_PF_LOCK_ASSERT(pf);
3745 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3746 M_DEVBUF, M_NOWAIT | M_ZERO);
3748 device_printf(dev, "add_hw_filters failed to get memory\n");
3753 ** Scan the filter list, each time we find one
3754 ** we add it to the admin queue array and turn off
3757 SLIST_FOREACH(f, &vsi->ftl, next) {
3758 if (f->flags == flags) { /* exact flag match, not a subset test */
3759 b = &a[j]; // a pox on fvl long names :)
3760 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3761 if (f->vlan == IXL_VLAN_ANY) {
3763 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; /* match any vlan tag */
3765 b->vlan_tag = f->vlan;
3768 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3769 f->flags &= ~IXL_FILTER_ADD; /* mark as programmed */
3776 err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3778 device_printf(dev, "aq_add_macvlan err %d, "
3779 "aq_error %d\n", err, hw->aq.asq_last_status);
3781 vsi->hw_filters_add += j;
3788 ** This routine takes removals in the vsi filter
3789 ** table and creates an Admin Queue call to delete
3790 ** the filters in the hardware.
/*
** Batch-remove filters: collect up to 'cnt' soft filters marked
** IXL_FILTER_DEL into an AQ element array (unlinking each from the
** soft list as it goes), then submit one i40e_aq_remove_macvlan
** command.  ENOENT from the AQ is deliberately ignored (firmware
** quirk); on a real error the per-element error codes are used to
** count how many deletes actually succeeded.
*/
3793 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3795 struct i40e_aqc_remove_macvlan_element_data *d, *e;
3799 struct ixl_mac_filter *f, *f_temp;
3802 DEBUGOUT("ixl_del_hw_filters: begin\n");
3808 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3809 M_DEVBUF, M_NOWAIT | M_ZERO);
3811 printf("del hw filter failed to get memory\n");
3815 SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3816 if (f->flags & IXL_FILTER_DEL) {
3817 e = &d[j]; // a pox on fvl long names :)
3818 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3819 e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3820 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3821 /* delete entry from vsi list */
3822 SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3830 err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3831 /* NOTE: returns ENOENT every time but seems to work fine,
3832 so we'll ignore that specific error. */
3833 // TODO: Does this still occur on current firmwares?
3834 if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3836 for (int i = 0; i < j; i++)
3837 sc += (!d[i].error_code); /* count per-element successes */
3838 vsi->hw_filters_del += sc;
3840 "Failed to remove %d/%d filters, aq error %d\n",
3841 j - sc, j, hw->aq.asq_last_status);
3843 vsi->hw_filters_del += j;
3847 DEBUGOUT("ixl_del_hw_filters: end\n");
/*
** Enable every TX and RX queue of the VSI.  For each queue pair:
** set the QENA_REQ bit, then poll the corresponding QENA_STAT bit
** up to 10 times with 10 ms delays; log a diagnostic if the enable
** never takes.  Return value handling is in elided lines.
*/
3852 ixl_enable_rings(struct ixl_vsi *vsi)
3854 struct ixl_pf *pf = vsi->back;
3855 struct i40e_hw *hw = &pf->hw;
3860 for (int i = 0; i < vsi->num_queues; i++) {
3861 index = vsi->first_queue + i; /* absolute hw queue index */
3862 i40e_pre_tx_queue_cfg(hw, index, TRUE);
3864 reg = rd32(hw, I40E_QTX_ENA(index));
3865 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3866 I40E_QTX_ENA_QENA_STAT_MASK;
3867 wr32(hw, I40E_QTX_ENA(index), reg);
3868 /* Verify the enable took */
3869 for (int j = 0; j < 10; j++) {
3870 reg = rd32(hw, I40E_QTX_ENA(index));
3871 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3873 i40e_msec_delay(10);
3875 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3876 device_printf(pf->dev, "TX queue %d disabled!\n",
3881 reg = rd32(hw, I40E_QRX_ENA(index));
3882 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3883 I40E_QRX_ENA_QENA_STAT_MASK;
3884 wr32(hw, I40E_QRX_ENA(index), reg);
3885 /* Verify the enable took */
3886 for (int j = 0; j < 10; j++) {
3887 reg = rd32(hw, I40E_QRX_ENA(index));
3888 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3890 i40e_msec_delay(10);
3892 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3893 device_printf(pf->dev, "RX queue %d disabled!\n",
/*
** Disable every TX and RX queue of the VSI — mirror image of
** ixl_enable_rings().  Clears QENA_REQ, then polls for QENA_STAT to
** drop (10 tries, 10 ms apart); logs if a queue stays enabled.
** The 500 us delay after pre_tx_queue_cfg lets the hw quiesce.
*/
3903 ixl_disable_rings(struct ixl_vsi *vsi)
3905 struct ixl_pf *pf = vsi->back;
3906 struct i40e_hw *hw = &pf->hw;
3911 for (int i = 0; i < vsi->num_queues; i++) {
3912 index = vsi->first_queue + i;
3914 i40e_pre_tx_queue_cfg(hw, index, FALSE);
3915 i40e_usec_delay(500);
3917 reg = rd32(hw, I40E_QTX_ENA(index));
3918 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3919 wr32(hw, I40E_QTX_ENA(index), reg);
3920 /* Verify the disable took */
3921 for (int j = 0; j < 10; j++) {
3922 reg = rd32(hw, I40E_QTX_ENA(index));
3923 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3925 i40e_msec_delay(10);
3927 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3928 device_printf(pf->dev, "TX queue %d still enabled!\n",
3933 reg = rd32(hw, I40E_QRX_ENA(index));
3934 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3935 wr32(hw, I40E_QRX_ENA(index), reg);
3936 /* Verify the disable took */
3937 for (int j = 0; j < 10; j++) {
3938 reg = rd32(hw, I40E_QRX_ENA(index));
3939 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3941 i40e_msec_delay(10);
3943 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3944 device_printf(pf->dev, "RX queue %d still enabled!\n",
3954 * ixl_handle_mdd_event
3956 * Called from interrupt handler to identify possibly malicious vfs
3957 * (But also detects events from the PF, as well)
/*
** Malicious Driver Detection handler, run from the interrupt path.
** Reads the global MDET_TX/MDET_RX registers to identify which
** function/queue triggered the event, logs it, and clears the latch
** by writing all-ones.  Then checks the PF-local MDET registers to
** see whether this PF itself was the offender, and finally re-arms
** the MAL_DETECT cause in ICR0 so further events interrupt again.
*/
3959 static void ixl_handle_mdd_event(struct ixl_pf *pf)
3961 struct i40e_hw *hw = &pf->hw;
3962 device_t dev = pf->dev;
3963 bool mdd_detected = false;
3964 bool pf_mdd_detected = false;
3967 /* find what triggered the MDD event */
3968 reg = rd32(hw, I40E_GL_MDET_TX);
3969 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3970 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3971 I40E_GL_MDET_TX_PF_NUM_SHIFT;
3972 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3973 I40E_GL_MDET_TX_EVENT_SHIFT;
3974 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3975 I40E_GL_MDET_TX_QUEUE_SHIFT;
3977 "Malicious Driver Detection event 0x%02x"
3978 " on TX queue %d pf number 0x%02x\n",
3979 event, queue, pf_num);
3980 wr32(hw, I40E_GL_MDET_TX, 0xffffffff); /* clear the latched event */
3981 mdd_detected = true;
3983 reg = rd32(hw, I40E_GL_MDET_RX);
3984 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3985 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3986 I40E_GL_MDET_RX_FUNCTION_SHIFT;
3987 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3988 I40E_GL_MDET_RX_EVENT_SHIFT;
3989 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3990 I40E_GL_MDET_RX_QUEUE_SHIFT;
3992 "Malicious Driver Detection event 0x%02x"
3993 " on RX queue %d of function 0x%02x\n",
3994 event, queue, func);
3995 wr32(hw, I40E_GL_MDET_RX, 0xffffffff); /* clear the latched event */
3996 mdd_detected = true;
4000 reg = rd32(hw, I40E_PF_MDET_TX);
4001 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
4002 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
4004 "MDD TX event is for this function 0x%08x",
4006 pf_mdd_detected = true;
4008 reg = rd32(hw, I40E_PF_MDET_RX);
4009 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
4010 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
4012 "MDD RX event is for this function 0x%08x",
4014 pf_mdd_detected = true;
4018 /* re-enable mdd interrupt cause */
4019 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4020 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4021 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/*
** Enable interrupts for the VSI.  In MSI-X mode enable the adminq
** vector plus one vector per queue; otherwise enable the single
** legacy/MSI vector.
*/
4026 ixl_enable_intr(struct ixl_vsi *vsi)
4028 struct i40e_hw *hw = vsi->hw;
4029 struct ixl_queue *que = vsi->queues;
4031 if (ixl_enable_msix) {
4032 ixl_enable_adminq(hw);
4033 for (int i = 0; i < vsi->num_queues; i++, que++)
4034 ixl_enable_queue(hw, que->me);
4036 ixl_enable_legacy(hw);
/*
** Disable the per-queue (ring) interrupt vectors only; the adminq
** vector is left alone (see ixl_disable_intr for that).
*/
4040 ixl_disable_rings_intr(struct ixl_vsi *vsi)
4042 struct i40e_hw *hw = vsi->hw;
4043 struct ixl_queue *que = vsi->queues;
4045 for (int i = 0; i < vsi->num_queues; i++, que++)
4046 ixl_disable_queue(hw, que->me);
/*
** Disable the non-queue interrupt source: the adminq vector in
** MSI-X mode, or the single legacy/MSI vector otherwise.
*/
4050 ixl_disable_intr(struct ixl_vsi *vsi)
4052 struct i40e_hw *hw = vsi->hw;
4054 if (ixl_enable_msix)
4055 ixl_disable_adminq(hw);
4057 ixl_disable_legacy(hw);
/*
** Enable the admin queue interrupt (vector 0): set INTENA and
** CLEARPBA in DYN_CTL0 with the no-ITR index so no throttling
** interval applies.
*/
4061 ixl_enable_adminq(struct i40e_hw *hw)
4065 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4066 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4067 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4068 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/*
** Disable the admin queue interrupt: write DYN_CTL0 with INTENA
** clear (only the no-ITR index bits set).
*/
4074 ixl_disable_adminq(struct i40e_hw *hw)
4078 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4079 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/*
** Enable the MSI-X interrupt for one queue vector 'id' via its
** DYN_CTLN register (INTENA + CLEARPBA, no ITR throttling index).
*/
4085 ixl_enable_queue(struct i40e_hw *hw, int id)
4089 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
4090 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
4091 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
4092 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/*
** Disable the MSI-X interrupt for one queue vector 'id' (write
** DYN_CTLN with INTENA clear).
*/
4096 ixl_disable_queue(struct i40e_hw *hw, int id)
4100 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4101 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/*
** Enable the single legacy/MSI interrupt — same DYN_CTL0 write as
** ixl_enable_adminq(), shared vector 0.
*/
4107 ixl_enable_legacy(struct i40e_hw *hw)
4110 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4111 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4112 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4113 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/*
** Disable the single legacy/MSI interrupt (DYN_CTL0 with INTENA
** clear).
*/
4117 ixl_disable_legacy(struct i40e_hw *hw)
4121 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4122 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/*
** Gather all PF-level hardware statistics from the GLPRT port
** registers into pf->stats, using pf->stats_offsets as the baseline
** (device counters are not cleared by PFR, so the first read becomes
** the zero point — see ixl_stat_update32/48).  Afterwards refresh the
** main VSI stats and, for SR-IOV, the stats of each enabled VF VSI.
*/
4128 ixl_update_stats_counters(struct ixl_pf *pf)
4130 struct i40e_hw *hw = &pf->hw;
4131 struct ixl_vsi *vsi = &pf->vsi;
4134 struct i40e_hw_port_stats *nsd = &pf->stats;
4135 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
4137 /* Update hw stats */
4138 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
4139 pf->stat_offsets_loaded,
4140 &osd->crc_errors, &nsd->crc_errors);
4141 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
4142 pf->stat_offsets_loaded,
4143 &osd->illegal_bytes, &nsd->illegal_bytes);
4144 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
4145 I40E_GLPRT_GORCL(hw->port),
4146 pf->stat_offsets_loaded,
4147 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
4148 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
4149 I40E_GLPRT_GOTCL(hw->port),
4150 pf->stat_offsets_loaded,
4151 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
4152 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
4153 pf->stat_offsets_loaded,
4154 &osd->eth.rx_discards,
4155 &nsd->eth.rx_discards);
4156 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
4157 I40E_GLPRT_UPRCL(hw->port),
4158 pf->stat_offsets_loaded,
4159 &osd->eth.rx_unicast,
4160 &nsd->eth.rx_unicast);
4161 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
4162 I40E_GLPRT_UPTCL(hw->port),
4163 pf->stat_offsets_loaded,
4164 &osd->eth.tx_unicast,
4165 &nsd->eth.tx_unicast);
4166 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
4167 I40E_GLPRT_MPRCL(hw->port),
4168 pf->stat_offsets_loaded,
4169 &osd->eth.rx_multicast,
4170 &nsd->eth.rx_multicast);
4171 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
4172 I40E_GLPRT_MPTCL(hw->port),
4173 pf->stat_offsets_loaded,
4174 &osd->eth.tx_multicast,
4175 &nsd->eth.tx_multicast);
4176 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
4177 I40E_GLPRT_BPRCL(hw->port),
4178 pf->stat_offsets_loaded,
4179 &osd->eth.rx_broadcast,
4180 &nsd->eth.rx_broadcast);
4181 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
4182 I40E_GLPRT_BPTCL(hw->port),
4183 pf->stat_offsets_loaded,
4184 &osd->eth.tx_broadcast,
4185 &nsd->eth.tx_broadcast);
/* Link-down drops and MAC fault counters */
4187 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
4188 pf->stat_offsets_loaded,
4189 &osd->tx_dropped_link_down,
4190 &nsd->tx_dropped_link_down);
4191 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
4192 pf->stat_offsets_loaded,
4193 &osd->mac_local_faults,
4194 &nsd->mac_local_faults);
4195 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
4196 pf->stat_offsets_loaded,
4197 &osd->mac_remote_faults,
4198 &nsd->mac_remote_faults);
4199 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
4200 pf->stat_offsets_loaded,
4201 &osd->rx_length_errors,
4202 &nsd->rx_length_errors);
4204 /* Flow control (LFC) stats */
4205 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
4206 pf->stat_offsets_loaded,
4207 &osd->link_xon_rx, &nsd->link_xon_rx);
4208 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
4209 pf->stat_offsets_loaded,
4210 &osd->link_xon_tx, &nsd->link_xon_tx);
4211 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
4212 pf->stat_offsets_loaded,
4213 &osd->link_xoff_rx, &nsd->link_xoff_rx);
4214 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
4215 pf->stat_offsets_loaded,
4216 &osd->link_xoff_tx, &nsd->link_xoff_tx);
4218 /* Packet size stats rx */
4219 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
4220 I40E_GLPRT_PRC64L(hw->port),
4221 pf->stat_offsets_loaded,
4222 &osd->rx_size_64, &nsd->rx_size_64);
4223 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
4224 I40E_GLPRT_PRC127L(hw->port),
4225 pf->stat_offsets_loaded,
4226 &osd->rx_size_127, &nsd->rx_size_127);
4227 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
4228 I40E_GLPRT_PRC255L(hw->port),
4229 pf->stat_offsets_loaded,
4230 &osd->rx_size_255, &nsd->rx_size_255);
4231 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
4232 I40E_GLPRT_PRC511L(hw->port),
4233 pf->stat_offsets_loaded,
4234 &osd->rx_size_511, &nsd->rx_size_511);
4235 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
4236 I40E_GLPRT_PRC1023L(hw->port),
4237 pf->stat_offsets_loaded,
4238 &osd->rx_size_1023, &nsd->rx_size_1023);
4239 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
4240 I40E_GLPRT_PRC1522L(hw->port),
4241 pf->stat_offsets_loaded,
4242 &osd->rx_size_1522, &nsd->rx_size_1522);
4243 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
4244 I40E_GLPRT_PRC9522L(hw->port),
4245 pf->stat_offsets_loaded,
4246 &osd->rx_size_big, &nsd->rx_size_big);
4248 /* Packet size stats tx */
4249 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
4250 I40E_GLPRT_PTC64L(hw->port),
4251 pf->stat_offsets_loaded,
4252 &osd->tx_size_64, &nsd->tx_size_64);
4253 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
4254 I40E_GLPRT_PTC127L(hw->port),
4255 pf->stat_offsets_loaded,
4256 &osd->tx_size_127, &nsd->tx_size_127);
4257 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
4258 I40E_GLPRT_PTC255L(hw->port),
4259 pf->stat_offsets_loaded,
4260 &osd->tx_size_255, &nsd->tx_size_255);
4261 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
4262 I40E_GLPRT_PTC511L(hw->port),
4263 pf->stat_offsets_loaded,
4264 &osd->tx_size_511, &nsd->tx_size_511);
4265 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
4266 I40E_GLPRT_PTC1023L(hw->port),
4267 pf->stat_offsets_loaded,
4268 &osd->tx_size_1023, &nsd->tx_size_1023);
4269 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
4270 I40E_GLPRT_PTC1522L(hw->port),
4271 pf->stat_offsets_loaded,
4272 &osd->tx_size_1522, &nsd->tx_size_1522);
4273 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
4274 I40E_GLPRT_PTC9522L(hw->port),
4275 pf->stat_offsets_loaded,
4276 &osd->tx_size_big, &nsd->tx_size_big);
/* Receive error buckets: runts, fragments, oversize, jabber */
4278 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
4279 pf->stat_offsets_loaded,
4280 &osd->rx_undersize, &nsd->rx_undersize);
4281 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
4282 pf->stat_offsets_loaded,
4283 &osd->rx_fragments, &nsd->rx_fragments);
4284 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
4285 pf->stat_offsets_loaded,
4286 &osd->rx_oversize, &nsd->rx_oversize);
4287 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
4288 pf->stat_offsets_loaded,
4289 &osd->rx_jabber, &nsd->rx_jabber);
4290 pf->stat_offsets_loaded = true; /* baseline captured after first pass */
4293 /* Update vsi stats */
4294 ixl_update_vsi_stats(vsi);
4296 for (int i = 0; i < pf->num_vfs; i++) {
4298 if (vf->vf_flags & VF_FLAG_ENABLED)
4299 ixl_update_eth_stats(&pf->vfs[i].vsi);
4304 ** Tasklet handler for MSIX Adminq interrupts
4305 ** - do outside interrupt since it might sleep
/*
** Taskqueue handler for admin queue interrupts (runs outside the
** interrupt context because processing may sleep).  Drains ARQ
** events in a loop (bounded by IXL_ADM_LIMIT), dispatching link
** status changes, VF mailbox messages and LAN overflow events.
** Re-arms the adminq cause in ICR0, frees the event buffer, and
** either reschedules itself (more work pending) or re-enables
** interrupts.
*/
4308 ixl_do_adminq(void *context, int pending)
4310 struct ixl_pf *pf = context;
4311 struct i40e_hw *hw = &pf->hw;
4312 struct ixl_vsi *vsi = &pf->vsi;
4313 struct i40e_arq_event_info event;
4318 event.buf_len = IXL_AQ_BUF_SZ;
4319 event.msg_buf = malloc(event.buf_len,
4320 M_DEVBUF, M_NOWAIT | M_ZERO);
4321 if (!event.msg_buf) {
4322 printf("Unable to allocate adminq memory\n");
4327 /* clean and process any events */
4329 ret = i40e_clean_arq_element(hw, &event, &result);
4332 opcode = LE16_TO_CPU(event.desc.opcode);
4334 case i40e_aqc_opc_get_link_status:
4335 ixl_link_event(pf, &event);
4336 ixl_update_link_status(pf);
4338 case i40e_aqc_opc_send_msg_to_pf:
4340 ixl_handle_vf_msg(pf, &event); /* SR-IOV mailbox traffic */
4343 case i40e_aqc_opc_event_lan_overflow:
4347 printf("AdminQ unknown event %x\n", opcode);
4352 } while (result && (loop++ < IXL_ADM_LIMIT));
/* Re-arm the adminq interrupt cause before leaving */
4354 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4355 reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4356 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4357 free(event.msg_buf, M_DEVBUF);
4360 * If there are still messages to process, reschedule ourselves.
4361 * Otherwise, re-enable our interrupt and go to sleep.
4364 taskqueue_enqueue(pf->tq, &pf->adminq);
4366 ixl_enable_intr(vsi);
/*
** Sysctl handler: any write to the node triggers a dump of debug
** state via ixl_print_debug_info().  Reads are a no-op (returns
** after sysctl_handle_int when there is no new value).
*/
4372 ixl_debug_info(SYSCTL_HANDLER_ARGS)
4375 int error, input = 0;
4377 error = sysctl_handle_int(oidp, &input, 0, req);
4379 if (error || !req->newptr)
4383 pf = (struct ixl_pf *)arg1;
4384 ixl_print_debug_info(pf);
/*
** Dump assorted debug counters to the console: first-queue irq/ring
** state, then raw VSI and port statistic registers.
** NOTE(review): the hard-coded 0xc passed to the I40E_GLV_* macros
** looks like a fixed stat-counter index for debugging — confirm it
** matches vsi->info.stat_counter_idx on the target setup.
*/
4391 ixl_print_debug_info(struct ixl_pf *pf)
4393 struct i40e_hw *hw = &pf->hw;
4394 struct ixl_vsi *vsi = &pf->vsi;
4395 struct ixl_queue *que = vsi->queues; /* only the first queue is dumped */
4396 struct rx_ring *rxr = &que->rxr;
4397 struct tx_ring *txr = &que->txr;
4401 printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4402 printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4403 printf("RX next check = %x\n", rxr->next_check);
4404 printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4405 printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4406 printf("TX desc avail = %x\n", txr->avail);
4408 reg = rd32(hw, I40E_GLV_GORCL(0xc));
4409 printf("RX Bytes = %x\n", reg);
4410 reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4411 printf("Port RX Bytes = %x\n", reg);
4412 reg = rd32(hw, I40E_GLV_RDPC(0xc));
4413 printf("RX discard = %x\n", reg);
4414 reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4415 printf("Port RX discard = %x\n", reg);
4417 reg = rd32(hw, I40E_GLV_TEPC(0xc));
4418 printf("TX errors = %x\n", reg);
4419 reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4420 printf("TX Bytes = %x\n", reg);
4422 reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4423 printf("RX undersize = %x\n", reg);
4424 reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4425 printf("RX fragments = %x\n", reg);
4426 reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4427 printf("RX oversize = %x\n", reg);
4428 reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4429 printf("RX length error = %x\n", reg);
4430 reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4431 printf("mac remote fault = %x\n", reg);
4432 reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4433 printf("mac local fault = %x\n", reg);
4437 * Update VSI-specific ethernet statistics counters.
4439 void ixl_update_eth_stats(struct ixl_vsi *vsi)
4441 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4442 struct i40e_hw *hw = &pf->hw;
4443 struct i40e_eth_stats *es;
4444 struct i40e_eth_stats *oes;
4445 struct i40e_hw_port_stats *nsd;
4446 u16 stat_idx = vsi->info.stat_counter_idx;
4448 es = &vsi->eth_stats;
4449 oes = &vsi->eth_stats_offsets;
4452 /* Gather up the stats that the hw collects */
4453 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4454 vsi->stat_offsets_loaded,
4455 &oes->tx_errors, &es->tx_errors);
4456 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4457 vsi->stat_offsets_loaded,
4458 &oes->rx_discards, &es->rx_discards);
4460 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4461 I40E_GLV_GORCL(stat_idx),
4462 vsi->stat_offsets_loaded,
4463 &oes->rx_bytes, &es->rx_bytes);
4464 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4465 I40E_GLV_UPRCL(stat_idx),
4466 vsi->stat_offsets_loaded,
4467 &oes->rx_unicast, &es->rx_unicast);
4468 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4469 I40E_GLV_MPRCL(stat_idx),
4470 vsi->stat_offsets_loaded,
4471 &oes->rx_multicast, &es->rx_multicast);
4472 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4473 I40E_GLV_BPRCL(stat_idx),
4474 vsi->stat_offsets_loaded,
4475 &oes->rx_broadcast, &es->rx_broadcast);
4477 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4478 I40E_GLV_GOTCL(stat_idx),
4479 vsi->stat_offsets_loaded,
4480 &oes->tx_bytes, &es->tx_bytes);
4481 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4482 I40E_GLV_UPTCL(stat_idx),
4483 vsi->stat_offsets_loaded,
4484 &oes->tx_unicast, &es->tx_unicast);
4485 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4486 I40E_GLV_MPTCL(stat_idx),
4487 vsi->stat_offsets_loaded,
4488 &oes->tx_multicast, &es->tx_multicast);
4489 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4490 I40E_GLV_BPTCL(stat_idx),
4491 vsi->stat_offsets_loaded,
4492 &oes->tx_broadcast, &es->tx_broadcast);
4493 vsi->stat_offsets_loaded = true;
/*
** Refresh the VSI ethernet stats, then fold them (together with
** relevant port-level error counters and per-ring buf_ring drops)
** into the ifnet counters via the IXL_SET_* accessor macros.
*/
4497 ixl_update_vsi_stats(struct ixl_vsi *vsi)
4501 struct i40e_eth_stats *es;
4504 struct i40e_hw_port_stats *nsd;
4508 es = &vsi->eth_stats;
4511 ixl_update_eth_stats(vsi);
4513 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4514 for (int i = 0; i < vsi->num_queues; i++)
4515 tx_discards += vsi->queues[i].txr.br->br_drops; /* software buf_ring drops */
4517 /* Update ifnet stats */
4518 IXL_SET_IPACKETS(vsi, es->rx_unicast +
4521 IXL_SET_OPACKETS(vsi, es->tx_unicast +
4524 IXL_SET_IBYTES(vsi, es->rx_bytes);
4525 IXL_SET_OBYTES(vsi, es->tx_bytes);
4526 IXL_SET_IMCASTS(vsi, es->rx_multicast);
4527 IXL_SET_OMCASTS(vsi, es->tx_multicast);
4529 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4530 nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4532 IXL_SET_OERRORS(vsi, es->tx_errors);
4533 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4534 IXL_SET_OQDROPS(vsi, tx_discards);
4535 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4536 IXL_SET_COLLISIONS(vsi, 0); /* full duplex only — no collisions */
4540 * Reset all of the stats for the given pf
/*
** Zero all PF port statistics and their offsets; clearing
** stat_offsets_loaded makes the next update pass re-capture the
** hardware baseline.
*/
4542 void ixl_pf_reset_stats(struct ixl_pf *pf)
4544 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4545 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4546 pf->stat_offsets_loaded = false;
4550 * Resets all stats of the given vsi
/*
** Zero all VSI ethernet statistics and their offsets; the next
** ixl_update_eth_stats() call re-captures the hardware baseline.
*/
4552 void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4554 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4555 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4556 vsi->stat_offsets_loaded = false;
4560 * Read and update a 48 bit stat from the hw
4562 * Since the device stats are not reset at PFReset, they likely will not
4563 * be zeroed when the driver starts. We'll save the first values read
4564 * and use them as offsets to be subtracted from the raw values in order
4565 * to report stats that count from zero.
/*
** Read a 48-bit hardware counter (split across hi/lo registers) and
** update *stat relative to the saved *offset baseline, handling the
** 48-bit wrap.  On amd64 FreeBSD >= 10 a single 64-bit read is used;
** otherwise two 32-bit reads (only 16 bits of the hi register are
** significant).  The baseline capture itself (*offset = new_data
** when !offset_loaded) is in elided lines.
*/
4568 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4569 bool offset_loaded, u64 *offset, u64 *stat)
4573 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4574 new_data = rd64(hw, loreg);
4577 * Use two rd32's instead of one rd64; FreeBSD versions before
4578 * 10 don't support 8 byte bus reads/writes.
4580 new_data = rd32(hw, loreg);
4581 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4586 if (new_data >= *offset)
4587 *stat = new_data - *offset;
4589 *stat = (new_data + ((u64)1 << 48)) - *offset; /* counter wrapped past 2^48 */
4590 *stat &= 0xFFFFFFFFFFFFULL; /* keep only 48 significant bits */
4594 * Read and update a 32 bit stat from the hw
/*
** 32-bit analogue of ixl_stat_update48: read the counter and update
** *stat relative to the *offset baseline, handling 32-bit wrap.
** Baseline capture (when !offset_loaded) is in elided lines.
*/
4597 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4598 bool offset_loaded, u64 *offset, u64 *stat)
4602 new_data = rd32(hw, reg);
4605 if (new_data >= *offset)
4606 *stat = (u32)(new_data - *offset);
4608 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset); /* counter wrapped past 2^32 */
4612 ** Set flow control using sysctl:
/*
** Sysctl handler for the flow-control mode (pf->fc, 0..3: off /
** rx pause / tx pause / full).  Validates the new value, refuses the
** change on 40GBase-CR4 media (known not to work), then programs the
** port via i40e_set_fc().
*/
4619 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4622 * TODO: ensure flow control is disabled if
4623 * priority flow control is enabled
4625 * TODO: ensure tx CRC by hardware should be enabled
4626 * if tx flow control is enabled.
4628 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4629 struct i40e_hw *hw = &pf->hw;
4630 device_t dev = pf->dev;
4632 enum i40e_status_code aq_error = 0;
4636 error = sysctl_handle_int(oidp, &pf->fc, 0, req);
4637 if ((error) || (req->newptr == NULL))
4639 if (pf->fc < 0 || pf->fc > 3) {
4641 "Invalid fc mode; valid modes are 0 through 3\n");
4646 ** Changing flow control mode currently does not work on
4649 if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4650 || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4651 device_printf(dev, "Changing flow control mode unsupported"
4652 " on 40GBase-CR4 media.\n");
4656 /* Set fc ability for port */
4657 hw->fc.requested_mode = pf->fc;
4658 aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4661 "%s: Error setting new fc mode %d; fc_err %#x\n",
4662 __func__, aq_error, fc_aq_err);
/*
** Read-only sysctl: report the current link speed as a string,
** after refreshing link status.  'index' selects into a speeds[]
** string table declared in elided lines.
*/
4670 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4672 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4673 struct i40e_hw *hw = &pf->hw;
4674 int error = 0, index = 0;
4685 ixl_update_link_status(pf);
4687 switch (hw->phy.link_info.link_speed) {
4688 case I40E_LINK_SPEED_100MB:
4691 case I40E_LINK_SPEED_1GB:
4694 case I40E_LINK_SPEED_10GB:
4697 case I40E_LINK_SPEED_40GB:
4700 case I40E_LINK_SPEED_20GB:
4703 case I40E_LINK_SPEED_UNKNOWN:
4709 error = sysctl_handle_string(oidp, speeds[index],
4710 strlen(speeds[index]), req);
/*
** Program the advertised link speeds.  'speeds' is a bitmask
** (0x1=100M, 0x2=1G, 0x4=10G, 0x8=20G — see ixl_set_advertise).
** Fetches the current PHY abilities, builds a new phy config that
** preserves them but substitutes the requested link_speed set, then
** applies it with i40e_aq_set_phy_config and reinitializes the
** interface to pick up the change.
*/
4715 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4717 struct i40e_hw *hw = &pf->hw;
4718 device_t dev = pf->dev;
4719 struct i40e_aq_get_phy_abilities_resp abilities;
4720 struct i40e_aq_set_phy_config config;
4721 enum i40e_status_code aq_error = 0;
4723 /* Get current capability information */
4724 aq_error = i40e_aq_get_phy_capabilities(hw,
4725 FALSE, FALSE, &abilities, NULL);
4728 "%s: Error getting phy capabilities %d,"
4729 " aq error: %d\n", __func__, aq_error,
4730 hw->aq.asq_last_status);
4734 /* Prepare new config */
4735 bzero(&config, sizeof(config));
4736 config.phy_type = abilities.phy_type;
4737 config.abilities = abilities.abilities
4738 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4739 config.eee_capability = abilities.eee_capability;
4740 config.eeer = abilities.eeer_val;
4741 config.low_power_ctrl = abilities.d3_lpan;
4742 /* Translate into aq cmd link_speed */
4744 config.link_speed |= I40E_LINK_SPEED_20GB;
4746 config.link_speed |= I40E_LINK_SPEED_10GB;
4748 config.link_speed |= I40E_LINK_SPEED_1GB;
4750 config.link_speed |= I40E_LINK_SPEED_100MB;
4752 /* Do aq command & restart link */
4753 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4756 "%s: Error setting new phy config %d,"
4757 " aq error: %d\n", __func__, aq_error,
4758 hw->aq.asq_last_status);
4763 ** This seems a bit heavy handed, but we
4764 ** need to get a reinit on some devices
4768 ixl_init_locked(pf);
4775 ** Control link advertise speed:
4777 ** 0x1 - advertise 100 Mb
4778 ** 0x2 - advertise 1G
4779 ** 0x4 - advertise 10G
4780 ** 0x8 - advertise 20G
4782 ** Does not work on 40G devices.
/*
** Sysctl handler for the advertised-speed bitmask (see the comment
** block above for bit meanings).  Rejects the request on 40G devices
** (firmware limitation), range-checks the mask (0x1..0xE), then
** applies device-specific restrictions before committing via
** ixl_set_advertised_speeds() and refreshing link status.
*/
4785 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4787 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4788 struct i40e_hw *hw = &pf->hw;
4789 device_t dev = pf->dev;
4790 int requested_ls = 0;
4794 ** FW doesn't support changing advertised speed
4795 ** for 40G devices; speed is always 40G.
4797 if (i40e_is_40G_device(hw->device_id))
4800 /* Read in new mode */
4801 requested_ls = pf->advertised_speed;
4802 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4803 if ((error) || (req->newptr == NULL))
4805 /* Check for sane value */
4806 if (requested_ls < 0x1 || requested_ls > 0xE) {
4807 device_printf(dev, "Invalid advertised speed; "
4808 "valid modes are 0x1 through 0xE\n");
4811 /* Then check for validity based on adapter type */
4812 switch (hw->device_id) {
4813 case I40E_DEV_ID_10G_BASE_T:
4814 if (requested_ls & 0x8) { /* 20G bit not valid on 10GBase-T */
4816 "20Gbs speed not supported on this device.\n");
4820 case I40E_DEV_ID_20G_KR2:
4821 if (requested_ls & 0x1) { /* 100M bit not valid on 20G KR2 */
4823 "100Mbs speed not supported on this device.\n");
4828 if (requested_ls & ~0x6) { /* default: only 1G/10G bits allowed */
4830 "Only 1/10Gbs speeds are supported on this device.\n");
4836 /* Exit if no change */
4837 if (pf->advertised_speed == requested_ls)
4840 error = ixl_set_advertised_speeds(pf, requested_ls);
4844 pf->advertised_speed = requested_ls;
4845 ixl_update_link_status(pf);
4850 ** Get the width and transaction speed of
4851 ** the bus this adapter is plugged into.
/*
** Decode the PCIe Link Status register into hw->bus.width and
** hw->bus.speed, print the result, and warn when the slot provides
** less than x8 Gen3 bandwidth.
** NOTE(review): the warning condition uses '<= i40e_bus_width_pcie_x8',
** which also fires for an x8 link — presumably intentional so that
** x8 Gen1/Gen2 still warns; confirm against upstream.
*/
4854 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4860 /* Get the PCI Express Capabilities offset */
4861 pci_find_cap(dev, PCIY_EXPRESS, &offset);
4863 /* ...and read the Link Status Register */
4864 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4866 switch (link & I40E_PCI_LINK_WIDTH) {
4867 case I40E_PCI_LINK_WIDTH_1:
4868 hw->bus.width = i40e_bus_width_pcie_x1;
4870 case I40E_PCI_LINK_WIDTH_2:
4871 hw->bus.width = i40e_bus_width_pcie_x2;
4873 case I40E_PCI_LINK_WIDTH_4:
4874 hw->bus.width = i40e_bus_width_pcie_x4;
4876 case I40E_PCI_LINK_WIDTH_8:
4877 hw->bus.width = i40e_bus_width_pcie_x8;
4880 hw->bus.width = i40e_bus_width_unknown;
4884 switch (link & I40E_PCI_LINK_SPEED) {
4885 case I40E_PCI_LINK_SPEED_2500:
4886 hw->bus.speed = i40e_bus_speed_2500;
4888 case I40E_PCI_LINK_SPEED_5000:
4889 hw->bus.speed = i40e_bus_speed_5000;
4891 case I40E_PCI_LINK_SPEED_8000:
4892 hw->bus.speed = i40e_bus_speed_8000;
4895 hw->bus.speed = i40e_bus_speed_unknown;
4900 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4901 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4902 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4903 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4904 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4905 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4906 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4909 if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4910 (hw->bus.speed < i40e_bus_speed_8000)) {
4911 device_printf(dev, "PCI-Express bandwidth available"
4912 " for this device\n may be insufficient for"
4913 " optimal performance.\n");
4914 device_printf(dev, "For expected performance a x8 "
4915 "PCIE Gen3 slot is required.\n");
/*
 * ixl_sysctl_show_fw: sysctl handler that formats firmware, AdminQ API
 * and NVM version numbers into a short string and returns it.
 * NOTE(review): elided listing — buffer declaration and closing lines
 * are missing from view.
 */
4922 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4924 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4925 struct i40e_hw *hw = &pf->hw;
4928 snprintf(buf, sizeof(buf),
4929 "f%d.%d a%d.%d n%02x.%02x e%08x",
4930 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
4931 hw->aq.api_maj_ver, hw->aq.api_min_ver,
4932 (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
4933 IXL_NVM_VERSION_HI_SHIFT,
4934 (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
4935 IXL_NVM_VERSION_LO_SHIFT,
4937 return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4941 #ifdef IXL_DEBUG_SYSCTL
/*
 * ixl_sysctl_link_status (debug sysctl): query the current link status
 * via the AdminQ and report PHY type, speed, link/AN/ext info as text.
 * NOTE(review): elided listing — the sprintf/snprintf into 'buf' and the
 * error-return path around the printf are not fully visible.
 */
4943 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4945 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4946 struct i40e_hw *hw = &pf->hw;
4947 struct i40e_link_status link_status;
4950 enum i40e_status_code aq_error = 0;
4952 aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4954 printf("i40e_aq_get_link_info() error %d\n", aq_error);
4959 "PHY Type : %#04x\n"
4961 "Link info: %#04x\n"
4964 link_status.phy_type, link_status.link_speed,
4965 link_status.link_info, link_status.an_info,
4966 link_status.ext_info);
4968 return (sysctl_handle_string(oidp, buf, strlen(buf), req));
/*
 * ixl_sysctl_phy_abilities (debug sysctl): fetch PHY capabilities from
 * firmware and report them (PHY type, speeds, EEE/EEER, D3 LPAN) as text.
 * NOTE(review): elided listing — buffer declaration and part of the
 * format string are missing from view.
 */
4972 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4974 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4975 struct i40e_hw *hw = &pf->hw;
4977 enum i40e_status_code aq_error = 0;
4979 struct i40e_aq_get_phy_abilities_resp abilities;
4981 aq_error = i40e_aq_get_phy_capabilities(hw,
4982 TRUE, FALSE, &abilities, NULL);
4984 printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4989 "PHY Type : %#010x\n"
4991 "Abilities: %#04x\n"
4993 "EEER reg : %#010x\n"
4995 abilities.phy_type, abilities.link_speed,
4996 abilities.abilities, abilities.eee_capability,
4997 abilities.eeer_val, abilities.d3_lpan);
4999 return (sysctl_handle_string(oidp, buf, strlen(buf), req));
/*
 * ixl_sysctl_sw_filter_list (debug sysctl): dump the software MAC filter
 * list of the PF's VSI (MAC, VLAN, flags), one entry per line.
 * NOTE(review): elided listing. The visible lines show
 * malloc(..., M_NOWAIT) followed immediately by sprintf into the buffer
 * with no NULL check in view — confirm against the full source that an
 * allocation-failure check exists.
 */
5003 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5005 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5006 struct ixl_vsi *vsi = &pf->vsi;
5007 struct ixl_mac_filter *f;
5012 int ftl_counter = 0;
/* First pass: count entries (body elided) to size the buffer. */
5016 SLIST_FOREACH(f, &vsi->ftl, next) {
5021 sysctl_handle_string(oidp, "(none)", 6, req);
5025 buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5026 buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
5028 sprintf(buf_i++, "\n");
/* Second pass: format each filter entry. */
5029 SLIST_FOREACH(f, &vsi->ftl, next) {
5031 MAC_FORMAT ", vlan %4d, flags %#06x",
5032 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5034 /* don't print '\n' for last entry */
5035 if (++ftl_counter != ftl_len) {
5036 sprintf(buf_i, "\n");
5041 error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5043 printf("sysctl error: %d\n", error);
5044 free(buf, M_DEVBUF);
5048 #define IXL_SW_RES_SIZE 0x14
5050 ixl_res_alloc_cmp(const void *a, const void *b)
5052 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5053 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5054 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5056 return ((int)one->resource_type - (int)two->resource_type);
/*
 * ixl_sysctl_hw_res_alloc (debug sysctl): query the hardware's switch
 * resource allocation table via the AdminQ, sort it by resource type,
 * and print it as a formatted table.
 * NOTE(review): elided listing — error-path cleanup (sbuf_delete) and
 * some sbuf_printf arguments are not fully visible.
 */
5060 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5062 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5063 struct i40e_hw *hw = &pf->hw;
5064 device_t dev = pf->dev;
5069 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5071 buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
5073 device_printf(dev, "Could not allocate sbuf for output.\n");
5077 bzero(resp, sizeof(resp));
5078 error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5084 "%s: get_switch_resource_alloc() error %d, aq error %d\n",
5085 __func__, error, hw->aq.asq_last_status);
5090 /* Sort entries by type for display */
5091 qsort(resp, num_entries,
5092 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5093 &ixl_res_alloc_cmp);
5095 sbuf_cat(buf, "\n");
5096 sbuf_printf(buf, "# of entries: %d\n", num_entries);
5098 "Type | Guaranteed | Total | Used | Un-allocated\n"
5099 " | (this) | (all) | (this) | (all) \n");
5100 for (int i = 0; i < num_entries; i++) {
5102 "%#4x | %10d %5d %6d %12d",
5103 resp[i].resource_type,
5107 resp[i].total_unalloced);
5108 if (i < num_entries - 1)
5109 sbuf_cat(buf, "\n");
5112 error = sbuf_finish(buf);
5114 device_printf(dev, "Error finishing sbuf: %d\n", error);
5119 error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
5121 device_printf(dev, "sysctl error: %d\n", error);
5128 ** Caller must init and delete sbuf; this function will clear and
5129 ** finish it for caller.
5132 ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
5136 if (seid == 0 && uplink)
5137 sbuf_cat(s, "Network");
5139 sbuf_cat(s, "Host");
5143 sbuf_printf(s, "MAC %d", seid - 2);
5144 else if (seid <= 15)
5145 sbuf_cat(s, "Reserved");
5146 else if (seid <= 31)
5147 sbuf_printf(s, "PF %d", seid - 16);
5148 else if (seid <= 159)
5149 sbuf_printf(s, "VF %d", seid - 32);
5150 else if (seid <= 287)
5151 sbuf_cat(s, "Reserved");
5152 else if (seid <= 511)
5153 sbuf_cat(s, "Other"); // for other structures
5154 else if (seid <= 895)
5155 sbuf_printf(s, "VSI %d", seid - 512);
5156 else if (seid <= 1023)
5157 sbuf_printf(s, "Reserved");
5159 sbuf_cat(s, "Invalid");
5162 return sbuf_data(s);
/*
 * ixl_sysctl_switch_config (debug sysctl): fetch the internal switch
 * configuration from firmware and print one row per switch element
 * (SEID, name, uplink, downlink, connection type).
 * NOTE(review): elided listing — error paths (sbuf_delete on failure)
 * and several separator sbuf_cat calls are missing from view.
 */
5166 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5168 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5169 struct i40e_hw *hw = &pf->hw;
5170 device_t dev = pf->dev;
5174 u8 aq_buf[I40E_AQ_LARGE_BUF];
5177 struct i40e_aqc_get_switch_config_resp *sw_config;
5178 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5180 buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
5182 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5186 error = i40e_aq_get_switch_config(hw, sw_config,
5187 sizeof(aq_buf), &next, NULL);
5190 "%s: aq_get_switch_config() error %d, aq error %d\n",
5191 __func__, error, hw->aq.asq_last_status);
/* Scratch sbuf reused by ixl_switch_element_string() for each name. */
5196 nmbuf = sbuf_new_auto();
5198 device_printf(dev, "Could not allocate sbuf for name output.\n");
5202 sbuf_cat(buf, "\n");
5203 // Assuming <= 255 elements in switch
5204 sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
5206 ** Revision -- all elements are revision 1 for now
5209 "SEID ( Name ) | Uplink | Downlink | Conn Type\n"
5210 " | | | (uplink)\n");
5211 for (int i = 0; i < sw_config->header.num_reported; i++) {
5212 // "%4d (%8s) | %8s %8s %#8x",
5213 sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5215 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5216 sw_config->element[i].seid, false));
5217 sbuf_cat(buf, " | ");
5218 sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5219 sw_config->element[i].uplink_seid, true));
5221 sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5222 sw_config->element[i].downlink_seid, false));
5224 sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
5225 if (i < sw_config->header.num_reported - 1)
5226 sbuf_cat(buf, "\n");
5230 error = sbuf_finish(buf);
5232 device_printf(dev, "Error finishing sbuf: %d\n", error);
5237 error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
5239 device_printf(dev, "sysctl error: %d\n", error);
5244 #endif /* IXL_DEBUG_SYSCTL */
/*
 * ixl_vf_alloc_vsi: create a VF-type VSI for the given VF via the
 * AdminQ, map its queues non-contiguously, disable its BW limit, and
 * record seid/vsi_num/queue range in vf->vsi.
 * Returns 0 or an errno translated from the AdminQ status.
 * NOTE(review): elided listing — 'vsi'/'hw' initialization and the
 * final return are missing from view.
 */
5249 ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5252 struct ixl_vsi *vsi;
5253 struct i40e_vsi_context vsi_ctx;
5255 uint16_t first_queue;
5256 enum i40e_status_code code;
5261 vsi_ctx.pf_num = hw->pf_id;
5262 vsi_ctx.uplink_seid = pf->veb_seid;
5263 vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
5264 vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5265 vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
5267 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
5269 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5270 vsi_ctx.info.switch_id = htole16(0);
5272 vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
5273 vsi_ctx.info.sec_flags = 0;
/* Optionally enable MAC anti-spoof checking for this VF. */
5274 if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
5275 vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
5277 vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
5278 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5279 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5281 vsi_ctx.info.valid_sections |=
5282 htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5283 vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
/* VF queues start after the PF's queues, IXLV_MAX_QUEUES per VF. */
5284 first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
5285 for (i = 0; i < IXLV_MAX_QUEUES; i++)
5286 vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
/* Mark the remaining mapping slots unused. */
5287 for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
5288 vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
5290 vsi_ctx.info.tc_mapping[0] = htole16(
5291 (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5292 (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5294 code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
5295 if (code != I40E_SUCCESS)
5296 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5297 vf->vsi.seid = vsi_ctx.seid;
5298 vf->vsi.vsi_num = vsi_ctx.vsi_number;
5299 vf->vsi.first_queue = first_queue;
5300 vf->vsi.num_queues = IXLV_MAX_QUEUES;
5302 code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
5303 if (code != I40E_SUCCESS)
5304 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5306 code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
5307 if (code != I40E_SUCCESS) {
5308 device_printf(pf->dev, "Failed to disable BW limit: %d\n",
5309 ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5310 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5313 memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
/*
 * ixl_vf_setup_vsi: allocate the VF's VSI, reset its filter counters,
 * install the broadcast filter and reapply configured filters.
 * NOTE(review): elided listing — error-return after ixl_vf_alloc_vsi()
 * is not visible.
 */
5318 ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5325 error = ixl_vf_alloc_vsi(pf, vf);
5329 vf->vsi.hw_filters_add = 0;
5330 vf->vsi.hw_filters_del = 0;
5331 ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
5332 ixl_reconfigure_filters(&vf->vsi);
/*
 * ixl_vf_map_vsi_queue: write one entry of the VSI LAN queue table.
 * Two entries share one 32-bit register, so compute a shift from the
 * queue parity and read-modify-write only that half.
 */
5338 ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
5345 * Two queues are mapped in a single register, so we have to do some
5346 * gymnastics to convert the queue number into a register index and
5350 shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
5352 qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
5353 qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
5354 qtable |= val << shift;
5355 wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
/*
 * ixl_vf_map_queues: program the hardware queue mappings for a VF —
 * enable table-based (non-contiguous) mapping, fill the VF's VPLAN
 * queue table, map its VSI queues, and mark the rest unused.
 */
5359 ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
5368 * Contiguous mappings aren't actually supported by the hardware,
5369 * so we have to use non-contiguous mappings.
5371 wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
5372 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
5374 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
5375 I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
/* Map each VF-relative queue index to its global queue number. */
5377 for (i = 0; i < vf->vsi.num_queues; i++) {
5378 qtable = (vf->vsi.first_queue + i) <<
5379 I40E_VPLAN_QTABLE_QINDEX_SHIFT;
5381 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
5384 /* Map queues allocated to VF to its VSI. */
5385 for (i = 0; i < vf->vsi.num_queues; i++)
5386 ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);
5388 /* Set rest of VSI queues as unused. */
5389 for (; i < IXL_MAX_VSI_QUEUES; i++)
5390 ixl_vf_map_vsi_queue(hw, vf, i,
5391 I40E_VSILAN_QTABLE_QINDEX_0_MASK);
/*
 * ixl_vf_vsi_release: delete the VF's VSI switch element via the
 * AdminQ. NOTE(review): elided listing — guard conditions (e.g. seid
 * validity) between the signature and the delete call are not visible.
 */
5397 ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
5406 i40e_aq_delete_element(hw, vsi->seid, NULL);
/*
 * ixl_vf_disable_queue_intr: disable a VF queue interrupt by writing
 * only CLEARPBA (leaving INTENA clear) to the given VFINT register.
 */
5410 ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
5413 wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
/*
 * ixl_vf_unregister_intr: terminate a VF interrupt linked list by
 * writing the end-of-list sentinel (all-ones index/type) to the
 * given VPINT_LNKLST register.
 */
5418 ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
5421 wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
5422 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
/*
 * ixl_vf_release_resources: tear down a VF — release its VSI, disable
 * all of its queue interrupts, unregister all interrupt linked lists,
 * and zero its queue count. Vector 0 uses dedicated registers; vectors
 * 1..N-1 use the indexed REG macros.
 */
5427 ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
5430 uint32_t vfint_reg, vpint_reg;
5435 ixl_vf_vsi_release(pf, &vf->vsi);
5437 /* Index 0 has a special register. */
5438 ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
5440 for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5441 vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i , vf->vf_num);
5442 ixl_vf_disable_queue_intr(hw, vfint_reg);
5445 /* Index 0 has a special register. */
5446 ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
5448 for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5449 vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
5450 ixl_vf_unregister_intr(hw, vpint_reg);
5453 vf->vsi.num_queues = 0;
/*
 * ixl_flush_pcie: poll (up to IXL_VF_RESET_TIMEOUT iterations) until the
 * VF's pending PCIe transactions drain, using the CIAA/CIAD indirect
 * config-space access registers.
 * NOTE(review): elided listing — the delay inside the loop and the
 * success/timeout return values are not visible.
 */
5457 ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
5461 uint16_t global_vf_num;
5465 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
/* Select the VF's Device Status register through the CIAA window. */
5467 wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
5468 (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
5469 for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5470 ciad = rd32(hw, I40E_PF_PCI_CIAD);
5471 if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
/*
 * ixl_reset_vf: trigger a software reset of the VF by setting VFSWR in
 * VPGEN_VFRTRIG, then run the common reinit path (which also clears
 * the trigger).
 */
5480 ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5487 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5488 vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5489 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5492 ixl_reinit_vf(pf, vf);
/*
 * ixl_reinit_vf: complete a VF reset — flush PCIe traffic, wait for the
 * reset-done bit, clear the reset trigger, release and rebuild the VF's
 * VSI and queue mappings, and finally mark the VF active.
 * NOTE(review): elided listing — delays inside the poll loop are not
 * visible.
 */
5496 ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5499 uint32_t vfrstat, vfrtrig;
5504 error = ixl_flush_pcie(pf, vf);
5506 device_printf(pf->dev,
5507 "Timed out waiting for PCIe activity to stop on VF-%d\n",
/* Poll VPGEN_VFRSTAT until hardware reports reset done. */
5510 for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5513 vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
5514 if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
5518 if (i == IXL_VF_RESET_TIMEOUT)
5519 device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
5521 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
/* Clear the software-reset trigger set by ixl_reset_vf(). */
5523 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5524 vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5525 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5527 if (vf->vsi.seid != 0)
5528 ixl_disable_rings(&vf->vsi);
5530 ixl_vf_release_resources(pf, vf);
5531 ixl_vf_setup_vsi(pf, vf);
5532 ixl_vf_map_queues(pf, vf);
5534 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
/*
 * ixl_vc_opcode_str: map a virtchnl opcode to its name for debug
 * logging. NOTE(review): elided listing — several return lines (e.g.
 * for VERSION, FCOE, EVENT) and the default case are missing from view.
 */
5539 ixl_vc_opcode_str(uint16_t op)
5543 case I40E_VIRTCHNL_OP_VERSION:
5545 case I40E_VIRTCHNL_OP_RESET_VF:
5546 return ("RESET_VF");
5547 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
5548 return ("GET_VF_RESOURCES");
5549 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
5550 return ("CONFIG_TX_QUEUE");
5551 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
5552 return ("CONFIG_RX_QUEUE");
5553 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
5554 return ("CONFIG_VSI_QUEUES");
5555 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
5556 return ("CONFIG_IRQ_MAP");
5557 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
5558 return ("ENABLE_QUEUES");
5559 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
5560 return ("DISABLE_QUEUES");
5561 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
5562 return ("ADD_ETHER_ADDRESS");
5563 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
5564 return ("DEL_ETHER_ADDRESS");
5565 case I40E_VIRTCHNL_OP_ADD_VLAN:
5566 return ("ADD_VLAN");
5567 case I40E_VIRTCHNL_OP_DEL_VLAN:
5568 return ("DEL_VLAN");
5569 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
5570 return ("CONFIG_PROMISCUOUS_MODE");
5571 case I40E_VIRTCHNL_OP_GET_STATS:
5572 return ("GET_STATS");
5573 case I40E_VIRTCHNL_OP_FCOE:
5575 case I40E_VIRTCHNL_OP_EVENT:
/*
 * ixl_vc_opcode_level: choose a debug verbosity level per virtchnl
 * opcode (GET_STATS is high-frequency and gets special treatment).
 * NOTE(review): elided listing — the return values are missing from view.
 */
5583 ixl_vc_opcode_level(uint16_t opcode)
5587 case I40E_VIRTCHNL_OP_GET_STATS:
/*
 * ixl_send_vf_msg: send a virtchnl reply to a VF over the AdminQ,
 * logging it at the opcode's debug level. The VF number is translated
 * to the device-global VF id expected by the firmware.
 */
5595 ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5596 enum i40e_status_code status, void *msg, uint16_t len)
5602 global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
5604 I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
5605 "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
5606 ixl_vc_opcode_str(op), op, status, vf->vf_num);
5608 i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
/* ixl_send_vf_ack: positive acknowledgment (SUCCESS, no payload). */
5612 ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
5615 ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
/*
 * ixl_send_vf_nack_msg: negative acknowledgment with an error status;
 * logs the originating file/line (supplied by the i40e_send_vf_nack
 * wrapper macro) for debugging.
 */
5619 ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5620 enum i40e_status_code status, const char *file, int line)
5623 I40E_VC_DEBUG(pf, 1,
5624 "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
5625 ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
5626 ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
/*
 * ixl_vf_version_msg: handle a VERSION message — validate the payload
 * size, then reply with the PF's supported virtchnl version.
 */
5630 ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5633 struct i40e_virtchnl_version_info reply;
5635 if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
5636 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
5641 reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
5642 reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
5643 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
/*
 * ixl_vf_reset_msg: handle a RESET_VF message — the payload must be
 * empty; on success the VF is reset and (deliberately) no reply is sent.
 */
5648 ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5652 if (msg_size != 0) {
5653 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
5658 ixl_reset_vf(pf, vf);
5660 /* No response to a reset message. */
/*
 * ixl_vf_get_resources_msg: handle GET_VF_RESOURCES — reply with the
 * VF's VSI description (id, type, queue count, MAC) and capability
 * flags. The payload must be empty.
 * NOTE(review): elided listing — some reply fields (e.g. num_vsis)
 * are set on lines not visible here.
 */
5664 ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5667 struct i40e_virtchnl_vf_resource reply;
5669 if (msg_size != 0) {
5670 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5675 bzero(&reply, sizeof(reply));
5677 reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
5680 reply.num_queue_pairs = vf->vsi.num_queues;
5681 reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
5682 reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
5683 reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
5684 reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
5685 memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
5687 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5688 I40E_SUCCESS, &reply, sizeof(reply));
/*
 * ixl_vf_config_tx_queue: program one VF TX queue's HMC context from
 * the virtchnl txq_info (base address, length, head writeback) and bind
 * the global queue to the VF via QTX_CTL. Returns 0 or an error.
 * NOTE(review): elided listing — error-return bodies after the status
 * checks are not visible.
 */
5692 ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5693 struct i40e_virtchnl_txq_info *info)
5696 struct i40e_hmc_obj_txq txq;
5697 uint16_t global_queue_num, global_vf_num;
5698 enum i40e_status_code status;
/* Translate the VF-relative queue id to the device-global number. */
5702 global_queue_num = vf->vsi.first_queue + info->queue_id;
5703 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5704 bzero(&txq, sizeof(txq));
5706 status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
5707 if (status != I40E_SUCCESS)
5710 txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
5712 txq.head_wb_ena = info->headwb_enabled;
5713 txq.head_wb_addr = info->dma_headwb_addr;
5714 txq.qlen = info->ring_len;
5715 txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
5716 txq.rdylist_act = 0;
5718 status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
5719 if (status != I40E_SUCCESS)
/* Associate the queue with this VF in the TX queue control register. */
5722 qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
5723 (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
5724 (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
5725 wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
/*
 * ixl_vf_config_rx_queue: validate and program one VF RX queue's HMC
 * context (buffer sizes, max frame, optional header split, TPH enables).
 * Returns 0 or an error.
 * NOTE(review): elided listing — the error-return bodies and some
 * context fields (e.g. dtype/crcstrip) are not visible.
 */
5732 ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5733 struct i40e_virtchnl_rxq_info *info)
5736 struct i40e_hmc_obj_rxq rxq;
5737 uint16_t global_queue_num;
5738 enum i40e_status_code status;
5741 global_queue_num = vf->vsi.first_queue + info->queue_id;
5742 bzero(&rxq, sizeof(rxq));
/* Reject buffer/frame sizes outside hardware limits. */
5744 if (info->databuffer_size > IXL_VF_MAX_BUFFER)
5747 if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
5748 info->max_pkt_size < ETHER_MIN_LEN)
5751 if (info->splithdr_enabled) {
5752 if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
/* Only the supported header-split positions may be requested. */
5755 rxq.hsplit_0 = info->rx_split_pos &
5756 (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
5757 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
5758 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
5759 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
5760 rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
5765 status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
5766 if (status != I40E_SUCCESS)
5769 rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
5770 rxq.qlen = info->ring_len;
5772 rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
5778 rxq.rxmax = info->max_pkt_size;
5779 rxq.tphrdesc_ena = 1;
5780 rxq.tphwdesc_ena = 1;
5781 rxq.tphdata_ena = 1;
5782 rxq.tphhead_ena = 1;
5786 status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
5787 if (status != I40E_SUCCESS)
/*
 * ixl_vf_config_vsi_msg: handle CONFIG_VSI_QUEUES — validate the
 * variable-length message (header + num_queue_pairs entries), check
 * each pair belongs to this VF's VSI, then program the TX and RX queue
 * contexts; ack on success, nack on any failure.
 * NOTE(review): elided listing — 'info = msg;' and return statements
 * are not visible.
 */
5794 ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5797 struct i40e_virtchnl_vsi_queue_config_info *info;
5798 struct i40e_virtchnl_queue_pair_info *pair;
5801 if (msg_size < sizeof(*info)) {
5802 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5808 if (info->num_queue_pairs == 0) {
5809 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
/* Total size must match the declared number of queue pairs exactly. */
5814 if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
5815 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5820 if (info->vsi_id != vf->vsi.vsi_num) {
5821 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5826 for (i = 0; i < info->num_queue_pairs; i++) {
5827 pair = &info->qpair[i];
5829 if (pair->txq.vsi_id != vf->vsi.vsi_num ||
5830 pair->rxq.vsi_id != vf->vsi.vsi_num ||
5831 pair->txq.queue_id != pair->rxq.queue_id ||
5832 pair->txq.queue_id >= vf->vsi.num_queues) {
5834 i40e_send_vf_nack(pf, vf,
5835 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5839 if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
5840 i40e_send_vf_nack(pf, vf,
5841 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5845 if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
5846 i40e_send_vf_nack(pf, vf,
5847 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5852 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
/*
 * ixl_vf_set_qctl: write one queue-interrupt control register
 * (RQCTL/TQCTL) linking the given queue into the vector's interrupt
 * chain, then advance the caller's last-type/last-queue cursor so the
 * next call chains onto this queue.
 */
5856 ixl_vf_set_qctl(struct ixl_pf *pf,
5857 const struct i40e_virtchnl_vector_map *vector,
5858 enum i40e_queue_type cur_type, uint16_t cur_queue,
5859 enum i40e_queue_type *last_type, uint16_t *last_queue)
5861 uint32_t offset, qctl;
/* RX and TX queues use different register files and ITR indices. */
5864 if (cur_type == I40E_QUEUE_TYPE_RX) {
5865 offset = I40E_QINT_RQCTL(cur_queue);
5866 itr_indx = vector->rxitr_idx;
5868 offset = I40E_QINT_TQCTL(cur_queue);
5869 itr_indx = vector->txitr_idx;
5872 qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5873 (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5874 (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5875 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5876 (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
5878 wr32(&pf->hw, offset, qctl);
5880 *last_type = cur_type;
5881 *last_queue = cur_queue;
/*
 * ixl_vf_config_vector: build the interrupt linked list for one vector
 * from its RX/TX queue bitmaps, interleaving RX and TX entries, then
 * point the vector's LNKLST register at the head of the chain.
 * NOTE(review): elided listing — the 'if (txq_map != 0)' / RX guards
 * inside the while loop are on missing lines.
 */
5885 ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
5886 const struct i40e_virtchnl_vector_map *vector)
5890 enum i40e_queue_type type, last_type;
5891 uint32_t lnklst_reg;
5892 uint16_t rxq_map, txq_map, cur_queue, last_queue;
5896 rxq_map = vector->rxq_map;
5897 txq_map = vector->txq_map;
/* Chain starts terminated; entries are linked back-to-front. */
5899 last_queue = IXL_END_OF_INTR_LNKLST;
5900 last_type = I40E_QUEUE_TYPE_RX;
5903 * The datasheet says to optimize performance, RX queues and TX queues
5904 * should be interleaved in the interrupt linked list, so we process
5905 * both at once here.
5907 while ((rxq_map != 0) || (txq_map != 0)) {
5909 qindex = ffs(txq_map) - 1;
5910 type = I40E_QUEUE_TYPE_TX;
5911 cur_queue = vf->vsi.first_queue + qindex;
5912 ixl_vf_set_qctl(pf, vector, type, cur_queue,
5913 &last_type, &last_queue);
5914 txq_map &= ~(1 << qindex);
5918 qindex = ffs(rxq_map) - 1;
5919 type = I40E_QUEUE_TYPE_RX;
5920 cur_queue = vf->vsi.first_queue + qindex;
5921 ixl_vf_set_qctl(pf, vector, type, cur_queue,
5922 &last_type, &last_queue);
5923 rxq_map &= ~(1 << qindex);
/* Vector 0 uses the dedicated LNKLST0 register. */
5927 if (vector->vector_id == 0)
5928 lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
5930 lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
5932 wr32(hw, lnklst_reg,
5933 (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5934 (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
/*
 * ixl_vf_config_irq_msg: handle CONFIG_IRQ_MAP — validate the
 * variable-length message and every vector map (vector id in range,
 * correct VSI, queue bitmaps within the VF's queue count, ITR indices
 * in range), then program each vector; ack on success.
 * NOTE(review): elided listing — 'map = msg;' and return statements
 * are not visible.
 */
5940 ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5943 struct i40e_virtchnl_irq_map_info *map;
5944 struct i40e_virtchnl_vector_map *vector;
5946 int i, largest_txq, largest_rxq;
5950 if (msg_size < sizeof(*map)) {
5951 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5957 if (map->num_vectors == 0) {
5958 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5963 if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
5964 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5969 for (i = 0; i < map->num_vectors; i++) {
5970 vector = &map->vecmap[i];
5972 if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
5973 vector->vsi_id != vf->vsi.vsi_num) {
5974 i40e_send_vf_nack(pf, vf,
5975 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
/* Highest set bit in each bitmap must be a valid queue index. */
5979 if (vector->rxq_map != 0) {
5980 largest_rxq = fls(vector->rxq_map) - 1;
5981 if (largest_rxq >= vf->vsi.num_queues) {
5982 i40e_send_vf_nack(pf, vf,
5983 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5989 if (vector->txq_map != 0) {
5990 largest_txq = fls(vector->txq_map) - 1;
5991 if (largest_txq >= vf->vsi.num_queues) {
5992 i40e_send_vf_nack(pf, vf,
5993 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5999 if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
6000 vector->txitr_idx > IXL_MAX_ITR_IDX) {
6001 i40e_send_vf_nack(pf, vf,
6002 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6007 ixl_vf_config_vector(pf, vf, vector);
6010 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
/*
 * ixl_vf_enable_queues_msg: handle ENABLE_QUEUES — validate size and
 * VSI/queue selection, enable the VF's rings, and ack or nack.
 * NOTE(review): elided listing — 'select = msg;' and returns are not
 * visible.
 */
6014 ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6017 struct i40e_virtchnl_queue_select *select;
6020 if (msg_size != sizeof(*select)) {
6021 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6027 if (select->vsi_id != vf->vsi.vsi_num ||
6028 select->rx_queues == 0 || select->tx_queues == 0) {
6029 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6034 error = ixl_enable_rings(&vf->vsi);
6036 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6041 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
/*
 * ixl_vf_disable_queues_msg: handle DISABLE_QUEUES — mirror of the
 * enable path: validate, disable the VF's rings, and ack or nack.
 * NOTE(review): elided listing — 'select = msg;' and returns are not
 * visible.
 */
6045 ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6046 void *msg, uint16_t msg_size)
6048 struct i40e_virtchnl_queue_select *select;
6051 if (msg_size != sizeof(*select)) {
6052 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6058 if (select->vsi_id != vf->vsi.vsi_num ||
6059 select->rx_queues == 0 || select->tx_queues == 0) {
6060 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6065 error = ixl_disable_rings(&vf->vsi);
6067 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6072 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
/* ixl_zero_mac: true if addr is the all-zero Ethernet address. */
6076 ixl_zero_mac(const uint8_t *addr)
6078 uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
6080 return (cmp_etheraddr(addr, zero));
/* ixl_bcast_mac: true if addr is the Ethernet broadcast address. */
6084 ixl_bcast_mac(const uint8_t *addr)
6087 return (cmp_etheraddr(addr, ixl_bcast_addr));
6091 ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
6094 if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
6098 * If the VF is not allowed to change its MAC address, don't let it
6099 * set a MAC filter for an address that is not a multicast address and
6100 * is not its assigned MAC.
6102 if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
6103 !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
/*
 * ixl_vf_add_mac_msg: handle ADD_ETHER_ADDRESS — validate the
 * variable-length message, validate every address via
 * ixl_vf_mac_valid() before adding any, then install the filters and
 * ack.
 * NOTE(review): elided listing — 'addr_list = msg;' / 'vsi = &vf->vsi;'
 * and return statements are not visible.
 */
6110 ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6113 struct i40e_virtchnl_ether_addr_list *addr_list;
6114 struct i40e_virtchnl_ether_addr *addr;
6115 struct ixl_vsi *vsi;
6117 size_t expected_size;
6121 if (msg_size < sizeof(*addr_list)) {
6122 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6128 expected_size = sizeof(*addr_list) +
6129 addr_list->num_elements * sizeof(*addr);
6131 if (addr_list->num_elements == 0 ||
6132 addr_list->vsi_id != vsi->vsi_num ||
6133 msg_size != expected_size) {
6134 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
/* Validate all addresses before committing any of them. */
6139 for (i = 0; i < addr_list->num_elements; i++) {
6140 if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
6141 i40e_send_vf_nack(pf, vf,
6142 I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6147 for (i = 0; i < addr_list->num_elements; i++) {
6148 addr = &addr_list->list[i];
6149 ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
6152 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
6156 ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6159 struct i40e_virtchnl_ether_addr_list *addr_list;
6160 struct i40e_virtchnl_ether_addr *addr;
6161 size_t expected_size;
6164 if (msg_size < sizeof(*addr_list)) {
6165 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6171 expected_size = sizeof(*addr_list) +
6172 addr_list->num_elements * sizeof(*addr);
6174 if (addr_list->num_elements == 0 ||
6175 addr_list->vsi_id != vf->vsi.vsi_num ||
6176 msg_size != expected_size) {
6177 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6182 for (i = 0; i < addr_list->num_elements; i++) {
6183 addr = &addr_list->list[i];
6184 if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
6185 i40e_send_vf_nack(pf, vf,
6186 I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6191 for (i = 0; i < addr_list->num_elements; i++) {
6192 addr = &addr_list->list[i];
6193 ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
6196 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
/*
 * ixl_vf_enable_vlan_strip: update the VF's VSI parameters to enable
 * VLAN stripping into the RX descriptor (EMOD_STR_BOTH) for all VLANs.
 * Returns the AdminQ status code.
 */
6199 static enum i40e_status_code
6200 ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
6202 struct i40e_vsi_context vsi_ctx;
6204 vsi_ctx.seid = vf->vsi.seid;
6206 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
6207 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
6208 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
6209 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6210 return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
/*
 * ixl_vf_add_vlan_msg: handle ADD_VLAN — validate the variable-length
 * message, require the VLAN capability, range-check every VLAN id,
 * enable VLAN stripping, then add a filter per VLAN for the VF's MAC
 * and ack.
 * NOTE(review): elided listing — 'filter_list = msg;' and return
 * statements are not visible.
 */
6214 ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6217 struct i40e_virtchnl_vlan_filter_list *filter_list;
6218 enum i40e_status_code code;
6219 size_t expected_size;
6222 if (msg_size < sizeof(*filter_list)) {
6223 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6229 expected_size = sizeof(*filter_list) +
6230 filter_list->num_elements * sizeof(uint16_t);
6231 if (filter_list->num_elements == 0 ||
6232 filter_list->vsi_id != vf->vsi.vsi_num ||
6233 msg_size != expected_size) {
6234 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6239 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6240 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
/* All VLAN ids must be within the 12-bit VLAN id space. */
6245 for (i = 0; i < filter_list->num_elements; i++) {
6246 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6247 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6253 code = ixl_vf_enable_vlan_strip(pf, vf);
6254 if (code != I40E_SUCCESS) {
6255 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6259 for (i = 0; i < filter_list->num_elements; i++)
6260 ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6262 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
6266 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6269 struct i40e_virtchnl_vlan_filter_list *filter_list;
6271 size_t expected_size;
6273 if (msg_size < sizeof(*filter_list)) {
6274 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6280 expected_size = sizeof(*filter_list) +
6281 filter_list->num_elements * sizeof(uint16_t);
6282 if (filter_list->num_elements == 0 ||
6283 filter_list->vsi_id != vf->vsi.vsi_num ||
6284 msg_size != expected_size) {
6285 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6290 for (i = 0; i < filter_list->num_elements; i++) {
6291 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6292 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6298 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6299 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6304 for (i = 0; i < filter_list->num_elements; i++)
6305 ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6307 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
6311 ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6312 void *msg, uint16_t msg_size)
6314 struct i40e_virtchnl_promisc_info *info;
6315 enum i40e_status_code code;
6317 if (msg_size != sizeof(*info)) {
6318 i40e_send_vf_nack(pf, vf,
6319 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6323 if (!vf->vf_flags & VF_FLAG_PROMISC_CAP) {
6324 i40e_send_vf_nack(pf, vf,
6325 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6330 if (info->vsi_id != vf->vsi.vsi_num) {
6331 i40e_send_vf_nack(pf, vf,
6332 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6336 code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
6337 info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
6338 if (code != I40E_SUCCESS) {
6339 i40e_send_vf_nack(pf, vf,
6340 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6344 code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
6345 info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
6346 if (code != I40E_SUCCESS) {
6347 i40e_send_vf_nack(pf, vf,
6348 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6352 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
/*
 * Handle I40E_VIRTCHNL_OP_GET_STATS: validate that the request targets the
 * VF's own VSI, refresh that VSI's ethernet counters, and reply with them.
 * NOTE(review): this extraction is missing lines (prototype/static line,
 * "queue = msg;", returns after each NACK, braces) between the numbered
 * fragments below -- verify against the full source.
 */
6356 ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6359 struct i40e_virtchnl_queue_select *queue;
/* Payload must be exactly one queue_select struct. */
6361 if (msg_size != sizeof(*queue)) {
6362 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
/* A VF may only query the VSI assigned to it. */
6368 if (queue->vsi_id != vf->vsi.vsi_num) {
6369 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
/* Pull fresh counters from hardware before replying. */
6374 ixl_update_eth_stats(&vf->vsi);
6376 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6377 I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
/*
 * Dispatch one virtchnl message received from a VF via the PF admin queue.
 * The VF index is recovered from the descriptor's retval field (biased by
 * vf_base_id); each supported opcode is routed to its handler, and the
 * superseded per-queue config opcodes are NACKed as not implemented.
 * NOTE(review): extraction gaps -- declarations (vf, msg, opcode), the
 * "switch (opcode) {" line, break statements and closing braces are not
 * visible here.
 */
6381 ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
6385 uint16_t vf_num, msg_size;
/* desc.retval carries the absolute VF id; convert to a PF-local index. */
6388 vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
6389 opcode = le32toh(event->desc.cookie_high);
/* Bounds-check before indexing pf->vfs. */
6391 if (vf_num >= pf->num_vfs) {
6392 device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
6396 vf = &pf->vfs[vf_num];
6397 msg = event->msg_buf;
6398 msg_size = event->msg_len;
6400 I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
6401 "Got msg %s(%d) from VF-%d of size %d\n",
6402 ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);
/* One handler per supported virtchnl opcode. */
6405 case I40E_VIRTCHNL_OP_VERSION:
6406 ixl_vf_version_msg(pf, vf, msg, msg_size);
6408 case I40E_VIRTCHNL_OP_RESET_VF:
6409 ixl_vf_reset_msg(pf, vf, msg, msg_size);
6411 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
6412 ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
6414 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
6415 ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
6417 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
6418 ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
6420 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
6421 ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
6423 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
6424 ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
6426 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
6427 ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
6429 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
6430 ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
6432 case I40E_VIRTCHNL_OP_ADD_VLAN:
6433 ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
6435 case I40E_VIRTCHNL_OP_DEL_VLAN:
6436 ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
6438 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
6439 ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
6441 case I40E_VIRTCHNL_OP_GET_STATS:
6442 ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
6445 /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
6446 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
6447 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
/* Unknown / unsupported opcodes are NACKed back to the VF. */
6449 i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
6454 /* Handle any VFs that have reset themselves via a Function Level Reset(FLR). */
/*
 * Taskqueue handler: scan the GLGEN_VFLRSTAT registers for VFs whose FLR
 * bit is set, acknowledge each event by writing the bit back (W1C), reinit
 * the affected VF, then re-enable the VFLR interrupt cause in
 * PFINT_ICR0_ENA.
 * NOTE(review): extraction gaps -- locals (pf, hw, i), locking, and closing
 * braces are not visible here.
 */
6456 ixl_handle_vflr(void *arg, int pending)
6460 uint16_t global_vf_num;
6461 uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
6468 for (i = 0; i < pf->num_vfs; i++) {
/* Hardware indexes VFLRSTAT by absolute VF number, not PF-local index. */
6469 global_vf_num = hw->func_caps.vf_base_id + i;
6471 vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
6472 vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
6473 vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
6474 if (vflrstat & vflrstat_mask) {
/* Write-1-to-clear acknowledges the FLR event before reinit. */
6475 wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
6478 ixl_reinit_vf(pf, &pf->vfs[i]);
/* Re-arm the VFLR interrupt cause now that all events are handled. */
6482 icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
6483 icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
6484 wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
/*
 * Map an i40e admin-queue error code to the closest errno(2) value.
 * NOTE(review): the "return (Exxx);" line that follows most case labels is
 * missing from this extraction; only a few returns survived.  Verify the
 * full mapping against the complete source.
 */
6491 ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
6495 case I40E_AQ_RC_EPERM:
6497 case I40E_AQ_RC_ENOENT:
6499 case I40E_AQ_RC_ESRCH:
6501 case I40E_AQ_RC_EINTR:
6503 case I40E_AQ_RC_EIO:
6505 case I40E_AQ_RC_ENXIO:
6507 case I40E_AQ_RC_E2BIG:
6509 case I40E_AQ_RC_EAGAIN:
6511 case I40E_AQ_RC_ENOMEM:
6513 case I40E_AQ_RC_EACCES:
6515 case I40E_AQ_RC_EFAULT:
6517 case I40E_AQ_RC_EBUSY:
6519 case I40E_AQ_RC_EEXIST:
6521 case I40E_AQ_RC_EINVAL:
6523 case I40E_AQ_RC_ENOTTY:
6525 case I40E_AQ_RC_ENOSPC:
6527 case I40E_AQ_RC_ENOSYS:
6529 case I40E_AQ_RC_ERANGE:
6531 case I40E_AQ_RC_EFLUSHED:
6532 return (EINVAL); /* No exact equivalent in errno.h */
6533 case I40E_AQ_RC_BAD_ADDR:
6535 case I40E_AQ_RC_EMODE:
6537 case I40E_AQ_RC_EFBIG:
/*
 * PCI SR-IOV init method: allocate the per-VF state array, initialize a
 * sysctl context for each VF, create a VEB for VF switching via the admin
 * queue, then reconfigure MSI-X and re-enable the admin queue.  On VEB
 * creation failure the VF array is freed.
 * NOTE(review): extraction gaps -- return type, locals (hw, i, error),
 * locking, "pf_vsi = &pf->vsi;", returns and the error label are not
 * visible here.
 */
6545 ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t *params)
6549 struct ixl_vsi *pf_vsi;
6550 enum i40e_status_code ret;
6553 pf = device_get_softc(dev);
/* M_NOWAIT: called from a context where sleeping is not allowed. */
6558 pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
6561 if (pf->vfs == NULL) {
6566 for (i = 0; i < num_vfs; i++)
6567 sysctl_ctx_init(&pf->vfs[i].ctx);
/* One VEB hangs all VF VSIs off the PF VSI's uplink. */
6569 ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
6570 1, FALSE, FALSE, &pf->veb_seid, NULL);
6571 if (ret != I40E_SUCCESS) {
6572 error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
6573 device_printf(dev, "add_veb failed; code=%d error=%d", ret,
6578 ixl_configure_msix(pf);
6579 ixl_enable_adminq(hw);
6581 pf->num_vfs = num_vfs;
/* Error path: undo the VF array allocation. */
6586 free(pf->vfs, M_IXL);
/*
 * PCI SR-IOV uninit method: delete each VF's VSI element and the shared
 * VEB from the switch, quiesce interrupts if the interface is down, then
 * free the per-VF sysctl contexts.
 * NOTE(review): extraction gaps -- return type, locals (pf, hw, ifp, vfs,
 * i, num_vfs), locking, "vfs = pf->vfs;", clearing of pf state and the
 * free of the VF array are not visible here.
 */
6593 ixl_uninit_iov(device_t dev)
6597 struct ixl_vsi *vsi;
6602 pf = device_get_softc(dev);
/* Only delete VSIs that were actually created (seid != 0). */
6608 for (i = 0; i < pf->num_vfs; i++) {
6609 if (pf->vfs[i].vsi.seid != 0)
6610 i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
6613 if (pf->veb_seid != 0) {
6614 i40e_aq_delete_element(hw, pf->veb_seid, NULL);
6618 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
6619 ixl_disable_intr(vsi);
/* Snapshot count before dropping the lock (see comment below). */
6622 num_vfs = pf->num_vfs;
6628 /* Do this after the unlock as sysctl_ctx_free might sleep. */
6629 for (i = 0; i < num_vfs; i++)
6630 sysctl_ctx_free(&vfs[i].ctx);
/*
 * PCI SR-IOV add-VF method: initialize per-VF state, set up the VF's VSI,
 * apply the administrator-supplied nvlist parameters (MAC address,
 * set-mac / anti-spoof / promisc capabilities), reset the VF, and register
 * per-VF sysctl nodes.
 * NOTE(review): extraction gaps -- return type, locals (pf, vf, mac, size,
 * error), locking, error handling after ixl_vf_setup_vsi, the "} else {"
 * around the no-MAC branch, and returns are not visible here.
 */
6635 ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
6637 char sysctl_name[QUEUE_NAME_LEN];
6644 pf = device_get_softc(dev);
6645 vf = &pf->vfs[vfnum];
6651 vf->vf_flags = VF_FLAG_ENABLED;
6652 SLIST_INIT(&vf->vsi.ftl);
6654 error = ixl_vf_setup_vsi(pf, vf);
/* Admin-specified MAC: copy it in; set-mac capability is then opt-in. */
6658 if (nvlist_exists_binary(params, "mac-addr")) {
6659 mac = nvlist_get_binary(params, "mac-addr", &size);
6660 bcopy(mac, vf->mac, ETHER_ADDR_LEN);
6662 if (nvlist_get_bool(params, "allow-set-mac"))
6663 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6666 * If the administrator has not specified a MAC address then
6667 * we must allow the VF to choose one.
6669 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6671 if (nvlist_get_bool(params, "mac-anti-spoof"))
6672 vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
6674 if (nvlist_get_bool(params, "allow-promisc"))
6675 vf->vf_flags |= VF_FLAG_PROMISC_CAP;
/* VLAN capability is always granted (no nvlist knob for it here). */
6677 vf->vf_flags |= VF_FLAG_VLAN_CAP;
6679 ixl_reset_vf(pf, vf);
/* Expose per-VF VSI statistics under a "vf%d" sysctl node. */
6683 snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
6684 ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
6689 #endif /* PCI_IOV */