1 /******************************************************************************
3 Copyright (c) 2013-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifndef IXL_STANDALONE_BUILD
37 #include "opt_inet6.h"
44 #include <net/rss_config.h>
47 /*********************************************************************
49 *********************************************************************/
/* Human-readable driver version string. */
50 char ixl_driver_version[] = "1.4.1";
52 /*********************************************************************
55 * Used by probe to select devices to load on
56 * Last field stores an index into ixl_strings
57 * Last entry must be all 0s
59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
60 *********************************************************************/
/*
 * PCI ID match table. A subvendor/subdevice of 0 acts as a wildcard
 * in ixl_probe()'s match loop; the loop stops at vendor_id == 0.
 */
62 static ixl_vendor_info_t ixl_vendor_info_array[] =
64 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
65 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
66 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
67 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
68 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
69 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
70 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
71 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
72 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2, 0, 0, 0},
73 /* required last entry */
77 /*********************************************************************
78 * Table of branding strings
79 *********************************************************************/
/* Indexed by the last field of ixl_vendor_info_array entries. */
81 static char *ixl_strings[] = {
82 "Intel(R) Ethernet Connection XL710 Driver"
86 /*********************************************************************
 *  Function prototypes for the device driver
88 *********************************************************************/
/* Forward declarations for the static routines defined later in this file. */
/* FreeBSD device interface entry points (wired up in ixl_methods below). */
89 static int ixl_probe(device_t);
90 static int ixl_attach(device_t);
91 static int ixl_detach(device_t);
92 static int ixl_shutdown(device_t);
93 static int ixl_get_hw_capabilities(struct ixl_pf *);
94 static void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
95 static int ixl_ioctl(struct ifnet *, u_long, caddr_t);
96 static void ixl_init(void *);
97 static void ixl_init_locked(struct ixl_pf *);
98 static void ixl_stop(struct ixl_pf *);
99 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
100 static int ixl_media_change(struct ifnet *);
101 static void ixl_update_link_status(struct ixl_pf *);
102 static int ixl_allocate_pci_resources(struct ixl_pf *);
103 static u16 ixl_get_bus_info(struct i40e_hw *, device_t);
104 static int ixl_setup_stations(struct ixl_pf *);
105 static int ixl_switch_config(struct ixl_pf *);
106 static int ixl_initialize_vsi(struct ixl_vsi *);
107 static int ixl_assign_vsi_msix(struct ixl_pf *);
108 static int ixl_assign_vsi_legacy(struct ixl_pf *);
109 static int ixl_init_msix(struct ixl_pf *);
110 static void ixl_configure_msix(struct ixl_pf *);
111 static void ixl_configure_itr(struct ixl_pf *);
112 static void ixl_configure_legacy(struct ixl_pf *);
113 static void ixl_free_pci_resources(struct ixl_pf *);
114 static void ixl_local_timer(void *);
115 static int ixl_setup_interface(device_t, struct ixl_vsi *);
116 static void ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
117 static void ixl_config_rss(struct ixl_vsi *);
118 static void ixl_set_queue_rx_itr(struct ixl_queue *);
119 static void ixl_set_queue_tx_itr(struct ixl_queue *);
120 static int ixl_set_advertised_speeds(struct ixl_pf *, int);
/* Ring and interrupt enable/disable helpers. */
122 static int ixl_enable_rings(struct ixl_vsi *);
123 static int ixl_disable_rings(struct ixl_vsi *);
124 static void ixl_enable_intr(struct ixl_vsi *);
125 static void ixl_disable_intr(struct ixl_vsi *);
126 static void ixl_disable_rings_intr(struct ixl_vsi *);
128 static void ixl_enable_adminq(struct i40e_hw *);
129 static void ixl_disable_adminq(struct i40e_hw *);
130 static void ixl_enable_queue(struct i40e_hw *, int);
131 static void ixl_disable_queue(struct i40e_hw *, int);
132 static void ixl_enable_legacy(struct i40e_hw *);
133 static void ixl_disable_legacy(struct i40e_hw *);
/* RX filter, multicast and VLAN management. */
135 static void ixl_set_promisc(struct ixl_vsi *);
136 static void ixl_add_multi(struct ixl_vsi *);
137 static void ixl_del_multi(struct ixl_vsi *);
138 static void ixl_register_vlan(void *, struct ifnet *, u16);
139 static void ixl_unregister_vlan(void *, struct ifnet *, u16);
140 static void ixl_setup_vlan_filters(struct ixl_vsi *);
142 static void ixl_init_filters(struct ixl_vsi *);
143 static void ixl_reconfigure_filters(struct ixl_vsi *vsi);
144 static void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
145 static void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
146 static void ixl_add_hw_filters(struct ixl_vsi *, int, int);
147 static void ixl_del_hw_filters(struct ixl_vsi *, int);
148 static struct ixl_mac_filter *
149 ixl_find_filter(struct ixl_vsi *, u8 *, s16);
150 static void ixl_add_mc_filter(struct ixl_vsi *, u8 *);
151 static void ixl_free_mac_filters(struct ixl_vsi *vsi);
154 /* Sysctl debug interface */
155 static int ixl_debug_info(SYSCTL_HANDLER_ARGS);
156 static void ixl_print_debug_info(struct ixl_pf *);
158 /* The MSI/X Interrupt handlers */
159 static void ixl_intr(void *);
160 static void ixl_msix_que(void *);
161 static void ixl_msix_adminq(void *);
162 static void ixl_handle_mdd_event(struct ixl_pf *);
164 /* Deferred interrupt tasklets */
165 static void ixl_do_adminq(void *, int);
167 /* Sysctl handlers */
168 static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
169 static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
170 static int ixl_current_speed(SYSCTL_HANDLER_ARGS);
171 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
/* Statistics gathering and sysctl plumbing. */
174 static void ixl_add_hw_stats(struct ixl_pf *);
175 static void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
176 struct sysctl_oid_list *, struct i40e_hw_port_stats *);
177 static void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
178 struct sysctl_oid_list *,
179 struct i40e_eth_stats *);
180 static void ixl_update_stats_counters(struct ixl_pf *);
181 static void ixl_update_eth_stats(struct ixl_vsi *);
182 static void ixl_update_vsi_stats(struct ixl_vsi *);
183 static void ixl_pf_reset_stats(struct ixl_pf *);
184 static void ixl_vsi_reset_stats(struct ixl_vsi *);
185 static void ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
187 static void ixl_stat_update32(struct i40e_hw *, u32, bool,
190 #ifdef IXL_DEBUG_SYSCTL
191 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
192 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
193 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
194 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
195 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
199 static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
/* SR-IOV (PCI virtual function) support. */
201 static int ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t*);
202 static void ixl_uninit_iov(device_t dev);
203 static int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);
205 static void ixl_handle_vf_msg(struct ixl_pf *,
206 struct i40e_arq_event_info *);
207 static void ixl_handle_vflr(void *arg, int pending);
209 static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
210 static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
213 /*********************************************************************
214 * FreeBSD Device Interface Entry Points
215 *********************************************************************/
217 static device_method_t ixl_methods[] = {
218 /* Device interface */
219 DEVMETHOD(device_probe, ixl_probe),
220 DEVMETHOD(device_attach, ixl_attach),
221 DEVMETHOD(device_detach, ixl_detach),
222 DEVMETHOD(device_shutdown, ixl_shutdown),
/* PCI bus SR-IOV interface */
224 DEVMETHOD(pci_init_iov, ixl_init_iov),
225 DEVMETHOD(pci_uninit_iov, ixl_uninit_iov),
226 DEVMETHOD(pci_add_vf, ixl_add_vf),
/* Driver description: per-instance softc is a struct ixl_pf. */
231 static driver_t ixl_driver = {
232 "ixl", ixl_methods, sizeof(struct ixl_pf),
235 devclass_t ixl_devclass;
/* Register the driver with the PCI bus and declare module dependencies. */
236 DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
238 MODULE_DEPEND(ixl, pci, 1, 1, 1);
239 MODULE_DEPEND(ixl, ether, 1, 1, 1);
242 MODULE_DEPEND(ixl, netmap, 1, 1, 1);
243 #endif /* DEV_NETMAP */
247 ** Global reset mutex
/* Protects the one-shot global PF reset path; initialized in ixl_probe(). */
249 static struct mtx ixl_reset_mtx;
252 ** TUNEABLE PARAMETERS:
/* Parent sysctl node: hw.ixl.* */
255 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
256 "IXL driver parameters");
259 * MSIX should be the default for best performance,
260 * but this allows it to be forced off for testing.
262 static int ixl_enable_msix = 1;
263 TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
264 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
265 "Enable MSI-X interrupts");
268 ** Number of descriptors per ring:
269 ** - TX and RX are the same size
271 static int ixl_ringsz = DEFAULT_RING;
272 TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
273 SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
274 &ixl_ringsz, 0, "Descriptor Ring Size");
277 ** This can be set manually, if left as 0 the
278 ** number of queues will be calculated based
279 ** on cpus and msix vectors available.
281 int ixl_max_queues = 0;
282 TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
283 SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
284 &ixl_max_queues, 0, "Number of Queues");
287 ** Controls for Interrupt Throttling
288 ** - true/false for dynamic adjustment
289 ** - default values for static ITR
291 int ixl_dynamic_rx_itr = 0;
292 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
293 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
294 &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
296 int ixl_dynamic_tx_itr = 0;
297 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
298 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
299 &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
301 int ixl_rx_itr = IXL_ITR_8K;
302 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
303 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
304 &ixl_rx_itr, 0, "RX Interrupt Rate");
306 int ixl_tx_itr = IXL_ITR_4K;
307 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
308 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
309 &ixl_tx_itr, 0, "TX Interrupt Rate");
/* Flow director tunables; presumably only consumed under IXL_FDIR — TODO confirm. */
312 static int ixl_enable_fdir = 1;
313 TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
314 /* Rate at which we sample */
315 int ixl_atr_rate = 20;
316 TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
320 #define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
321 #include <dev/netmap/if_ixl_netmap.h>
322 #endif /* DEV_NETMAP */
/* Text names for the flow-control modes reported via the "fc" sysctl. */
324 static char *ixl_fc_string[6] = {
333 static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
/* Ethernet broadcast address used when programming HW filters. */
335 static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
336 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
338 /*********************************************************************
339 * Device identification routine
341 * ixl_probe determines if the driver should be loaded on
342 * the hardware based on PCI vendor/device id of the device.
344 * return BUS_PROBE_DEFAULT on success, positive on failure
345 *********************************************************************/
348 ixl_probe(device_t dev)
350 ixl_vendor_info_t *ent;
352 u16 pci_vendor_id, pci_device_id;
353 u16 pci_subvendor_id, pci_subdevice_id;
/* NOTE(review): fixed-size buffer filled via sprintf below; snprintf would be safer. */
354 char device_name[256];
/* One-shot flag so the global reset mutex is initialized only once. */
355 static bool lock_init = FALSE;
357 INIT_DEBUGOUT("ixl_probe: begin");
/* Bail out early for non-Intel devices. */
359 pci_vendor_id = pci_get_vendor(dev);
360 if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
363 pci_device_id = pci_get_device(dev);
364 pci_subvendor_id = pci_get_subvendor(dev);
365 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the match table; it is terminated by an all-zero (vendor_id == 0) entry. */
367 ent = ixl_vendor_info_array;
368 while (ent->vendor_id != 0) {
369 if ((pci_vendor_id == ent->vendor_id) &&
370 (pci_device_id == ent->device_id) &&
/* A table subvendor/subdevice of 0 matches any value (wildcard). */
372 ((pci_subvendor_id == ent->subvendor_id) ||
373 (ent->subvendor_id == 0)) &&
375 ((pci_subdevice_id == ent->subdevice_id) ||
376 (ent->subdevice_id == 0))) {
377 sprintf(device_name, "%s, Version - %s",
378 ixl_strings[ent->index],
380 device_set_desc_copy(dev, device_name);
381 /* One shot mutex init */
382 if (lock_init == FALSE) {
384 mtx_init(&ixl_reset_mtx,
386 "IXL RESET Lock", MTX_DEF);
388 return (BUS_PROBE_DEFAULT);
395 /*********************************************************************
396 * Device initialization routine
398 * The attach entry point is called when the driver is being loaded.
399 * This routine identifies the type of hardware, allocates all resources
400 * and initializes the hardware.
402 * return 0 on success, positive on failure
403 *********************************************************************/
406 ixl_attach(device_t dev)
414 nvlist_t *pf_schema, *vf_schema;
418 INIT_DEBUGOUT("ixl_attach: begin");
420 /* Allocate, clear, and link in our primary soft structure */
421 pf = device_get_softc(dev);
422 pf->dev = pf->osdep.dev = dev;
426 ** Note this assumes we have a single embedded VSI,
427 ** this could be enhanced later to allocate multiple
433 IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
435 /* Set up the timer callout */
436 callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
/* Per-device sysctls: flow control, advertised/current speed, FW version. */
439 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
440 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
441 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
442 pf, 0, ixl_set_flowcntl, "I", "Flow Control");
444 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
445 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
446 OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
447 pf, 0, ixl_set_advertise, "I", "Advertised Speed");
449 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
450 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
451 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
452 pf, 0, ixl_current_speed, "A", "Current Port Speed");
454 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
455 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
456 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
457 pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
/* NOTE(review): these per-device ITR sysctls point at the file-scope globals,
 * so writing one instance affects all ixl devices — confirm intended. */
459 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
460 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
461 OID_AUTO, "rx_itr", CTLFLAG_RW,
462 &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
464 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
465 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
466 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
467 &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
469 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
470 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
471 OID_AUTO, "tx_itr", CTLFLAG_RW,
472 &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
474 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
475 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
476 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
477 &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
479 #ifdef IXL_DEBUG_SYSCTL
480 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
481 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
482 OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
483 pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");
485 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
486 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
487 OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
488 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
490 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
491 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
492 OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
493 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
495 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
496 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
497 OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
498 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
500 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
501 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
502 OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
503 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
506 /* Save off the PCI information */
507 hw->vendor_id = pci_get_vendor(dev);
508 hw->device_id = pci_get_device(dev);
509 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
510 hw->subsystem_vendor_id =
511 pci_read_config(dev, PCIR_SUBVEND_0, 2);
512 hw->subsystem_device_id =
513 pci_read_config(dev, PCIR_SUBDEV_0, 2);
515 hw->bus.device = pci_get_slot(dev);
516 hw->bus.func = pci_get_function(dev);
518 pf->vc_debug_lvl = 1;
520 /* Do PCI setup - map BAR0, etc */
521 if (ixl_allocate_pci_resources(pf)) {
522 device_printf(dev, "Allocation of PCI resources failed\n");
527 /* Create for initial debugging use */
528 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
529 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
530 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
531 ixl_debug_info, "I", "Debug Information");
534 /* Establish a clean starting point */
536 error = i40e_pf_reset(hw);
538 device_printf(dev,"PF reset failure %x\n", error);
543 /* Set admin queue parameters */
544 hw->aq.num_arq_entries = IXL_AQ_LEN;
545 hw->aq.num_asq_entries = IXL_AQ_LEN;
546 hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
547 hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
549 /* Initialize the shared code */
550 error = i40e_init_shared_code(hw);
552 device_printf(dev,"Unable to initialize the shared code\n");
557 /* Set up the admin queue */
558 error = i40e_init_adminq(hw);
560 device_printf(dev, "The driver for the device stopped "
561 "because the NVM image is newer than expected.\n"
562 "You must install the most recent version of "
563 " the network driver.\n");
566 device_printf(dev, "%s\n", ixl_fw_version_str(hw));
/* Warn (non-fatal) when the firmware API version differs from the one
 * the driver was built against. */
568 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
569 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
570 device_printf(dev, "The driver for the device detected "
571 "a newer version of the NVM image than expected.\n"
572 "Please install the most recent version of the network driver.\n");
573 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
574 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
575 device_printf(dev, "The driver for the device detected "
576 "an older version of the NVM image than expected.\n"
577 "Please update the NVM image.\n");
580 i40e_clear_pxe_mode(hw);
582 /* Get capabilities from the device */
583 error = ixl_get_hw_capabilities(pf);
585 device_printf(dev, "HW capabilities failure!\n");
589 /* Set up host memory cache */
590 error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
591 hw->func_caps.num_rx_qp, 0, 0);
593 device_printf(dev, "init_lan_hmc failed: %d\n", error);
597 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
599 device_printf(dev, "configure_lan_hmc failed: %d\n", error);
603 /* Disable LLDP from the firmware */
604 i40e_aq_stop_lldp(hw, TRUE, NULL);
/* Fetch and validate the MAC address, then save the permanent copy. */
606 i40e_get_mac_addr(hw, hw->mac.addr);
607 error = i40e_validate_mac_addr(hw->mac.addr);
609 device_printf(dev, "validate_mac_addr failed: %d\n", error);
612 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
613 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
615 /* Set up VSI and queues */
616 if (ixl_setup_stations(pf) != 0) {
617 device_printf(dev, "setup stations failed!\n");
622 /* Initialize mac filter list for VSI */
623 SLIST_INIT(&vsi->ftl);
625 /* Set up interrupt routing here */
627 error = ixl_assign_vsi_msix(pf);
629 error = ixl_assign_vsi_legacy(pf);
/* Firmware older than 4.33 gets an explicit autonegotiation restart. */
633 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
634 (hw->aq.fw_maj_ver < 4)) {
636 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
638 device_printf(dev, "link restart failed, aq_err=%d\n",
639 pf->hw.aq.asq_last_status);
642 /* Determine link state */
643 i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
644 pf->link_up = i40e_get_link_status(hw);
646 /* Setup OS specific network interface */
647 if (ixl_setup_interface(dev, vsi) != 0) {
648 device_printf(dev, "interface setup failed!\n");
653 error = ixl_switch_config(pf);
655 device_printf(dev, "Initial switch config failed: %d\n", error);
659 /* Limit phy interrupts to link and modules failure */
660 error = i40e_aq_set_phy_int_mask(hw,
661 I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
663 device_printf(dev, "set phy mask failed: %d\n", error);
665 /* Get the bus configuration and set the shared code */
666 bus = ixl_get_bus_info(hw, dev);
667 i40e_set_pci_config_data(hw, bus);
669 /* Initialize statistics */
670 ixl_pf_reset_stats(pf);
671 ixl_update_stats_counters(pf);
672 ixl_add_hw_stats(pf);
674 /* Register for VLAN events */
675 vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
676 ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
677 vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
678 ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
681 /* SR-IOV is only supported when MSI-X is in use. */
/* Build the VF configuration schema exposed to iovctl(8). */
683 pf_schema = pci_iov_schema_alloc_node();
684 vf_schema = pci_iov_schema_alloc_node();
685 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
686 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
687 IOV_SCHEMA_HASDEFAULT, TRUE);
688 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
689 IOV_SCHEMA_HASDEFAULT, FALSE);
690 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
691 IOV_SCHEMA_HASDEFAULT, FALSE);
693 iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
696 "Failed to initialize SR-IOV (error=%d)\n",
702 ixl_netmap_attach(vsi);
703 #endif /* DEV_NETMAP */
705 INIT_DEBUGOUT("ixl_attach: end");
/* Error unwind: release everything acquired above, in reverse order. */
709 if (vsi->ifp != NULL)
712 i40e_shutdown_lan_hmc(hw);
714 i40e_shutdown_adminq(hw);
716 ixl_free_pci_resources(pf);
718 IXL_PF_LOCK_DESTROY(pf);
722 /*********************************************************************
723 * Device removal routine
725 * The detach entry point is called when the driver is being removed.
726 * This routine stops the adapter and deallocates all the resources
727 * that were allocated for driver operation.
729 * return 0 on success, positive on failure
730 *********************************************************************/
733 ixl_detach(device_t dev)
735 struct ixl_pf *pf = device_get_softc(dev);
736 struct i40e_hw *hw = &pf->hw;
737 struct ixl_vsi *vsi = &pf->vsi;
738 struct ixl_queue *que = vsi->queues;
744 INIT_DEBUGOUT("ixl_detach: begin");
746 /* Make sure VLANS are not using driver */
747 if (vsi->ifp->if_vlantrunk != NULL) {
748 device_printf(dev,"Vlan in use, detach first\n");
/* Likewise refuse to detach while VFs are still attached. */
753 error = pci_iov_detach(dev);
755 device_printf(dev, "SR-IOV in use; detach first.\n");
760 ether_ifdetach(vsi->ifp);
761 if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Drain and free each queue's taskqueue before tearing down HW state. */
767 for (int i = 0; i < vsi->num_queues; i++, que++) {
769 taskqueue_drain(que->tq, &que->task);
770 taskqueue_drain(que->tq, &que->tx_task);
771 taskqueue_free(que->tq);
775 /* Shutdown LAN HMC */
776 status = i40e_shutdown_lan_hmc(hw);
779 "Shutdown LAN HMC failed with code %d\n", status);
781 /* Shutdown admin queue */
782 status = i40e_shutdown_adminq(hw);
785 "Shutdown Admin queue failed with code %d\n", status);
787 /* Unregister VLAN events */
788 if (vsi->vlan_attach != NULL)
789 EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
790 if (vsi->vlan_detach != NULL)
791 EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
/* Stop the watchdog timer and release remaining resources. */
793 callout_drain(&pf->timer);
795 netmap_detach(vsi->ifp);
796 #endif /* DEV_NETMAP */
797 ixl_free_pci_resources(pf);
798 bus_generic_detach(dev);
801 IXL_PF_LOCK_DESTROY(pf);
805 /*********************************************************************
807 * Shutdown entry point
809 **********************************************************************/
812 ixl_shutdown(device_t dev)
814 	struct ixl_pf *pf = device_get_softc(dev); /* our per-device softc */
822 /*********************************************************************
824 * Get the hardware capabilities
826 **********************************************************************/
829 ixl_get_hw_capabilities(struct ixl_pf *pf)
831 struct i40e_aqc_list_capabilities_element_resp *buf;
832 struct i40e_hw *hw = &pf->hw;
833 device_t dev = pf->dev;
/* Initial guess: room for 40 capability elements; retried larger below
 * if the firmware reports ENOMEM. */
838 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
/* NOTE(review): allocates with M_DEVBUF although this driver defines its
 * own M_IXL malloc type — confirm which is intended. */
840 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
841 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
842 device_printf(dev, "Unable to allocate cap memory\n");
846 /* This populates the hw struct */
847 error = i40e_aq_discover_capabilities(hw, buf, len,
848 &needed, i40e_aqc_opc_list_func_capabilities, NULL);
850 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
852 /* retry once with a larger buffer */
856 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
857 device_printf(dev, "capability discovery failed: %d\n",
858 pf->hw.aq.asq_last_status);
862 /* Capture this PF's starting queue pair */
863 pf->qbase = hw->func_caps.base_queue;
/* Dump the discovered function capabilities for debugging. */
866 device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
867 "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
868 hw->pf_id, hw->func_caps.num_vfs,
869 hw->func_caps.num_msix_vectors,
870 hw->func_caps.num_msix_vectors_vf,
871 hw->func_caps.fd_filters_guaranteed,
872 hw->func_caps.fd_filters_best_effort,
873 hw->func_caps.num_tx_qp,
874 hw->func_caps.num_rx_qp,
875 hw->func_caps.base_queue);
/*
 * Keep the TXCSUM and TSO capability bits consistent: TSO requires TX
 * checksum offload, so enabling TSO forces TXCSUM on, and disabling
 * TXCSUM forces TSO off. The IXL_FLAGS_KEEP_TSO4/6 VSI flags remember
 * that TSO was on so it can be restored when TXCSUM is re-enabled.
 * 'mask' holds the capability bits the user toggled via SIOCSIFCAP.
 */
881 ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
883 device_t dev = vsi->dev;
885 /* Enable/disable TXCSUM/TSO4 */
/* Case 1: both currently off. */
886 if (!(ifp->if_capenable & IFCAP_TXCSUM)
887 && !(ifp->if_capenable & IFCAP_TSO4)) {
888 if (mask & IFCAP_TXCSUM) {
889 ifp->if_capenable |= IFCAP_TXCSUM;
890 /* enable TXCSUM, restore TSO if previously enabled */
891 if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
892 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
893 ifp->if_capenable |= IFCAP_TSO4;
896 else if (mask & IFCAP_TSO4) {
897 ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
898 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
900 "TSO4 requires txcsum, enabling both...\n");
/* Case 2: TXCSUM on, TSO4 off — toggles are independent here. */
902 } else if((ifp->if_capenable & IFCAP_TXCSUM)
903 && !(ifp->if_capenable & IFCAP_TSO4)) {
904 if (mask & IFCAP_TXCSUM)
905 ifp->if_capenable &= ~IFCAP_TXCSUM;
906 else if (mask & IFCAP_TSO4)
907 ifp->if_capenable |= IFCAP_TSO4;
/* Case 3: both on — disabling TXCSUM also disables TSO4 but remembers it. */
908 } else if((ifp->if_capenable & IFCAP_TXCSUM)
909 && (ifp->if_capenable & IFCAP_TSO4)) {
910 if (mask & IFCAP_TXCSUM) {
911 vsi->flags |= IXL_FLAGS_KEEP_TSO4;
912 ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
914 "TSO4 requires txcsum, disabling both...\n");
915 } else if (mask & IFCAP_TSO4)
916 ifp->if_capenable &= ~IFCAP_TSO4;
919 /* Enable/disable TXCSUM_IPV6/TSO6 */
/* Same three-case state machine as above, for the IPv6 variants. */
920 if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
921 && !(ifp->if_capenable & IFCAP_TSO6)) {
922 if (mask & IFCAP_TXCSUM_IPV6) {
923 ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
924 if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
925 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
926 ifp->if_capenable |= IFCAP_TSO6;
928 } else if (mask & IFCAP_TSO6) {
929 ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
930 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
932 "TSO6 requires txcsum6, enabling both...\n");
934 } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
935 && !(ifp->if_capenable & IFCAP_TSO6)) {
936 if (mask & IFCAP_TXCSUM_IPV6)
937 ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
938 else if (mask & IFCAP_TSO6)
939 ifp->if_capenable |= IFCAP_TSO6;
940 } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
941 && (ifp->if_capenable & IFCAP_TSO6)) {
942 if (mask & IFCAP_TXCSUM_IPV6) {
943 vsi->flags |= IXL_FLAGS_KEEP_TSO6;
944 ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
946 "TSO6 requires txcsum6, disabling both...\n");
947 } else if (mask & IFCAP_TSO6)
948 ifp->if_capenable &= ~IFCAP_TSO6;
952 /*********************************************************************
955 * ixl_ioctl is called when the user wants to configure the
958 * return 0 on success, positive on failure
959 **********************************************************************/
962 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
964 struct ixl_vsi *vsi = ifp->if_softc;
965 struct ixl_pf *pf = vsi->back;
966 struct ifreq *ifr = (struct ifreq *) data;
967 #if defined(INET) || defined(INET6)
968 struct ifaddr *ifa = (struct ifaddr *)data;
969 bool avoid_reset = FALSE;
/* Address-family checks for the SIOCSIFADDR avoid-reset path below. */
977 if (ifa->ifa_addr->sa_family == AF_INET)
981 if (ifa->ifa_addr->sa_family == AF_INET6)
984 #if defined(INET) || defined(INET6)
986 ** Calling init results in link renegotiation,
987 ** so we avoid doing it when possible.
990 ifp->if_flags |= IFF_UP;
991 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
994 if (!(ifp->if_flags & IFF_NOARP))
995 arp_ifinit(ifp, ifa);
/* Not INET/INET6: fall back to the generic Ethernet handler. */
998 error = ether_ioctl(ifp, command, data);
1002 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
/* Reject MTUs whose framed size would exceed the HW maximum. */
1003 if (ifr->ifr_mtu > IXL_MAX_FRAME -
1004 ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
1008 ifp->if_mtu = ifr->ifr_mtu;
1009 vsi->max_frame_size =
1010 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1011 + ETHER_VLAN_ENCAP_LEN;
1012 ixl_init_locked(pf);
1017 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
1019 if (ifp->if_flags & IFF_UP) {
1020 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Only reprogram promisc/allmulti when those bits actually changed. */
1021 if ((ifp->if_flags ^ pf->if_flags) &
1022 (IFF_PROMISC | IFF_ALLMULTI)) {
1023 ixl_set_promisc(vsi);
1026 ixl_init_locked(pf);
1028 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
/* Remember the flags so the next call can detect changes. */
1030 pf->if_flags = ifp->if_flags;
1034 IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
/* Quiesce interrupts while the multicast filter list is updated. */
1035 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1037 ixl_disable_intr(vsi);
1039 ixl_enable_intr(vsi);
1044 IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
1045 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1047 ixl_disable_intr(vsi);
1049 ixl_enable_intr(vsi);
1055 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
1056 error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
/* mask = set of capability bits the caller is toggling. */
1060 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1061 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
/* TXCSUM/TSO interdependency is handled by a dedicated helper. */
1063 ixl_cap_txcsum_tso(vsi, ifp, mask);
1065 if (mask & IFCAP_RXCSUM)
1066 ifp->if_capenable ^= IFCAP_RXCSUM;
1067 if (mask & IFCAP_RXCSUM_IPV6)
1068 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1069 if (mask & IFCAP_LRO)
1070 ifp->if_capenable ^= IFCAP_LRO;
1071 if (mask & IFCAP_VLAN_HWTAGGING)
1072 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1073 if (mask & IFCAP_VLAN_HWFILTER)
1074 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1075 if (mask & IFCAP_VLAN_HWTSO)
1076 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
/* Re-init so the hardware picks up the new capability settings. */
1077 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1079 ixl_init_locked(pf);
1082 VLAN_CAPABILITIES(ifp);
1088 IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
1089 error = ether_ioctl(ifp, command, data);
1097 /*********************************************************************
1100 * This routine is used in two ways. It is used by the stack as
1101 * init entry point in network interface structure. It is also used
1102 * by the driver as a hw/sw initialization routine to get to a
1105 * return 0 on success, positive on failure
1106 **********************************************************************/
/*
 * Locked hw/sw (re)initialization path. Caller must hold the PF mutex
 * (asserted below). Refreshes the MAC (in case the user set a LAA),
 * programs offload assists from if_capenable, sets filter control,
 * configures RSS/VSI/filters, starts the timer and interrupts, and
 * finally marks the interface RUNNING.
 */
1109 ixl_init_locked(struct ixl_pf *pf)
1111 struct i40e_hw *hw = &pf->hw;
1112 struct ixl_vsi *vsi = &pf->vsi;
1113 struct ifnet *ifp = vsi->ifp;
1114 device_t dev = pf->dev;
1115 struct i40e_filter_control_settings filter;
1116 u8 tmpaddr[ETHER_ADDR_LEN];
1119 mtx_assert(&pf->pf_mtx, MA_OWNED);
1120 INIT_DEBUGOUT("ixl_init: begin");
1123 /* Get the latest mac address... User might use a LAA */
1124 bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
1125 I40E_ETH_LENGTH_OF_ADDRESS);
1126 if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
1127 i40e_validate_mac_addr(tmpaddr)) {
1128 bcopy(tmpaddr, hw->mac.addr,
1129 I40E_ETH_LENGTH_OF_ADDRESS);
1130 ret = i40e_aq_mac_address_write(hw,
1131 I40E_AQC_WRITE_TYPE_LAA_ONLY,
1132 hw->mac.addr, NULL);
/* BUGFIX: the two adjacent literals previously concatenated to
 * "LLA addresschange failed!!" -- a space was missing. */
1134 device_printf(dev, "LLA address "
1135 "change failed!!\n");
1140 /* Set the various hardware offload abilities */
1141 ifp->if_hwassist = 0;
1142 if (ifp->if_capenable & IFCAP_TSO)
1143 ifp->if_hwassist |= CSUM_TSO;
1144 if (ifp->if_capenable & IFCAP_TXCSUM)
1145 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1146 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1147 ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
1149 /* Set up the device filtering */
1150 bzero(&filter, sizeof(filter));
1151 filter.enable_ethtype = TRUE;
1152 filter.enable_macvlan = TRUE;
1154 filter.enable_fdir = TRUE;
1156 if (i40e_set_filter_control(hw, &filter))
1157 device_printf(dev, "set_filter_control() failed\n");
1160 ixl_config_rss(vsi);
1163 ** Prepare the VSI: rings, hmc contexts, etc...
1165 if (ixl_initialize_vsi(vsi)) {
1166 device_printf(dev, "initialize vsi failed!!\n");
1170 /* Add protocol filters to list */
1171 ixl_init_filters(vsi);
1173 /* Setup vlan's if needed */
1174 ixl_setup_vlan_filters(vsi);
1176 /* Start the local timer */
1177 callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1179 /* Set up MSI/X routing and the ITR settings */
1180 if (ixl_enable_msix) {
1181 ixl_configure_msix(pf);
1182 ixl_configure_itr(pf);
1184 ixl_configure_legacy(pf);
1186 ixl_enable_rings(vsi);
1188 i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
1190 ixl_reconfigure_filters(vsi);
1192 /* Set MTU in hardware*/
1193 int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
1196 device_printf(vsi->dev,
1197 "aq_set_mac_config in init error, code %d\n",
1200 /* And now turn on interrupts */
1201 ixl_enable_intr(vsi);
1203 /* Now inform the stack we're ready */
1204 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1205 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* Stack if_init entry point: delegates to ixl_init_locked() for the PF
 * (locking presumably done around this call -- elided here). */
1213 struct ixl_pf *pf = arg;
1216 ixl_init_locked(pf);
1223 ** MSIX Interrupt Handlers and Tasklets
/*
 * Deferred per-queue service task. While the interface is RUNNING it
 * cleans the RX ring (bounded by IXL_RX_LIMIT), drains any stack-queued
 * TX, and re-enqueues itself when more RX work remains; in all cases it
 * re-enables the queue interrupt on the way out.
 */
1227 ixl_handle_que(void *context, int pending)
1229 struct ixl_queue *que = context;
1230 struct ixl_vsi *vsi = que->vsi;
1231 struct i40e_hw *hw = vsi->hw;
1232 struct tx_ring *txr = &que->txr;
1233 struct ifnet *ifp = vsi->ifp;
1236 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1237 more = ixl_rxeof(que, IXL_RX_LIMIT);
1240 if (!drbr_empty(ifp, txr->br))
1241 ixl_mq_start_locked(ifp, txr);
/* More RX pending: reschedule this task instead of looping here */
1244 taskqueue_enqueue(que->tq, &que->task);
1249 /* Reenable this interrupt - hmmm */
1250 ixl_enable_queue(hw, que->me);
1255 /*********************************************************************
1257 * Legacy Interrupt Service routine
1259 **********************************************************************/
/* Legacy/MSI interrupt handler body: reads and acknowledges ICR0,
 * dispatches VFLR/adminq causes to the PF taskqueue, cleans queue 0's
 * RX/TX rings inline, then re-arms the queue and "other" causes. */
1263 struct ixl_pf *pf = arg;
1264 struct i40e_hw *hw = &pf->hw;
1265 struct ixl_vsi *vsi = &pf->vsi;
1266 struct ixl_queue *que = vsi->queues;
1267 struct ifnet *ifp = vsi->ifp;
1268 struct tx_ring *txr = &que->txr;
1269 u32 reg, icr0, mask;
1270 bool more_tx, more_rx;
1274 /* Protect against spurious interrupts */
1275 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1278 icr0 = rd32(hw, I40E_PFINT_ICR0);
/* Ack/clear the pending-bit array for vector 0 */
1280 reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1281 reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1282 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1284 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1287 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
1288 taskqueue_enqueue(pf->tq, &pf->vflr_task);
1291 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
1292 taskqueue_enqueue(pf->tq, &pf->adminq);
1296 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1299 more_tx = ixl_txeof(que);
1300 if (!drbr_empty(vsi->ifp, txr->br))
1304 /* re-enable other interrupt causes */
1305 wr32(hw, I40E_PFINT_ICR0_ENA, mask);
1307 /* And now the queues */
1308 reg = rd32(hw, I40E_QINT_RQCTL(0));
1309 reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1310 wr32(hw, I40E_QINT_RQCTL(0), reg);
1312 reg = rd32(hw, I40E_QINT_TQCTL(0));
1313 reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1314 reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
1315 wr32(hw, I40E_QINT_TQCTL(0), reg);
1317 ixl_enable_legacy(hw);
1323 /*********************************************************************
1325 * MSIX VSI Interrupt Service routine
1327 **********************************************************************/
/*
 * MSI-X per-queue interrupt handler: cleans RX and TX for its queue,
 * kicks the deferred task if the stack queued more TX or a ring clean
 * was cut short, updates the dynamic ITR values, and re-enables the
 * queue's interrupt.
 */
1329 ixl_msix_que(void *arg)
1331 struct ixl_queue *que = arg;
1332 struct ixl_vsi *vsi = que->vsi;
1333 struct i40e_hw *hw = vsi->hw;
1334 struct tx_ring *txr = &que->txr;
1335 bool more_tx, more_rx;
1337 /* Protect against spurious interrupts */
1338 if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
1343 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1346 more_tx = ixl_txeof(que);
1348 ** Make certain that if the stack
1349 ** has anything queued the task gets
1350 ** scheduled to handle it.
1352 if (!drbr_empty(vsi->ifp, txr->br))
/* Re-tune interrupt throttling based on observed traffic */
1356 ixl_set_queue_rx_itr(que);
1357 ixl_set_queue_tx_itr(que);
1359 if (more_tx || more_rx)
1360 taskqueue_enqueue(que->tq, &que->task);
1362 ixl_enable_queue(hw, que->me);
1368 /*********************************************************************
1370 * MSIX Admin Queue Interrupt Service routine
1372 **********************************************************************/
/*
 * MSI-X vector-0 (admin queue / "other causes") interrupt handler.
 * Reads ICR0, masks off the causes it is about to service (adminq,
 * malicious-driver-detect, VFLR), acks the PBA, and defers the actual
 * admin queue processing to the PF taskqueue.
 */
1374 ixl_msix_adminq(void *arg)
1376 struct ixl_pf *pf = arg;
1377 struct i40e_hw *hw = &pf->hw;
1382 reg = rd32(hw, I40E_PFINT_ICR0);
1383 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1385 /* Check on the cause */
1386 if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
1387 mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1389 if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
1390 ixl_handle_mdd_event(pf);
1391 mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1395 if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
1396 mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
1397 taskqueue_enqueue(pf->tq, &pf->vflr_task);
/* Ack the pending-bit array for vector 0 */
1401 reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1402 reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1403 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1405 taskqueue_enqueue(pf->tq, &pf->adminq);
1409 /*********************************************************************
1411 * Media Ioctl callback
1413 * This routine is called whenever the user queries the status of
1414 * the interface using ifconfig.
1416 **********************************************************************/
/*
 * ifmedia status callback (ifconfig queries). Forces a fresh link-info
 * read from firmware, then translates the hardware PHY type into the
 * closest ifmedia type and reports duplex and flow-control state.
 */
1418 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1420 struct ixl_vsi *vsi = ifp->if_softc;
1421 struct ixl_pf *pf = vsi->back;
1422 struct i40e_hw *hw = &pf->hw;
1424 INIT_DEBUGOUT("ixl_media_status: begin");
/* Force a refresh of cached link info from the firmware */
1427 hw->phy.get_link_info = TRUE;
1428 pf->link_up = i40e_get_link_status(hw);
1429 ixl_update_link_status(pf);
1431 ifmr->ifm_status = IFM_AVALID;
1432 ifmr->ifm_active = IFM_ETHER;
1439 ifmr->ifm_status |= IFM_ACTIVE;
1440 /* Hardware is always full-duplex */
1441 ifmr->ifm_active |= IFM_FDX;
/* Map the hardware PHY type to an ifmedia subtype */
1443 switch (hw->phy.link_info.phy_type) {
1445 case I40E_PHY_TYPE_100BASE_TX:
1446 ifmr->ifm_active |= IFM_100_TX;
1449 case I40E_PHY_TYPE_1000BASE_T:
1450 ifmr->ifm_active |= IFM_1000_T;
1452 case I40E_PHY_TYPE_1000BASE_SX:
1453 ifmr->ifm_active |= IFM_1000_SX;
1455 case I40E_PHY_TYPE_1000BASE_LX:
1456 ifmr->ifm_active |= IFM_1000_LX;
1459 case I40E_PHY_TYPE_10GBASE_CR1:
1460 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1461 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1462 /* Using this until a real KR media type */
1463 case I40E_PHY_TYPE_10GBASE_KR:
1464 case I40E_PHY_TYPE_10GBASE_KX4:
1465 ifmr->ifm_active |= IFM_10G_TWINAX;
1467 case I40E_PHY_TYPE_10GBASE_SR:
1468 ifmr->ifm_active |= IFM_10G_SR;
1470 case I40E_PHY_TYPE_10GBASE_LR:
1471 ifmr->ifm_active |= IFM_10G_LR;
1473 case I40E_PHY_TYPE_10GBASE_T:
1474 ifmr->ifm_active |= IFM_10G_T;
1477 case I40E_PHY_TYPE_40GBASE_CR4:
1478 case I40E_PHY_TYPE_40GBASE_CR4_CU:
1479 ifmr->ifm_active |= IFM_40G_CR4;
1481 case I40E_PHY_TYPE_40GBASE_SR4:
1482 ifmr->ifm_active |= IFM_40G_SR4;
1484 case I40E_PHY_TYPE_40GBASE_LR4:
1485 ifmr->ifm_active |= IFM_40G_LR4;
1488 ** Set these to CR4 because OS does not
1489 ** have types available yet.
1491 case I40E_PHY_TYPE_40GBASE_KR4:
1492 case I40E_PHY_TYPE_XLAUI:
1493 case I40E_PHY_TYPE_XLPPI:
1494 case I40E_PHY_TYPE_40GBASE_AOC:
1495 ifmr->ifm_active |= IFM_40G_CR4;
1498 ifmr->ifm_active |= IFM_UNKNOWN;
1501 /* Report flow control status as well */
1502 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1503 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1504 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1505 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1512 /*********************************************************************
1514 * Media Ioctl callback
1516 * This routine is called when the user changes speed/duplex using
1517 * media/mediaopt option with ifconfig.
1519 **********************************************************************/
/*
 * ifmedia change callback. Validates the request is for Ethernet media
 * and otherwise rejects it: manual media selection is not supported by
 * this driver version.
 */
1521 ixl_media_change(struct ifnet * ifp)
1523 struct ixl_vsi *vsi = ifp->if_softc;
1524 struct ifmedia *ifm = &vsi->media;
1526 INIT_DEBUGOUT("ixl_media_change: begin");
1528 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1531 if_printf(ifp, "Media change is currently not supported.\n");
1539 ** ATR: Application Targeted Receive - creates a filter
1540 ** based on TX flow info that will keep the receive
1541 ** portion of the flow on the same queue. Based on the
1542 ** implementation this is only available for TCP connections
/*
 * Build a Flow Director filter-program descriptor in the TX ring so
 * that the RX side of this TCP flow lands on the same queue (ATR).
 * Samples SYN/FIN packets always, other packets at the configured
 * atr_rate. TH_FIN removes the filter; anything else adds/updates it.
 */
1545 ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
1547 struct ixl_vsi *vsi = que->vsi;
1548 struct tx_ring *txr = &que->txr;
1549 struct i40e_filter_program_desc *FDIR;
1553 /* check if ATR is enabled and sample rate */
1554 if ((!ixl_enable_fdir) || (!txr->atr_rate))
1557 ** We sample all TCP SYN/FIN packets,
1558 ** or at the selected sample rate
1561 if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
1562 (txr->atr_count < txr->atr_rate))
1566 /* Get a descriptor to use */
1567 idx = txr->next_avail;
1568 FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
/* Advance next_avail, wrapping at the ring size */
1569 if (++idx == que->num_desc)
1572 txr->next_avail = idx;
1574 ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1575 I40E_TXD_FLTR_QW0_QINDEX_MASK;
1577 ptype |= (etype == ETHERTYPE_IP) ?
1578 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1579 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1580 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1581 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1583 ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1585 dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
1588 ** We use the TCP TH_FIN as a trigger to remove
1589 ** the filter, otherwise its an update.
1591 dtype |= (th->th_flags & TH_FIN) ?
1592 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1593 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1594 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1595 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1597 dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1598 I40E_TXD_FLTR_QW1_DEST_SHIFT;
1600 dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1601 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
/* Descriptors are little-endian in hardware */
1603 FDIR->qindex_flex_ptype_vsi = htole32(ptype);
1604 FDIR->dtype_cmd_cntindex = htole32(dtype);
/*
 * Program unicast/multicast promiscuous mode on the VSI from the
 * interface flags. IFF_ALLMULTI (or more multicast addresses than
 * MAX_MULTICAST_ADDR) turns on multicast promiscuity; IFF_PROMISC
 * turns on unicast promiscuity. Both settings are pushed via the AQ.
 */
1611 ixl_set_promisc(struct ixl_vsi *vsi)
1613 struct ifnet *ifp = vsi->ifp;
1614 struct i40e_hw *hw = vsi->hw;
1616 bool uni = FALSE, multi = FALSE;
1618 if (ifp->if_flags & IFF_ALLMULTI)
1620 else { /* Need to count the multicast addresses */
1621 struct ifmultiaddr *ifma;
1622 if_maddr_rlock(ifp);
1623 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1624 if (ifma->ifma_addr->sa_family != AF_LINK)
/* Too many entries to filter individually -- stop counting */
1626 if (mcnt == MAX_MULTICAST_ADDR)
1630 if_maddr_runlock(ifp);
1633 if (mcnt >= MAX_MULTICAST_ADDR)
1635 if (ifp->if_flags & IFF_PROMISC)
/* NOTE(review): both AQ return codes land in the same err and the
 * first is overwritten -- errors are effectively ignored here. */
1638 err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1639 vsi->seid, uni, NULL);
1640 err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1641 vsi->seid, multi, NULL);
1645 /*********************************************************************
1648 * Routines for multicast and vlan filter management.
1650 *********************************************************************/
/*
 * Sync the hardware MAC filter list with the interface's multicast
 * membership. If the membership exceeds MAX_MULTICAST_ADDR the filters
 * are dropped in favor of multicast promiscuous mode; otherwise each
 * address is added to the software list and pushed to hardware.
 */
1652 ixl_add_multi(struct ixl_vsi *vsi)
1654 struct ifmultiaddr *ifma;
1655 struct ifnet *ifp = vsi->ifp;
1656 struct i40e_hw *hw = vsi->hw;
1657 int mcnt = 0, flags;
1659 IOCTL_DEBUGOUT("ixl_add_multi: begin");
1661 if_maddr_rlock(ifp);
1663 ** First just get a count, to decide whether
1664 ** we simply use multicast promiscuous.
1666 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1667 if (ifma->ifma_addr->sa_family != AF_LINK)
1671 if_maddr_runlock(ifp);
1673 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1674 /* delete existing MC filters */
1675 ixl_del_hw_filters(vsi, mcnt);
1676 i40e_aq_set_vsi_multicast_promiscuous(hw,
1677 vsi->seid, TRUE, NULL);
/* Second pass: record each multicast address in the filter list */
1682 if_maddr_rlock(ifp);
1683 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1684 if (ifma->ifma_addr->sa_family != AF_LINK)
1686 ixl_add_mc_filter(vsi,
1687 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1690 if_maddr_runlock(ifp);
1692 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1693 ixl_add_hw_filters(vsi, flags, mcnt);
1696 IOCTL_DEBUGOUT("ixl_add_multi: end");
/*
 * Remove stale multicast filters: walk the software filter list and
 * mark for deletion every MC entry that no longer appears in the
 * interface's multicast membership, then flush the deletions to hw.
 */
1701 ixl_del_multi(struct ixl_vsi *vsi)
1703 struct ifnet *ifp = vsi->ifp;
1704 struct ifmultiaddr *ifma;
1705 struct ixl_mac_filter *f;
1709 IOCTL_DEBUGOUT("ixl_del_multi: begin");
1711 /* Search for removed multicast addresses */
1712 if_maddr_rlock(ifp);
1713 SLIST_FOREACH(f, &vsi->ftl, next) {
1714 if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
/* Is this filter still a member of the interface's MC list? */
1716 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1717 if (ifma->ifma_addr->sa_family != AF_LINK)
1719 u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1720 if (cmp_etheraddr(f->macaddr, mc_addr)) {
1725 if (match == FALSE) {
1726 f->flags |= IXL_FILTER_DEL;
1731 if_maddr_runlock(ifp);
1734 ixl_del_hw_filters(vsi, mcnt);
1738 /*********************************************************************
1741 * This routine checks for link status, updates statistics,
1742 * and runs the watchdog check.
1744 **********************************************************************/
/*
 * Periodic (1 Hz) watchdog, run with the PF mutex held. Kicks the
 * adminq task, refreshes statistics, fires a software interrupt at
 * each queue, and tracks per-queue "busy" counters to detect hung TX
 * queues -- reinitializing the whole interface if every queue hangs.
 */
1747 ixl_local_timer(void *arg)
1749 struct ixl_pf *pf = arg;
1750 struct i40e_hw *hw = &pf->hw;
1751 struct ixl_vsi *vsi = &pf->vsi;
1752 struct ixl_queue *que = vsi->queues;
1753 device_t dev = pf->dev;
1757 mtx_assert(&pf->pf_mtx, MA_OWNED);
1759 /* Fire off the adminq task */
1760 taskqueue_enqueue(pf->tq, &pf->adminq);
1763 ixl_update_stats_counters(pf);
1766 ** Check status of the queues
1768 mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1769 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
1771 for (int i = 0; i < vsi->num_queues; i++,que++) {
1772 /* Any queues with outstanding work get a sw irq */
1774 wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1776 ** Each time txeof runs without cleaning, but there
1777 ** are uncleaned descriptors it increments busy. If
1778 ** we get to 5 we declare it hung.
1780 if (que->busy == IXL_QUEUE_HUNG) {
1782 /* Mark the queue as inactive */
1783 vsi->active_queues &= ~((u64)1 << que->me);
1786 /* Check if we've come back from hung */
1787 if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1788 vsi->active_queues |= ((u64)1 << que->me);
1790 if (que->busy >= IXL_MAX_TX_BUSY) {
1792 device_printf(dev,"Warning queue %d "
1793 "appears to be hung!\n", i);
1795 que->busy = IXL_QUEUE_HUNG;
1799 /* Only reinit if all queues show hung */
1800 if (hung == vsi->num_queues)
/* Re-arm ourselves for the next tick */
1803 callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1807 device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1808 ixl_init_locked(pf);
1812 ** Note: this routine updates the OS on the link state
1813 ** the real check of the hardware only happens with
1814 ** a link interrupt.
/*
 * Propagate cached link state to the OS. Uses vsi->link_active to
 * report only transitions: on a new link-up it logs speed/duplex/flow
 * control (warning when an NPAR partition links below 10G) and tells
 * the stack LINK_STATE_UP; on a new link-down it logs and reports
 * LINK_STATE_DOWN. The actual hardware query happens elsewhere.
 */
1817 ixl_update_link_status(struct ixl_pf *pf)
1819 struct ixl_vsi *vsi = &pf->vsi;
1820 struct i40e_hw *hw = &pf->hw;
1821 struct ifnet *ifp = vsi->ifp;
1822 device_t dev = pf->dev;
1825 if (vsi->link_active == FALSE) {
1826 pf->fc = hw->fc.current_mode;
1828 device_printf(dev,"Link is up %d Gbps %s,"
1829 " Flow Control: %s\n",
1831 I40E_LINK_SPEED_40GB)? 40:10),
1832 "Full Duplex", ixl_fc_string[pf->fc]);
1834 vsi->link_active = TRUE;
1836 ** Warn user if link speed on NPAR enabled
1837 ** partition is not at least 10GB
1839 if (hw->func_caps.npar_enable &&
1840 (hw->phy.link_info.link_speed ==
1841 I40E_LINK_SPEED_1GB ||
1842 hw->phy.link_info.link_speed ==
1843 I40E_LINK_SPEED_100MB))
/* BUGFIX: adjacent literals previously concatenated to
 * "detectedlink" -- a space was missing. */
1844 device_printf(dev, "The partition detected "
1845 "link speed that is less than 10Gbps\n");
1846 if_link_state_change(ifp, LINK_STATE_UP);
1848 } else { /* Link down */
1849 if (vsi->link_active == TRUE) {
1851 device_printf(dev,"Link is Down\n");
1852 if_link_state_change(ifp, LINK_STATE_DOWN);
1853 vsi->link_active = FALSE;
1860 /*********************************************************************
1862 * This routine disables all traffic on the adapter by issuing a
1863 * global reset on the MAC and deallocates TX/RX buffers.
1865 **********************************************************************/
/*
 * Quiesce the adapter (PF mutex held): disable interrupts and rings,
 * clear RUNNING/OACTIVE so the stack stops transmitting, and stop the
 * local watchdog timer.
 */
1868 ixl_stop(struct ixl_pf *pf)
1870 struct ixl_vsi *vsi = &pf->vsi;
1871 struct ifnet *ifp = vsi->ifp;
1873 mtx_assert(&pf->pf_mtx, MA_OWNED);
1875 INIT_DEBUGOUT("ixl_stop: begin\n");
/* With VFs active, only the ring interrupts are disabled here */
1876 if (pf->num_vfs == 0)
1877 ixl_disable_intr(vsi);
1879 ixl_disable_rings_intr(vsi);
1880 ixl_disable_rings(vsi);
1882 /* Tell the stack that the interface is no longer active */
1883 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1885 /* Stop the local timer */
1886 callout_stop(&pf->timer);
1892 /*********************************************************************
1894 * Setup MSIX Interrupt resources and handlers for the VSI
1896 **********************************************************************/
/*
 * Legacy/MSI (single vector) interrupt setup: allocate the shared IRQ,
 * attach ixl_intr() as the handler, and create the deferred-work
 * taskqueues for the queue pair and for the admin queue.
 */
1898 ixl_assign_vsi_legacy(struct ixl_pf *pf)
1900 device_t dev = pf->dev;
1901 struct ixl_vsi *vsi = &pf->vsi;
1902 struct ixl_queue *que = vsi->queues;
1907 pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1908 &rid, RF_SHAREABLE | RF_ACTIVE);
1909 if (pf->res == NULL) {
1910 device_printf(dev,"Unable to allocate"
1911 " bus resource: vsi legacy/msi interrupt\n");
1915 /* Set the handler function */
1916 error = bus_setup_intr(dev, pf->res,
1917 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1918 ixl_intr, pf, &pf->tag);
1921 device_printf(dev, "Failed to register legacy/msi handler");
1924 bus_describe_intr(dev, pf->res, pf->tag, "irq0");
/* Per-queue deferred tasks (TX start and general queue service) */
1925 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1926 TASK_INIT(&que->task, 0, ixl_handle_que, que);
1927 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1928 taskqueue_thread_enqueue, &que->tq);
1929 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1930 device_get_nameunit(dev));
1931 TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1934 TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
/* Admin-queue taskqueue */
1937 pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1938 taskqueue_thread_enqueue, &pf->tq);
1939 taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1940 device_get_nameunit(dev));
1946 /*********************************************************************
1948 * Setup MSIX Interrupt resources and handlers for the VSI
1950 **********************************************************************/
/*
 * MSI-X interrupt setup: vector 0 is dedicated to the admin queue
 * (ixl_msix_adminq), each subsequent vector to one queue pair
 * (ixl_msix_que). Also creates the taskqueues and, when RSS is in
 * use, binds each queue vector/taskqueue thread to an RSS bucket CPU.
 */
1952 ixl_assign_vsi_msix(struct ixl_pf *pf)
1954 device_t dev = pf->dev;
1955 struct ixl_vsi *vsi = &pf->vsi;
1956 struct ixl_queue *que = vsi->queues;
1957 struct tx_ring *txr;
1958 int error, rid, vector = 0;
1960 /* Admin Que is vector 0*/
1962 pf->res = bus_alloc_resource_any(dev,
1963 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1965 device_printf(dev,"Unable to allocate"
1966 " bus resource: Adminq interrupt [%d]\n", rid);
1969 /* Set the adminq vector and handler */
1970 error = bus_setup_intr(dev, pf->res,
1971 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1972 ixl_msix_adminq, pf, &pf->tag);
1975 device_printf(dev, "Failed to register Admin que handler");
1978 bus_describe_intr(dev, pf->res, pf->tag, "aq");
1979 pf->admvec = vector;
1980 /* Tasklet for Admin Queue */
1981 TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1984 TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
1987 pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1988 taskqueue_thread_enqueue, &pf->tq);
1989 taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1990 device_get_nameunit(pf->dev));
1993 /* Now set up the stations */
1994 for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1998 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1999 RF_SHAREABLE | RF_ACTIVE);
2000 if (que->res == NULL) {
2001 device_printf(dev,"Unable to allocate"
2002 " bus resource: que interrupt [%d]\n", vector);
2005 /* Set the handler function */
2006 error = bus_setup_intr(dev, que->res,
2007 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2008 ixl_msix_que, que, &que->tag);
2011 device_printf(dev, "Failed to register que handler");
2014 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
2015 /* Bind the vector to a CPU */
/* RSS path: queue i serviced on the CPU owning RSS bucket i */
2017 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2019 bus_bind_intr(dev, que->res, cpu_id);
2021 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
2022 TASK_INIT(&que->task, 0, ixl_handle_que, que);
2023 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
2024 taskqueue_thread_enqueue, &que->tq);
2026 taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
2027 cpu_id, "%s (bucket %d)",
2028 device_get_nameunit(dev), cpu_id);
2030 taskqueue_start_threads(&que->tq, 1, PI_NET,
2031 "%s que", device_get_nameunit(dev));
2040 * Allocate MSI/X vectors
/*
 * Probe and allocate MSI-X vectors. Returns the vector count to use
 * (queues + 1 admin vector), or falls back to MSI/legacy (returning a
 * small count / 0) when MSI-X is disabled, unavailable, or the BAR
 * cannot be mapped. Clamps the queue count to CPUs, the tunable, and
 * (when RSS is compiled in) the number of RSS buckets.
 */
2043 ixl_init_msix(struct ixl_pf *pf)
2045 device_t dev = pf->dev;
2046 int rid, want, vectors, queues, available;
2048 /* Override by tuneable */
2049 if (ixl_enable_msix == 0)
2053 ** When used in a virtualized environment
2054 ** PCI BUSMASTER capability may not be set
2055 ** so explicitly set it here and rewrite
2056 ** the ENABLE in the MSIX control register
2057 ** at this point to cause the host to
2058 ** successfully initialize us.
2063 pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2064 pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
2065 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
2066 pci_find_cap(dev, PCIY_MSIX, &rid);
2067 rid += PCIR_MSIX_CTRL;
2068 msix_ctrl = pci_read_config(dev, rid, 2);
2069 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
2070 pci_write_config(dev, rid, msix_ctrl, 2);
2073 /* First try MSI/X */
2074 rid = PCIR_BAR(IXL_BAR);
2075 pf->msix_mem = bus_alloc_resource_any(dev,
2076 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2077 if (!pf->msix_mem) {
2078 /* May not be enabled */
2079 device_printf(pf->dev,
2080 "Unable to map MSIX table \n");
2084 available = pci_msix_count(dev);
2085 if (available == 0) { /* system has msix disabled */
2086 bus_release_resource(dev, SYS_RES_MEMORY,
2088 pf->msix_mem = NULL;
2092 /* Figure out a reasonable auto config value */
2093 queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
2095 /* Override with hardcoded value if sane */
2096 if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
2097 queues = ixl_max_queues;
2100 /* If we're doing RSS, clamp at the number of RSS buckets */
2101 if (queues > rss_getnumbuckets())
2102 queues = rss_getnumbuckets();
2106 ** Want one vector (RX/TX pair) per queue
2107 ** plus an additional for the admin queue.
2110 if (want <= available) /* Have enough */
2113 device_printf(pf->dev,
2114 "MSIX Configuration Problem, "
2115 "%d vectors available but %d wanted!\n",
2117 return (0); /* Will go to Legacy setup */
2120 if (pci_alloc_msix(dev, &vectors) == 0) {
2121 device_printf(pf->dev,
2122 "Using MSIX interrupts with %d vectors\n", vectors);
2124 pf->vsi.num_queues = queues;
2127 * If we're doing RSS, the number of queues needs to
2128 * match the number of RSS buckets that are configured.
2130 * + If there's more queues than RSS buckets, we'll end
2131 * up with queues that get no traffic.
2133 * + If there's more RSS buckets than queues, we'll end
2134 * up having multiple RSS buckets map to the same queue,
2135 * so there'll be some contention.
2137 if (queues != rss_getnumbuckets()) {
2139 "%s: queues (%d) != RSS buckets (%d)"
2140 "; performance will be impacted.\n",
2141 __func__, queues, rss_getnumbuckets());
/* MSI-X failed: fall back to plain MSI (or legacy) with one queue */
2147 vectors = pci_msi_count(dev);
2148 pf->vsi.num_queues = 1;
2151 ixl_enable_msix = 0;
2152 if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2153 device_printf(pf->dev,"Using an MSI interrupt\n");
2156 device_printf(pf->dev,"Using a Legacy interrupt\n");
2163 * Plumb MSI/X vectors
/*
 * Program hardware interrupt routing for MSI-X: vector 0 is wired to
 * the "other" causes (adminq, errors, VFLR, ...), then each queue pair
 * is linked to its own vector via the QINT_RQCTL/TQCTL linked lists,
 * terminating the chain at the last queue with IXL_QUEUE_EOL.
 */
2166 ixl_configure_msix(struct ixl_pf *pf)
2168 struct i40e_hw *hw = &pf->hw;
2169 struct ixl_vsi *vsi = &pf->vsi;
2173 /* First set up the adminq - vector 0 */
2174 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2175 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
2177 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2178 I40E_PFINT_ICR0_ENA_GRST_MASK |
2179 I40E_PFINT_ICR0_HMC_ERR_MASK |
2180 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2181 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2182 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2183 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2184 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2186 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2187 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2189 wr32(hw, I40E_PFINT_DYN_CTL0,
2190 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2191 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2193 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2195 /* Next configure the queues */
2196 for (int i = 0; i < vsi->num_queues; i++, vector++) {
2197 wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2198 wr32(hw, I40E_PFINT_LNKLSTN(i), i);
/* RX cause: route queue i to its vector, chain to the TX queue */
2200 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2201 (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2202 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2203 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2204 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2205 wr32(hw, I40E_QINT_RQCTL(i), reg);
/* TX cause: chain onward to the next queue's RX (or EOL at the end) */
2207 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2208 (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2209 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2210 ((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2211 (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2212 if (i == (vsi->num_queues - 1))
2213 reg |= (IXL_QUEUE_EOL
2214 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2215 wr32(hw, I40E_QINT_TQCTL(i), reg);
2220 * Configure for MSI single vector operation
/*
 * Program hardware interrupt routing for legacy/MSI single-vector
 * operation: all "other" causes plus queue pair 0 share vector 0.
 * Finishes by requesting enable on TX/RX queue 0.
 */
2223 ixl_configure_legacy(struct ixl_pf *pf)
2225 struct i40e_hw *hw = &pf->hw;
2229 wr32(hw, I40E_PFINT_ITR0(0), 0);
2230 wr32(hw, I40E_PFINT_ITR0(1), 0);
2233 /* Setup "other" causes */
2234 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2235 | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2236 | I40E_PFINT_ICR0_ENA_GRST_MASK
2237 | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2238 | I40E_PFINT_ICR0_ENA_GPIO_MASK
2239 | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2240 | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2241 | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2242 | I40E_PFINT_ICR0_ENA_VFLR_MASK
2243 | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2245 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2247 /* SW_ITR_IDX = 0, but don't change INTENA */
2248 wr32(hw, I40E_PFINT_DYN_CTL0,
2249 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2250 I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2251 /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2252 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2254 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2255 wr32(hw, I40E_PFINT_LNKLST0, 0);
2257 /* Associate the queue pair to the vector and enable the q int */
2258 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2259 | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2260 | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2261 wr32(hw, I40E_QINT_RQCTL(0), reg);
2263 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2264 | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2265 | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2266 wr32(hw, I40E_QINT_TQCTL(0), reg);
2268 /* Next enable the queue pair */
2269 reg = rd32(hw, I40E_QTX_ENA(0));
2270 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2271 wr32(hw, I40E_QTX_ENA(0), reg);
2273 reg = rd32(hw, I40E_QRX_ENA(0));
2274 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2275 wr32(hw, I40E_QRX_ENA(0), reg);
2280 * Set the Initial ITR state
/*
 * Program the initial interrupt throttling (ITR) state: take the
 * rx/tx ITR tunables (optionally flagged dynamic), write them to each
 * queue's ITR registers, and seed the per-ring itr/latency fields.
 */
2283 ixl_configure_itr(struct ixl_pf *pf)
2285 struct i40e_hw *hw = &pf->hw;
2286 struct ixl_vsi *vsi = &pf->vsi;
2287 struct ixl_queue *que = vsi->queues;
2289 vsi->rx_itr_setting = ixl_rx_itr;
2290 if (ixl_dynamic_rx_itr)
2291 vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2292 vsi->tx_itr_setting = ixl_tx_itr;
2293 if (ixl_dynamic_tx_itr)
2294 vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2296 for (int i = 0; i < vsi->num_queues; i++, que++) {
2297 struct tx_ring *txr = &que->txr;
2298 struct rx_ring *rxr = &que->rxr;
2300 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2301 vsi->rx_itr_setting);
2302 rxr->itr = vsi->rx_itr_setting;
2303 rxr->latency = IXL_AVE_LATENCY;
2304 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2305 vsi->tx_itr_setting);
2306 txr->itr = vsi->tx_itr_setting;
2307 txr->latency = IXL_AVE_LATENCY;
/*
 * Map the device's register BAR, populate the osdep bus-space handles
 * the shared i40e code uses for register access, then negotiate
 * MSI-X/MSI vectors (pf->msix holds the result; 1 means plain MSI).
 */
2313 ixl_allocate_pci_resources(struct ixl_pf *pf)
2316 device_t dev = pf->dev;
2319 pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2322 if (!(pf->pci_mem)) {
2323 device_printf(dev,"Unable to allocate bus resource: memory\n");
2327 pf->osdep.mem_bus_space_tag =
2328 rman_get_bustag(pf->pci_mem);
2329 pf->osdep.mem_bus_space_handle =
2330 rman_get_bushandle(pf->pci_mem);
2331 pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2332 pf->osdep.flush_reg = I40E_GLGEN_STAT;
/* Shared code dereferences hw_addr via the osdep handle */
2333 pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2335 pf->hw.back = &pf->osdep;
2338 ** Now setup MSI or MSI/X, should
2339 ** return us the number of supported
2340 ** vectors. (Will be 1 for MSI)
2342 pf->msix = ixl_init_msix(pf);
/*
 * Release everything ixl_allocate_pci_resources and the interrupt
 * setup acquired: per-queue IRQ handlers and resources, the adminq
 * interrupt, MSI vectors, the MSI-X table BAR, and the register BAR.
 * Safe to call before the queue stations exist.
 */
2347 ixl_free_pci_resources(struct ixl_pf * pf)
2349 struct ixl_vsi *vsi = &pf->vsi;
2350 struct ixl_queue *que = vsi->queues;
2351 device_t dev = pf->dev;
2354 memrid = PCIR_BAR(IXL_BAR);
2356 /* We may get here before stations are setup */
2357 if ((!ixl_enable_msix) || (que == NULL))
2361 ** Release all msix VSI resources:
2363 for (int i = 0; i < vsi->num_queues; i++, que++) {
/* IRQ rids are 1-based relative to the MSI-X vector index */
2364 rid = que->msix + 1;
2365 if (que->tag != NULL) {
2366 bus_teardown_intr(dev, que->res, que->tag);
2369 if (que->res != NULL)
2370 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2374 /* Clean the AdminQ interrupt last */
2375 if (pf->admvec) /* we are doing MSIX */
2376 rid = pf->admvec + 1;
2378 (pf->msix != 0) ? (rid = 1):(rid = 0);
2380 if (pf->tag != NULL) {
2381 bus_teardown_intr(dev, pf->res, pf->tag);
2384 if (pf->res != NULL)
2385 bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2388 pci_release_msi(dev);
2390 if (pf->msix_mem != NULL)
2391 bus_release_resource(dev, SYS_RES_MEMORY,
2392 memrid, pf->msix_mem);
2394 if (pf->pci_mem != NULL)
2395 bus_release_resource(dev, SYS_RES_MEMORY,
2396 PCIR_BAR(0), pf->pci_mem);
/*
 * Register ifmedia entries for every media type the PHY-abilities
 * bitmask reports. Several hardware PHY types without a matching OS
 * media constant are folded into the nearest available one
 * (10G KR/KX4/etc -> TWINAX, 40G KR4/XLAUI/XLPPI -> CR4).
 */
2402 ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2404 /* Display supported media types */
2405 if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2406 ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2408 if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2409 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2410 if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
2411 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2412 if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
2413 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2415 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2416 phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4) ||
2417 phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR) ||
2418 phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
2419 phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
2420 phy_type & (1 << I40E_PHY_TYPE_XFI) ||
2421 phy_type & (1 << I40E_PHY_TYPE_SFI) ||
2422 phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2423 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2425 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2426 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2427 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2428 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2429 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2430 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2432 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
2433 phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2434 phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
2435 phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
2436 phy_type & (1 << I40E_PHY_TYPE_XLPPI) ||
2437 /* KR4 uses CR4 until the OS has the real media type */
2438 phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2439 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2441 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2442 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2443 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2444 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2447 /*********************************************************************
2449  * Setup networking device structure and register an interface.
 *
 * Allocates the ifnet, fills in driver entry points (init/ioctl/
 * transmit/qflush), advertises capabilities, queries the PHY for
 * supported media and finally calls ether_ifattach().
 * NOTE(review): lines appear elided in this chunk (error-return paths,
 * closing braces); comments cover only the visible statements.
2451  **********************************************************************/
2453 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2456 struct i40e_hw *hw = vsi->hw;
2457 struct ixl_queue *que = vsi->queues;
2458 struct i40e_aq_get_phy_abilities_resp abilities;
2459 enum i40e_status_code aq_error = 0;
2461 INIT_DEBUGOUT("ixl_setup_interface: begin");
2463 ifp = vsi->ifp = if_alloc(IFT_ETHER);
2465 device_printf(dev, "can not allocate ifnet structure\n");
2468 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2469 ifp->if_mtu = ETHERMTU;
2470 if_initbaudrate(ifp, IF_Gbps(40));
2471 ifp->if_init = ixl_init;
2472 ifp->if_softc = vsi;
2473 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2474 ifp->if_ioctl = ixl_ioctl;
2476 #if __FreeBSD_version >= 1100036
2477 if_setgetcounterfn(ifp, ixl_get_counter);
2480 ifp->if_transmit = ixl_mq_start;
2482 ifp->if_qflush = ixl_qflush;
/* Reserve two descriptors of slack in the legacy send queue */
2484 ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2486 vsi->max_frame_size =
2487 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2488 + ETHER_VLAN_ENCAP_LEN;
2491 * Tell the upper layer(s) we support long frames.
2493 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2495 ifp->if_capabilities |= IFCAP_HWCSUM;
2496 ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2497 ifp->if_capabilities |= IFCAP_TSO;
2498 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2499 ifp->if_capabilities |= IFCAP_LRO;
2501 /* VLAN capabilities */
2502 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2505 | IFCAP_VLAN_HWCSUM;
2506 ifp->if_capenable = ifp->if_capabilities;
2509 ** Don't turn this on by default, if vlans are
2510 ** created on another pseudo device (eg. lagg)
2511 ** then vlan events are not passed thru, breaking
2512 ** operation, but with HW FILTER off it works. If
2513 ** using vlans directly on the ixl driver you can
2514 ** enable this and get full hardware tag filtering.
2516 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2519 * Specify the media types supported by this adapter and register
2520 * callbacks to update media and link information
2522 ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2525 aq_error = i40e_aq_get_phy_capabilities(hw,
2526 FALSE, TRUE, &abilities, NULL);
2527 /* May need delay to detect fiber correctly */
2528 if (aq_error == I40E_ERR_UNKNOWN_PHY) {
/* Retry once after 200ms; fiber PHYs can be slow to identify */
2529 i40e_msec_delay(200);
2530 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2531 TRUE, &abilities, NULL);
2534 if (aq_error == I40E_ERR_UNKNOWN_PHY)
2535 device_printf(dev, "Unknown PHY type detected!\n");
2538 "Error getting supported media types, err %d,"
2539 " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2543 ixl_add_ifmedia(vsi, abilities.phy_type);
2545 /* Use autoselect media by default */
2546 ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2547 ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2549 ether_ifattach(ifp, hw->mac.addr);
2555 ** Run when the Admin Queue gets a
2556 ** link transition interrupt.
**
** Refreshes the cached link state from firmware and records it in
** pf->link_up; also warns when link failed because the SFP/QSFP
** module is unqualified.
2559 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
2561 struct i40e_hw *hw = &pf->hw;
2562 struct i40e_aqc_get_link_status *status =
2563 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
/* Force i40e_get_link_status() to requery FW rather than use cache */
2566 hw->phy.get_link_info = TRUE;
2567 check = i40e_get_link_status(hw);
2568 pf->link_up = check;
2570 printf("Link is %s\n", check ? "up":"down");
2572 /* Report if Unqualified modules are found */
2573 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2574 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
2575 (!(status->link_info & I40E_AQ_LINK_UP)))
2576 device_printf(pf->dev, "Link failed because "
2577 "an unqualified module was detected\n");
2582 /*********************************************************************
2584  * Get Firmware Switch configuration
2585  * - this will need to be more robust when more complex
2586  * switch configurations are enabled.
 *
 * Reads the switch element list via the get-switch-config AQ command
 * and caches the first element's seid/uplink/downlink in the single
 * PF VSI.
2588  **********************************************************************/
2590 ixl_switch_config(struct ixl_pf *pf)
2592 struct i40e_hw *hw = &pf->hw;
2593 struct ixl_vsi *vsi = &pf->vsi;
2594 device_t dev = vsi->dev;
2595 struct i40e_aqc_get_switch_config_resp *sw_config;
2596 u8 aq_buf[I40E_AQ_LARGE_BUF];
2600 memset(&aq_buf, 0, sizeof(aq_buf));
2601 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2602 ret = i40e_aq_get_switch_config(hw, sw_config,
2603 sizeof(aq_buf), &next, NULL);
2605 device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n",
2611 "Switch config: header reported: %d in structure, %d total\n",
2612 sw_config->header.num_reported, sw_config->header.num_total);
2613 for (int i = 0; i < sw_config->header.num_reported; i++) {
2615 "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2616 sw_config->element[i].element_type,
2617 sw_config->element[i].seid,
2618 sw_config->element[i].uplink_seid,
2619 sw_config->element[i].downlink_seid);
2622 /* Simplified due to a single VSI at the moment */
2623 vsi->uplink_seid = sw_config->element[0].uplink_seid;
2624 vsi->downlink_seid = sw_config->element[0].downlink_seid;
2625 vsi->seid = sw_config->element[0].seid;
2629 /*********************************************************************
2631  * Initialize the VSI: this handles contexts, which means things
2632  * like the number of descriptors, buffer size,
2633  * plus we init the rings thru this function.
 *
 * Sequence: fetch VSI parameters from FW, set queue-map/VLAN sections,
 * push the updated context back, then program a TX and RX HMC context
 * per queue and (re)initialize the rings.
 * NOTE(review): several lines are elided in this chunk (error-path
 * returns, netmap #ifdef opener, closing braces); comments describe
 * only the visible statements.
2635  **********************************************************************/
2637 ixl_initialize_vsi(struct ixl_vsi *vsi)
2639 struct ixl_pf *pf = vsi->back;
2640 struct ixl_queue *que = vsi->queues;
2641 device_t dev = vsi->dev;
2642 struct i40e_hw *hw = vsi->hw;
2643 struct i40e_vsi_context ctxt;
2646 memset(&ctxt, 0, sizeof(ctxt));
2647 ctxt.seid = vsi->seid;
2648 if (pf->veb_seid != 0)
2649 ctxt.uplink_seid = pf->veb_seid;
2650 ctxt.pf_num = hw->pf_id;
2651 err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2653 device_printf(dev,"get vsi params failed %x!!\n", err);
2657 printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2658 "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2659 "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2660 ctxt.uplink_seid, ctxt.vsi_number,
2661 ctxt.vsis_allocated, ctxt.vsis_unallocated,
2662 ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2663 ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2666 ** Set the queue and traffic class bits
2667 ** - when multiple traffic classes are supported
2668 ** this will need to be more robust.
2670 ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2671 ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2672 ctxt.info.queue_mapping[0] = 0;
/* 0x0800 presumably encodes the queue count for TC0 — TODO confirm
** against the i40e_aqc_vsi_properties_data tc_mapping layout */
2673 ctxt.info.tc_mapping[0] = 0x0800;
2675 /* Set VLAN receive stripping mode */
2676 ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2677 ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2678 if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2679 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2681 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2683 /* Keep copy of VSI info in VSI for statistic counters */
2684 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2686 /* Reset VSI statistics */
2687 ixl_vsi_reset_stats(vsi);
2688 vsi->hw_filters_add = 0;
2689 vsi->hw_filters_del = 0;
2691 ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
2693 err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2695 device_printf(dev,"update vsi params failed %x!!\n",
2696 hw->aq.asq_last_status);
/* Program per-queue TX/RX HMC contexts and init each ring */
2700 for (int i = 0; i < vsi->num_queues; i++, que++) {
2701 struct tx_ring *txr = &que->txr;
2702 struct rx_ring *rxr = &que->rxr;
2703 struct i40e_hmc_obj_txq tctx;
2704 struct i40e_hmc_obj_rxq rctx;
2709 /* Setup the HMC TX Context */
2710 size = que->num_desc * sizeof(struct i40e_tx_desc);
2711 memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2712 tctx.new_context = 1;
2713 tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2714 tctx.qlen = que->num_desc;
2716 tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2717 /* Enable HEAD writeback */
2718 tctx.head_wb_ena = 1;
/* Head-writeback area lives immediately after the descriptor ring */
2719 tctx.head_wb_addr = txr->dma.pa +
2720 (que->num_desc * sizeof(struct i40e_tx_desc));
2721 tctx.rdylist_act = 0;
2722 err = i40e_clear_lan_tx_queue_context(hw, i);
2724 device_printf(dev, "Unable to clear TX context\n");
2727 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2729 device_printf(dev, "Unable to set TX context\n");
2732 /* Associate the ring with this PF */
2733 txctl = I40E_QTX_CTL_PF_QUEUE;
2734 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2735 I40E_QTX_CTL_PF_INDX_MASK);
2736 wr32(hw, I40E_QTX_CTL(i), txctl);
2739 /* Do ring (re)init */
2740 ixl_init_tx_ring(que);
2742 /* Next setup the HMC RX Context */
2743 if (vsi->max_frame_size <= MCLBYTES)
2744 rxr->mbuf_sz = MCLBYTES;
2746 rxr->mbuf_sz = MJUMPAGESIZE;
/* Largest frame the HW can chain together with this buffer size */
2748 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2750 /* Set up an RX context for the HMC */
2751 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2752 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2753 /* ignore header split for now */
2754 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2755 rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2756 vsi->max_frame_size : max_rxmax;
2758 rctx.dsize = 1; /* do 32byte descriptors */
2759 rctx.hsplit_0 = 0; /* no HDR split initially */
2760 rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
2761 rctx.qlen = que->num_desc;
2762 rctx.tphrdesc_ena = 1;
2763 rctx.tphwdesc_ena = 1;
2764 rctx.tphdata_ena = 0;
2765 rctx.tphhead_ena = 0;
2766 rctx.lrxqthresh = 2;
2773 err = i40e_clear_lan_rx_queue_context(hw, i);
2776 "Unable to clear RX context %d\n", i);
2779 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2781 device_printf(dev, "Unable to set RX context %d\n", i);
2784 err = ixl_init_rx_ring(que);
2786 device_printf(dev, "Fail in init_rx_ring %d\n", i);
2789 wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2791 /* preserve queue */
/* Under netmap, let the netmap ring state dictate the RX tail */
2792 if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2793 struct netmap_adapter *na = NA(vsi->ifp);
2794 struct netmap_kring *kring = &na->rx_rings[i];
2795 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2796 wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2798 #endif /* DEV_NETMAP */
2799 wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2805 /*********************************************************************
2807  * Free all VSI structs.
 *
 * Tears down each station queue (TX/RX soft structs, descriptor DMA
 * memory, ring mutexes), frees the queue array, then the MAC filter
 * list.  Queues whose mutex was never initialized are skipped.
2809  **********************************************************************/
2811 ixl_free_vsi(struct ixl_vsi *vsi)
2813 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2814 struct ixl_queue *que = vsi->queues;
2816 /* Free station queues */
2817 for (int i = 0; i < vsi->num_queues; i++, que++) {
2818 struct tx_ring *txr = &que->txr;
2819 struct rx_ring *rxr = &que->rxr;
2821 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2824 ixl_free_que_tx(que);
2826 i40e_free_dma_mem(&pf->hw, &txr->dma);
2828 IXL_TX_LOCK_DESTROY(txr);
2830 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2833 ixl_free_que_rx(que);
2835 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2837 IXL_RX_LOCK_DESTROY(rxr);
2840 free(vsi->queues, M_DEVBUF);
2842 /* Free VSI filter list */
2843 ixl_free_mac_filters(vsi);
/* Drain and free every entry on the VSI's software MAC filter list */
2847 ixl_free_mac_filters(struct ixl_vsi *vsi)
2849 struct ixl_mac_filter *f;
2851 while (!SLIST_EMPTY(&vsi->ftl)) {
2852 f = SLIST_FIRST(&vsi->ftl);
2853 SLIST_REMOVE_HEAD(&vsi->ftl, next);
2859 /*********************************************************************
2861  * Allocate memory for the VSI (virtual station interface) and their
2862  * associated queues, rings and the descriptors associated with each,
2863  * called only once at attach.
 *
 * For each queue: init the TX mutex, allocate the TX descriptor DMA
 * area and soft state, allocate a buf_ring, then the same for RX.
 * NOTE(review): error-unwind labels/returns are elided in this chunk;
 * the trailing loop (line 2974+) is the failure cleanup path.
2865  **********************************************************************/
2867 ixl_setup_stations(struct ixl_pf *pf)
2869 device_t dev = pf->dev;
2870 struct ixl_vsi *vsi;
2871 struct ixl_queue *que;
2872 struct tx_ring *txr;
2873 struct rx_ring *rxr;
2875 int error = I40E_SUCCESS;
2878 vsi->back = (void *)pf;
2884 /* Get memory for the station queues */
2886 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2887 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2888 device_printf(dev, "Unable to allocate queue memory\n");
2893 for (int i = 0; i < vsi->num_queues; i++) {
2894 que = &vsi->queues[i];
2895 que->num_desc = ixl_ringsz;
2898 /* mark the queue as active */
2899 vsi->active_queues |= (u64)1 << que->me;
2902 txr->tail = I40E_QTX_TAIL(que->me);
2904 /* Initialize the TX lock */
2905 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2906 device_get_nameunit(dev), que->me);
2907 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2908 /* Create the TX descriptor ring */
/* Extra u32 holds the head-writeback word after the descriptors */
2909 tsize = roundup2((que->num_desc *
2910 sizeof(struct i40e_tx_desc)) +
2911 sizeof(u32), DBA_ALIGN);
2912 if (i40e_allocate_dma_mem(&pf->hw,
2913 &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2915 "Unable to allocate TX Descriptor memory\n");
2919 txr->base = (struct i40e_tx_desc *)txr->dma.va;
2920 bzero((void *)txr->base, tsize);
2921 /* Now allocate transmit soft structs for the ring */
2922 if (ixl_allocate_tx_data(que)) {
2924 "Critical Failure setting up TX structures\n");
2928 /* Allocate a buf ring */
2929 txr->br = buf_ring_alloc(4096, M_DEVBUF,
2930 M_WAITOK, &txr->mtx);
2931 if (txr->br == NULL) {
2933 "Critical Failure setting up TX buf ring\n");
2939 * Next the RX queues...
2941 rsize = roundup2(que->num_desc *
2942 sizeof(union i40e_rx_desc), DBA_ALIGN);
2945 rxr->tail = I40E_QRX_TAIL(que->me);
2947 /* Initialize the RX side lock */
2948 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2949 device_get_nameunit(dev), que->me);
2950 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2952 if (i40e_allocate_dma_mem(&pf->hw,
2953 &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2955 "Unable to allocate RX Descriptor memory\n");
2959 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2960 bzero((void *)rxr->base, rsize);
2962 /* Allocate receive soft structs for the ring*/
2963 if (ixl_allocate_rx_data(que)) {
2965 "Critical Failure setting up receive structs\n");
/* Failure path: release whatever descriptor DMA was allocated */
2974 for (int i = 0; i < vsi->num_queues; i++) {
2975 que = &vsi->queues[i];
2979 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2981 i40e_free_dma_mem(&pf->hw, &txr->dma);
2989 ** Provide a update to the queue RX
2990 ** interrupt moderation value.
**
** In dynamic mode, classifies the queue's recent byte rate into a
** latency band and nudges the ITR toward the band's target with
** exponential smoothing; otherwise just syncs the HW to the static
** vsi->rx_itr_setting.
2993 ixl_set_queue_rx_itr(struct ixl_queue *que)
2995 struct ixl_vsi *vsi = que->vsi;
2996 struct i40e_hw *hw = vsi->hw;
2997 struct rx_ring *rxr = &que->rxr;
3003 /* Idle, do nothing */
3004 if (rxr->bytes == 0)
3007 if (ixl_dynamic_rx_itr) {
/* Bytes per ITR interval — the load metric for band selection */
3008 rx_bytes = rxr->bytes/rxr->itr;
3011 /* Adjust latency range */
3012 switch (rxr->latency) {
3013 case IXL_LOW_LATENCY:
3014 if (rx_bytes > 10) {
3015 rx_latency = IXL_AVE_LATENCY;
3016 rx_itr = IXL_ITR_20K;
3019 case IXL_AVE_LATENCY:
3020 if (rx_bytes > 20) {
3021 rx_latency = IXL_BULK_LATENCY;
3022 rx_itr = IXL_ITR_8K;
3023 } else if (rx_bytes <= 10) {
3024 rx_latency = IXL_LOW_LATENCY;
3025 rx_itr = IXL_ITR_100K;
3028 case IXL_BULK_LATENCY:
3029 if (rx_bytes <= 20) {
3030 rx_latency = IXL_AVE_LATENCY;
3031 rx_itr = IXL_ITR_20K;
3036 rxr->latency = rx_latency;
3038 if (rx_itr != rxr->itr) {
3039 /* do an exponential smoothing */
3040 rx_itr = (10 * rx_itr * rxr->itr) /
3041 ((9 * rx_itr) + rxr->itr);
3042 rxr->itr = rx_itr & IXL_MAX_ITR;
3043 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3044 que->me), rxr->itr);
3046 } else { /* We may have toggled to non-dynamic */
3047 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
3048 vsi->rx_itr_setting = ixl_rx_itr;
3049 /* Update the hardware if needed */
3050 if (rxr->itr != vsi->rx_itr_setting) {
3051 rxr->itr = vsi->rx_itr_setting;
3052 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3053 que->me), rxr->itr);
3063 ** Provide a update to the queue TX
3064 ** interrupt moderation value.
**
** TX twin of ixl_set_queue_rx_itr: dynamic latency-band adjustment
** with exponential smoothing, or static sync to vsi->tx_itr_setting.
3067 ixl_set_queue_tx_itr(struct ixl_queue *que)
3069 struct ixl_vsi *vsi = que->vsi;
3070 struct i40e_hw *hw = vsi->hw;
3071 struct tx_ring *txr = &que->txr;
3077 /* Idle, do nothing */
3078 if (txr->bytes == 0)
3081 if (ixl_dynamic_tx_itr) {
/* Bytes per ITR interval — the load metric for band selection */
3082 tx_bytes = txr->bytes/txr->itr;
3085 switch (txr->latency) {
3086 case IXL_LOW_LATENCY:
3087 if (tx_bytes > 10) {
3088 tx_latency = IXL_AVE_LATENCY;
3089 tx_itr = IXL_ITR_20K;
3092 case IXL_AVE_LATENCY:
3093 if (tx_bytes > 20) {
3094 tx_latency = IXL_BULK_LATENCY;
3095 tx_itr = IXL_ITR_8K;
3096 } else if (tx_bytes <= 10) {
3097 tx_latency = IXL_LOW_LATENCY;
3098 tx_itr = IXL_ITR_100K;
3101 case IXL_BULK_LATENCY:
3102 if (tx_bytes <= 20) {
3103 tx_latency = IXL_AVE_LATENCY;
3104 tx_itr = IXL_ITR_20K;
3109 txr->latency = tx_latency;
3111 if (tx_itr != txr->itr) {
3112 /* do an exponential smoothing */
3113 tx_itr = (10 * tx_itr * txr->itr) /
3114 ((9 * tx_itr) + txr->itr);
3115 txr->itr = tx_itr & IXL_MAX_ITR;
3116 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3117 que->me), txr->itr);
3120 } else { /* We may have toggled to non-dynamic */
3121 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
3122 vsi->tx_itr_setting = ixl_tx_itr;
3123 /* Update the hardware if needed */
3124 if (txr->itr != vsi->tx_itr_setting) {
3125 txr->itr = vsi->tx_itr_setting;
3126 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3127 que->me), txr->itr);
3135 #define QUEUE_NAME_LEN 32
/*
** ixl_add_vsi_sysctls: create a sysctl node named sysctl_name under
** the device's tree for this VSI and attach its ethernet statistics.
** The created node is saved in vsi->vsi_node for later children.
*/
3138 ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
3139 struct sysctl_ctx_list *ctx, const char *sysctl_name)
3141 struct sysctl_oid *tree;
3142 struct sysctl_oid_list *child;
3143 struct sysctl_oid_list *vsi_list;
3145 tree = device_get_sysctl_tree(pf->dev);
3146 child = SYSCTL_CHILDREN(tree);
3147 vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
3148 CTLFLAG_RD, NULL, "VSI Number");
3149 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
3151 ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
/*
** ixl_add_hw_stats: register all driver statistics sysctls — driver
** counters, the PF VSI node, per-queue TX/RX counters, and the MAC
** (port-level) statistics.
*/
3155 ixl_add_hw_stats(struct ixl_pf *pf)
3157 device_t dev = pf->dev;
3158 struct ixl_vsi *vsi = &pf->vsi;
3159 struct ixl_queue *queues = vsi->queues;
3160 struct i40e_hw_port_stats *pf_stats = &pf->stats;
3162 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3163 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3164 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3165 struct sysctl_oid_list *vsi_list;
3167 struct sysctl_oid *queue_node;
3168 struct sysctl_oid_list *queue_list;
3170 struct tx_ring *txr;
3171 struct rx_ring *rxr;
3172 char queue_namebuf[QUEUE_NAME_LEN];
3174 /* Driver statistics */
3175 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3176 CTLFLAG_RD, &pf->watchdog_events,
3177 "Watchdog timeouts");
3178 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3179 CTLFLAG_RD, &pf->admin_irq,
3180 "Admin Queue IRQ Handled");
3182 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "vc_debug_level",
3183 CTLFLAG_RW, &pf->vc_debug_lvl, 0,
3184 "PF/VF Virtual Channel debug logging level");
3186 ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
3187 vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
3189 /* Queue statistics */
3190 for (int q = 0; q < vsi->num_queues; q++) {
3191 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3192 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
3193 OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
3194 queue_list = SYSCTL_CHILDREN(queue_node);
3196 txr = &(queues[q].txr);
3197 rxr = &(queues[q].rxr);
3199 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3200 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3201 "m_defrag() failed");
3202 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3203 CTLFLAG_RD, &(queues[q].dropped_pkts),
3204 "Driver dropped packets");
3205 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3206 CTLFLAG_RD, &(queues[q].irqs),
3207 "irqs on this queue");
3208 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3209 CTLFLAG_RD, &(queues[q].tso),
3211 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3212 CTLFLAG_RD, &(queues[q].tx_dma_setup),
3213 "Driver tx dma failure in xmit");
3214 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3215 CTLFLAG_RD, &(txr->no_desc),
3216 "Queue No Descriptor Available");
3217 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3218 CTLFLAG_RD, &(txr->total_packets),
3219 "Queue Packets Transmitted");
3220 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3221 CTLFLAG_RD, &(txr->tx_bytes),
3222 "Queue Bytes Transmitted");
3223 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3224 CTLFLAG_RD, &(rxr->rx_packets),
3225 "Queue Packets Received");
3226 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3227 CTLFLAG_RD, &(rxr->rx_bytes),
3228 "Queue Bytes Received");
3232 ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
/*
** ixl_add_sysctls_eth_stats: attach one read-only UQUAD sysctl under
** `child' for each counter in the i40e_eth_stats block, using a
** NULL-terminated table of {pointer, name, description} entries.
*/
3236 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3237 struct sysctl_oid_list *child,
3238 struct i40e_eth_stats *eth_stats)
3240 struct ixl_sysctl_info ctls[] =
3242 {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3243 {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3244 "Unicast Packets Received"},
3245 {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3246 "Multicast Packets Received"},
3247 {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3248 "Broadcast Packets Received"},
3249 {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3250 {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3251 {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3252 {&eth_stats->tx_multicast, "mcast_pkts_txd",
3253 "Multicast Packets Transmitted"},
3254 {&eth_stats->tx_broadcast, "bcast_pkts_txd",
3255 "Broadcast Packets Transmitted"},
3260 struct ixl_sysctl_info *entry = ctls;
3261 while (entry->stat != 0)
3263 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3264 CTLFLAG_RD, entry->stat,
3265 entry->description);
/*
** ixl_add_sysctls_mac_stats: create a "mac" sysctl node and populate
** it with the port-level (MAC) statistics — first the embedded
** ethernet counters, then the table of error/size/flow-control
** counters below.
*/
3271 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3272 struct sysctl_oid_list *child,
3273 struct i40e_hw_port_stats *stats)
3275 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3276 CTLFLAG_RD, NULL, "Mac Statistics");
3277 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3279 struct i40e_eth_stats *eth_stats = &stats->eth;
3280 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3282 struct ixl_sysctl_info ctls[] =
3284 {&stats->crc_errors, "crc_errors", "CRC Errors"},
3285 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3286 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3287 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3288 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3289 /* Packet Reception Stats */
3290 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3291 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3292 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3293 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3294 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3295 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3296 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3297 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3298 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3299 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3300 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3301 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3302 /* Packet Transmission Stats */
3303 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3304 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3305 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3306 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3307 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3308 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3309 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3311 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3312 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3313 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3314 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3319 struct ixl_sysctl_info *entry = ctls;
3320 while (entry->stat != 0)
3322 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3323 CTLFLAG_RD, entry->stat,
3324 entry->description);
3330 ** ixl_config_rss - setup RSS
3331 ** - note this is done for the single vsi
**
** Programs the RSS hash key (from the kernel RSS subsystem when
** built with options RSS, otherwise a fixed seed), enables the
** desired packet-classification types in PFQF_HENA, and fills the
** indirection LUT round-robin across the VSI's queues.
** NOTE(review): the #ifdef RSS / #else markers are elided in this
** chunk; both variants of rss_seed and of the hena setup are visible
** back-to-back below.
3333 static void ixl_config_rss(struct ixl_vsi *vsi)
3335 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3336 struct i40e_hw *hw = vsi->hw;
3338 u64 set_hena = 0, hena;
3341 u32 rss_hash_config;
3342 u32 rss_seed[IXL_KEYSZ];
3344 u32 rss_seed[IXL_KEYSZ] = {0x41b01687,
3345 0x183cfd8c, 0xce880440, 0x580cbc3c,
3346 0x35897377, 0x328b25e1, 0x4fa98922,
3347 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
3351 /* Fetch the configured RSS key */
3352 rss_getkey((uint8_t *) &rss_seed);
3355 /* Fill out hash function seed */
3356 for (i = 0; i < IXL_KEYSZ; i++)
3357 wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
3359 /* Enable PCTYPES for RSS: */
3361 rss_hash_config = rss_gethashconfig();
3362 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3363 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3364 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3365 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
3366 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3367 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
3368 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3369 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
3370 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3371 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
3372 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3373 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
3374 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3375 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
/* non-RSS build: enable the full default set of PCTYPEs */
3378 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3379 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3380 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3381 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3382 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3383 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3384 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3385 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3386 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3387 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3388 ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
/* HENA is a 64-bit mask split across two 32-bit registers */
3390 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3391 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3393 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3394 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3396 /* Populate the LUT with max no. of queues in round robin fashion */
3397 for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3398 if (j == vsi->num_queues)
3402 * Fetch the RSS bucket id for the given indirection entry.
3403 * Cap it at the number of configured buckets (which is
3406 que_id = rss_get_indirection_to_bucket(i);
3407 que_id = que_id % vsi->num_queues;
3411 /* lut = 4-byte sliding window of 4 lut entries */
3412 lut = (lut << 8) | (que_id &
3413 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3414 /* On i = 3, we have 4 entries in lut; write to the register */
3416 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3423 ** This routine is run via a vlan config EVENT,
3424 ** it enables us to use the HW Filter table since
3425 ** we can get the vlan id. This just creates the
3426 ** entry in the soft version of the VFTA, init will
3427 ** repopulate the real table.
3430 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3432 struct ixl_vsi *vsi = ifp->if_softc;
3433 struct i40e_hw *hw = vsi->hw;
3434 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3436 if (ifp->if_softc != arg) /* Not our event */
/* Valid 802.1Q VIDs are 1..4095 */
3439 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3444 ixl_add_filter(vsi, hw->mac.addr, vtag);
3449 ** This routine is run via a vlan
3450 ** unconfig EVENT, remove our entry
3451 ** in the soft vfta.
3454 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3456 struct ixl_vsi *vsi = ifp->if_softc;
3457 struct i40e_hw *hw = vsi->hw;
3458 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3460 if (ifp->if_softc != arg)
/* Valid 802.1Q VIDs are 1..4095 */
3463 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3468 ixl_del_filter(vsi, hw->mac.addr, vtag);
3473 ** This routine updates vlan filters, called by init
3474 ** it scans the filter table and then updates the hw
3475 ** after a soft reset.
3478 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3480 struct ixl_mac_filter *f;
/* Nothing to re-sync if no vlans are configured */
3483 if (vsi->num_vlans == 0)
3486 ** Scan the filter list for vlan entries,
3487 ** mark them for addition and then call
3488 ** for the AQ update.
3490 SLIST_FOREACH(f, &vsi->ftl, next) {
3491 if (f->flags & IXL_FILTER_VLAN) {
3499 printf("setup vlan: no filters found!\n");
3502 flags = IXL_FILTER_VLAN;
3503 flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3504 ixl_add_hw_filters(vsi, flags, cnt);
3509 ** Initialize filter list and add filters that the hardware
3510 ** needs to know about.
3513 ixl_init_filters(struct ixl_vsi *vsi)
3515 /* Add broadcast address */
3516 ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3520 ** This routine adds multicast filters to the software
** filter list only; flags mark it for a later HW update.
3523 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3525 struct ixl_mac_filter *f;
3527 /* Does one already exist */
3528 f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3532 f = ixl_get_filter(vsi);
3534 printf("WARNING: no filter available!!\n");
3537 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3538 f->vlan = IXL_VLAN_ANY;
3539 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
/* Re-push every in-use software filter to the hardware (after reset) */
3546 ixl_reconfigure_filters(struct ixl_vsi *vsi)
3549 ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3553 ** This routine adds macvlan filters: creates (or reuses) a software
** filter entry for (macaddr, vlan) and immediately pushes it to HW.
3556 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3558 struct ixl_mac_filter *f, *tmp;
3562 DEBUGOUT("ixl_add_filter: begin");
3567 /* Does one already exist */
3568 f = ixl_find_filter(vsi, macaddr, vlan);
3572 ** Is this the first vlan being registered, if so we
3573 ** need to remove the ANY filter that indicates we are
3574 ** not in a vlan, and replace that with a 0 filter.
3576 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3577 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3579 ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
/* vlan 0 = untagged traffic while vlan filtering is active */
3580 ixl_add_filter(vsi, macaddr, 0);
3584 f = ixl_get_filter(vsi);
3586 device_printf(dev, "WARNING: no filter available!!\n");
3589 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3591 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3592 if (f->vlan != IXL_VLAN_ANY)
3593 f->flags |= IXL_FILTER_VLAN;
3597 ixl_add_hw_filters(vsi, f->flags, 1);
/*
** ixl_del_filter: mark the matching (macaddr, vlan) software filter
** for deletion and push the removal to hardware.  When the last vlan
** goes away, revert the vlan-0 filter back to an ANY filter.
*/
3602 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3604 struct ixl_mac_filter *f;
3606 f = ixl_find_filter(vsi, macaddr, vlan);
3610 f->flags |= IXL_FILTER_DEL;
3611 ixl_del_hw_filters(vsi, 1);
3614 /* Check if this is the last vlan removal */
3615 if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3616 /* Switch back to a non-vlan filter */
3617 ixl_del_filter(vsi, macaddr, 0);
3618 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
// Linear search of the VSI filter list for an entry matching both the MAC
// address and the VLAN id; returns NULL when none is found.
// NOTE(review): cmp_etheraddr() appears to return nonzero on a MAC match
// (the !cmp_etheraddr case skips the VLAN check) — confirm its contract.
3624 ** Find the filter with both matching mac addr and vlan id
3626 static struct ixl_mac_filter *
3627 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3629 struct ixl_mac_filter *f;
3632 SLIST_FOREACH(f, &vsi->ftl, next) {
3633 if (!cmp_etheraddr(f->macaddr, macaddr))
3635 if (f->vlan == vlan) {
// Batch-program up to 'cnt' pending software filters into hardware via a
// single add_macvlan Admin Queue command. Caller must hold the PF lock.
// NOTE(review): matching uses exact equality (f->flags == flags), not a
// mask test — callers must pass the precise flag combination.
3647 ** This routine takes additions to the vsi filter
3648 ** table and creates an Admin Queue call to create
3649 ** the filters in the hardware.
3652 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3654 struct i40e_aqc_add_macvlan_element_data *a, *b;
3655 struct ixl_mac_filter *f;
3664 IXL_PF_LOCK_ASSERT(pf);
// M_NOWAIT: may legitimately fail; the error path only logs and bails.
3666 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3667 M_DEVBUF, M_NOWAIT | M_ZERO);
3669 device_printf(dev, "add_hw_filters failed to get memory\n");
3674 ** Scan the filter list, each time we find one
3675 ** we add it to the admin queue array and turn off
3678 SLIST_FOREACH(f, &vsi->ftl, next) {
3679 if (f->flags == flags) {
3680 b = &a[j]; // a pox on fvl long names :)
3681 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
// IXL_VLAN_ANY entries ignore the VLAN tag; others are perfect-match.
3682 if (f->vlan == IXL_VLAN_ANY) {
3684 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3686 b->vlan_tag = f->vlan;
3689 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
// Clear ADD so the entry is not re-programmed on the next pass.
3690 f->flags &= ~IXL_FILTER_ADD;
3697 err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3699 device_printf(dev, "aq_add_macvlan err %d, "
3700 "aq_error %d\n", err, hw->aq.asq_last_status);
3702 vsi->hw_filters_add += j;
// Batch-remove up to 'cnt' filters flagged IXL_FILTER_DEL: build a
// remove_macvlan Admin Queue command and unlink each entry from the
// software list as it is queued. ENOENT from firmware is tolerated
// (historical firmware quirk, see inline note).
3709 ** This routine takes removals in the vsi filter
3710 ** table and creates an Admin Queue call to delete
3711 ** the filters in the hardware.
3714 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3716 struct i40e_aqc_remove_macvlan_element_data *d, *e;
3720 struct ixl_mac_filter *f, *f_temp;
3723 DEBUGOUT("ixl_del_hw_filters: begin\n");
3729 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3730 M_DEVBUF, M_NOWAIT | M_ZERO);
3732 printf("del hw filter failed to get memory\n");
// _SAFE iteration: entries are removed from the list inside the loop.
3736 SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3737 if (f->flags & IXL_FILTER_DEL) {
3738 e = &d[j]; // a pox on fvl long names :)
3739 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3740 e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3741 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3742 /* delete entry from vsi list */
3743 SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3751 err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3752 /* NOTE: returns ENOENT every time but seems to work fine,
3753 so we'll ignore that specific error. */
3754 // TODO: Does this still occur on current firmwares?
3755 if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
// On partial failure, count only the per-element successes.
3757 for (int i = 0; i < j; i++)
3758 sc += (!d[i].error_code);
3759 vsi->hw_filters_del += sc;
3761 "Failed to remove %d/%d filters, aq error %d\n",
3762 j - sc, j, hw->aq.asq_last_status);
3764 vsi->hw_filters_del += j;
3768 DEBUGOUT("ixl_del_hw_filters: end\n");
// Enable every TX and RX queue belonging to the VSI. For each queue the
// enable-request bit is set and the hardware status bit is polled up to
// 10 times with 10 ms delays; a queue that never reports enabled is only
// logged, not treated as fatal here.
3773 ixl_enable_rings(struct ixl_vsi *vsi)
3775 struct ixl_pf *pf = vsi->back;
3776 struct i40e_hw *hw = &pf->hw;
3781 for (int i = 0; i < vsi->num_queues; i++) {
3782 index = vsi->first_queue + i;
3783 i40e_pre_tx_queue_cfg(hw, index, TRUE);
3785 reg = rd32(hw, I40E_QTX_ENA(index));
3786 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3787 I40E_QTX_ENA_QENA_STAT_MASK;
3788 wr32(hw, I40E_QTX_ENA(index), reg);
3789 /* Verify the enable took */
3790 for (int j = 0; j < 10; j++) {
3791 reg = rd32(hw, I40E_QTX_ENA(index));
3792 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3794 i40e_msec_delay(10);
3796 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3797 device_printf(pf->dev, "TX queue %d disabled!\n",
// Same request-then-poll sequence for the RX side of the queue pair.
3802 reg = rd32(hw, I40E_QRX_ENA(index));
3803 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3804 I40E_QRX_ENA_QENA_STAT_MASK;
3805 wr32(hw, I40E_QRX_ENA(index), reg);
3806 /* Verify the enable took */
3807 for (int j = 0; j < 10; j++) {
3808 reg = rd32(hw, I40E_QRX_ENA(index));
3809 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3811 i40e_msec_delay(10);
3813 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3814 device_printf(pf->dev, "RX queue %d disabled!\n",
// Disable every TX and RX queue belonging to the VSI: clear the
// enable-request bit and poll (10 x 10 ms) until the hardware status bit
// drops. A queue that stays enabled is logged but not fatal.
3824 ixl_disable_rings(struct ixl_vsi *vsi)
3826 struct ixl_pf *pf = vsi->back;
3827 struct i40e_hw *hw = &pf->hw;
3832 for (int i = 0; i < vsi->num_queues; i++) {
3833 index = vsi->first_queue + i;
// Pre-disable hook plus a 500 us settle delay before touching QTX_ENA.
3835 i40e_pre_tx_queue_cfg(hw, index, FALSE);
3836 i40e_usec_delay(500);
3838 reg = rd32(hw, I40E_QTX_ENA(index));
3839 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3840 wr32(hw, I40E_QTX_ENA(index), reg);
3841 /* Verify the disable took */
3842 for (int j = 0; j < 10; j++) {
3843 reg = rd32(hw, I40E_QTX_ENA(index));
3844 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3846 i40e_msec_delay(10);
3848 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3849 device_printf(pf->dev, "TX queue %d still enabled!\n",
// Same clear-then-poll sequence for the RX side.
3854 reg = rd32(hw, I40E_QRX_ENA(index));
3855 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3856 wr32(hw, I40E_QRX_ENA(index), reg);
3857 /* Verify the disable took */
3858 for (int j = 0; j < 10; j++) {
3859 reg = rd32(hw, I40E_QRX_ENA(index));
3860 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3862 i40e_msec_delay(10);
3864 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3865 device_printf(pf->dev, "RX queue %d still enabled!\n",
// Decode a Malicious Driver Detection (MDD) interrupt: read the global
// TX/RX MDET registers to report which function/queue triggered, clear
// the latched events (write-1-to-clear style full writes), check the
// PF-local MDET registers, then re-arm the MDD cause in ICR0_ENA.
3875 * ixl_handle_mdd_event
3877 * Called from interrupt handler to identify possibly malicious vfs
3878 * (But also detects events from the PF, as well)
3880 static void ixl_handle_mdd_event(struct ixl_pf *pf)
3882 struct i40e_hw *hw = &pf->hw;
3883 device_t dev = pf->dev;
3884 bool mdd_detected = false;
3885 bool pf_mdd_detected = false;
3888 /* find what triggered the MDD event */
3889 reg = rd32(hw, I40E_GL_MDET_TX);
3890 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3891 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3892 I40E_GL_MDET_TX_PF_NUM_SHIFT;
3893 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3894 I40E_GL_MDET_TX_EVENT_SHIFT;
3895 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3896 I40E_GL_MDET_TX_QUEUE_SHIFT;
3898 "Malicious Driver Detection event 0x%02x"
3899 " on TX queue %d pf number 0x%02x\n",
3900 event, queue, pf_num);
// Writing all-ones clears the latched TX MDD event.
3901 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3902 mdd_detected = true;
3904 reg = rd32(hw, I40E_GL_MDET_RX);
3905 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3906 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3907 I40E_GL_MDET_RX_FUNCTION_SHIFT;
3908 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3909 I40E_GL_MDET_RX_EVENT_SHIFT;
3910 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3911 I40E_GL_MDET_RX_QUEUE_SHIFT;
3913 "Malicious Driver Detection event 0x%02x"
3914 " on RX queue %d of function 0x%02x\n",
3915 event, queue, func);
3916 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3917 mdd_detected = true;
// PF-scoped MDET registers: was the offender this PF itself?
3921 reg = rd32(hw, I40E_PF_MDET_TX);
3922 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3923 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3925 "MDD TX event is for this function 0x%08x",
3927 pf_mdd_detected = true;
3929 reg = rd32(hw, I40E_PF_MDET_RX);
3930 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3931 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3933 "MDD RX event is for this function 0x%08x",
3935 pf_mdd_detected = true;
3939 /* re-enable mdd interrupt cause */
3940 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3941 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3942 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
// Enable interrupts for the VSI: in MSI-X mode arm the admin queue vector
// plus one vector per queue; otherwise fall back to the legacy/MSI path.
3947 ixl_enable_intr(struct ixl_vsi *vsi)
3949 struct i40e_hw *hw = vsi->hw;
3950 struct ixl_queue *que = vsi->queues;
3952 if (ixl_enable_msix) {
3953 ixl_enable_adminq(hw);
3954 for (int i = 0; i < vsi->num_queues; i++, que++)
3955 ixl_enable_queue(hw, que->me);
3957 ixl_enable_legacy(hw);
// Mask the per-queue interrupt vectors only; the admin queue interrupt
// is left alone (see ixl_disable_intr for that).
3961 ixl_disable_rings_intr(struct ixl_vsi *vsi)
3963 struct i40e_hw *hw = vsi->hw;
3964 struct ixl_queue *que = vsi->queues;
3966 for (int i = 0; i < vsi->num_queues; i++, que++)
3967 ixl_disable_queue(hw, que->me);
// Disable the non-queue interrupt source: admin queue vector under MSI-X,
// otherwise the single legacy/MSI interrupt.
3971 ixl_disable_intr(struct ixl_vsi *vsi)
3973 struct i40e_hw *hw = vsi->hw;
3975 if (ixl_enable_msix)
3976 ixl_disable_adminq(hw);
3978 ixl_disable_legacy(hw);
// Arm the admin queue interrupt (vector 0): set INTENA, clear any pending
// bit via CLEARPBA, and select the no-ITR index so it fires immediately.
3982 ixl_enable_adminq(struct i40e_hw *hw)
3986 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3987 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3988 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3989 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
// Mask the admin queue interrupt: write DYN_CTL0 without INTENA set
// (only the no-ITR index), leaving the vector disabled.
3995 ixl_disable_adminq(struct i40e_hw *hw)
3999 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4000 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
// Arm the MSI-X vector for queue 'id' (DYN_CTLN register):
// enable, clear pending, no ITR throttling index.
4006 ixl_enable_queue(struct i40e_hw *hw, int id)
4010 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
4011 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
4012 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
4013 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
// Mask the MSI-X vector for queue 'id' by writing DYN_CTLN without INTENA.
4017 ixl_disable_queue(struct i40e_hw *hw, int id)
4021 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4022 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
// Legacy/MSI mode: a single interrupt shares DYN_CTL0 with the admin
// queue path; same enable/clear-pending/no-ITR write as ixl_enable_adminq.
4028 ixl_enable_legacy(struct i40e_hw *hw)
4031 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4032 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4033 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4034 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
// Mask the single legacy/MSI interrupt (DYN_CTL0 without INTENA).
4038 ixl_disable_legacy(struct i40e_hw *hw)
4042 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4043 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
// Refresh all port-level hardware statistics for the PF: each counter is
// read via ixl_stat_update32/48, which subtracts a first-read offset so
// reported values count from driver load (device stats survive PF reset).
// Afterwards the PF VSI stats and any enabled VF VSIs are updated too.
4049 ixl_update_stats_counters(struct ixl_pf *pf)
4051 struct i40e_hw *hw = &pf->hw;
4052 struct ixl_vsi *vsi = &pf->vsi;
4055 struct i40e_hw_port_stats *nsd = &pf->stats;
4056 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
4058 /* Update hw stats */
4059 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
4060 pf->stat_offsets_loaded,
4061 &osd->crc_errors, &nsd->crc_errors);
4062 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
4063 pf->stat_offsets_loaded,
4064 &osd->illegal_bytes, &nsd->illegal_bytes);
4065 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
4066 I40E_GLPRT_GORCL(hw->port),
4067 pf->stat_offsets_loaded,
4068 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
4069 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
4070 I40E_GLPRT_GOTCL(hw->port),
4071 pf->stat_offsets_loaded,
4072 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
4073 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
4074 pf->stat_offsets_loaded,
4075 &osd->eth.rx_discards,
4076 &nsd->eth.rx_discards);
4077 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
4078 I40E_GLPRT_UPRCL(hw->port),
4079 pf->stat_offsets_loaded,
4080 &osd->eth.rx_unicast,
4081 &nsd->eth.rx_unicast);
4082 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
4083 I40E_GLPRT_UPTCL(hw->port),
4084 pf->stat_offsets_loaded,
4085 &osd->eth.tx_unicast,
4086 &nsd->eth.tx_unicast);
4087 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
4088 I40E_GLPRT_MPRCL(hw->port),
4089 pf->stat_offsets_loaded,
4090 &osd->eth.rx_multicast,
4091 &nsd->eth.rx_multicast);
4092 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
4093 I40E_GLPRT_MPTCL(hw->port),
4094 pf->stat_offsets_loaded,
4095 &osd->eth.tx_multicast,
4096 &nsd->eth.tx_multicast);
4097 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
4098 I40E_GLPRT_BPRCL(hw->port),
4099 pf->stat_offsets_loaded,
4100 &osd->eth.rx_broadcast,
4101 &nsd->eth.rx_broadcast);
4102 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
4103 I40E_GLPRT_BPTCL(hw->port),
4104 pf->stat_offsets_loaded,
4105 &osd->eth.tx_broadcast,
4106 &nsd->eth.tx_broadcast);
// Link-fault and length-error counters.
4108 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
4109 pf->stat_offsets_loaded,
4110 &osd->tx_dropped_link_down,
4111 &nsd->tx_dropped_link_down);
4112 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
4113 pf->stat_offsets_loaded,
4114 &osd->mac_local_faults,
4115 &nsd->mac_local_faults);
4116 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
4117 pf->stat_offsets_loaded,
4118 &osd->mac_remote_faults,
4119 &nsd->mac_remote_faults);
4120 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
4121 pf->stat_offsets_loaded,
4122 &osd->rx_length_errors,
4123 &nsd->rx_length_errors);
4125 /* Flow control (LFC) stats */
4126 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
4127 pf->stat_offsets_loaded,
4128 &osd->link_xon_rx, &nsd->link_xon_rx);
4129 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
4130 pf->stat_offsets_loaded,
4131 &osd->link_xon_tx, &nsd->link_xon_tx);
4132 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
4133 pf->stat_offsets_loaded,
4134 &osd->link_xoff_rx, &nsd->link_xoff_rx);
4135 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
4136 pf->stat_offsets_loaded,
4137 &osd->link_xoff_tx, &nsd->link_xoff_tx);
4139 /* Packet size stats rx */
4140 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
4141 I40E_GLPRT_PRC64L(hw->port),
4142 pf->stat_offsets_loaded,
4143 &osd->rx_size_64, &nsd->rx_size_64);
4144 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
4145 I40E_GLPRT_PRC127L(hw->port),
4146 pf->stat_offsets_loaded,
4147 &osd->rx_size_127, &nsd->rx_size_127);
4148 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
4149 I40E_GLPRT_PRC255L(hw->port),
4150 pf->stat_offsets_loaded,
4151 &osd->rx_size_255, &nsd->rx_size_255);
4152 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
4153 I40E_GLPRT_PRC511L(hw->port),
4154 pf->stat_offsets_loaded,
4155 &osd->rx_size_511, &nsd->rx_size_511);
4156 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
4157 I40E_GLPRT_PRC1023L(hw->port),
4158 pf->stat_offsets_loaded,
4159 &osd->rx_size_1023, &nsd->rx_size_1023);
4160 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
4161 I40E_GLPRT_PRC1522L(hw->port),
4162 pf->stat_offsets_loaded,
4163 &osd->rx_size_1522, &nsd->rx_size_1522);
4164 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
4165 I40E_GLPRT_PRC9522L(hw->port),
4166 pf->stat_offsets_loaded,
4167 &osd->rx_size_big, &nsd->rx_size_big);
4169 /* Packet size stats tx */
4170 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
4171 I40E_GLPRT_PTC64L(hw->port),
4172 pf->stat_offsets_loaded,
4173 &osd->tx_size_64, &nsd->tx_size_64);
4174 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
4175 I40E_GLPRT_PTC127L(hw->port),
4176 pf->stat_offsets_loaded,
4177 &osd->tx_size_127, &nsd->tx_size_127);
4178 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
4179 I40E_GLPRT_PTC255L(hw->port),
4180 pf->stat_offsets_loaded,
4181 &osd->tx_size_255, &nsd->tx_size_255);
4182 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
4183 I40E_GLPRT_PTC511L(hw->port),
4184 pf->stat_offsets_loaded,
4185 &osd->tx_size_511, &nsd->tx_size_511);
4186 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
4187 I40E_GLPRT_PTC1023L(hw->port),
4188 pf->stat_offsets_loaded,
4189 &osd->tx_size_1023, &nsd->tx_size_1023);
4190 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
4191 I40E_GLPRT_PTC1522L(hw->port),
4192 pf->stat_offsets_loaded,
4193 &osd->tx_size_1522, &nsd->tx_size_1522);
4194 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
4195 I40E_GLPRT_PTC9522L(hw->port),
4196 pf->stat_offsets_loaded,
4197 &osd->tx_size_big, &nsd->tx_size_big);
// Malformed-frame counters (undersize/fragment/oversize/jabber).
4199 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
4200 pf->stat_offsets_loaded,
4201 &osd->rx_undersize, &nsd->rx_undersize);
4202 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
4203 pf->stat_offsets_loaded,
4204 &osd->rx_fragments, &nsd->rx_fragments);
4205 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
4206 pf->stat_offsets_loaded,
4207 &osd->rx_oversize, &nsd->rx_oversize);
4208 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
4209 pf->stat_offsets_loaded,
4210 &osd->rx_jabber, &nsd->rx_jabber);
// First pass through records the offsets; later passes subtract them.
4211 pf->stat_offsets_loaded = true;
4214 /* Update vsi stats */
4215 ixl_update_vsi_stats(vsi);
4217 for (int i = 0; i < pf->num_vfs; i++) {
4219 if (vf->vf_flags & VF_FLAG_ENABLED)
4220 ixl_update_eth_stats(&pf->vfs[i].vsi);
// Taskqueue handler that drains the admin receive queue outside interrupt
// context (processing may sleep). Dispatches link, VF-message, and LAN
// overflow events; loops until the ARQ is empty or IXL_ADM_LIMIT is hit,
// then either reschedules itself or re-enables interrupts.
4225 ** Tasklet handler for MSIX Adminq interrupts
4226 ** - do outside interrupt since it might sleep
4229 ixl_do_adminq(void *context, int pending)
4231 struct ixl_pf *pf = context;
4232 struct i40e_hw *hw = &pf->hw;
4233 struct ixl_vsi *vsi = &pf->vsi;
4234 struct i40e_arq_event_info event;
4239 event.buf_len = IXL_AQ_BUF_SZ;
4240 event.msg_buf = malloc(event.buf_len,
4241 M_DEVBUF, M_NOWAIT | M_ZERO);
4242 if (!event.msg_buf) {
4243 printf("Unable to allocate adminq memory\n");
4248 /* clean and process any events */
4250 ret = i40e_clean_arq_element(hw, &event, &result);
4253 opcode = LE16_TO_CPU(event.desc.opcode);
4255 case i40e_aqc_opc_get_link_status:
4256 ixl_link_event(pf, &event);
4257 ixl_update_link_status(pf);
4259 case i40e_aqc_opc_send_msg_to_pf:
4261 ixl_handle_vf_msg(pf, &event);
4264 case i40e_aqc_opc_event_lan_overflow:
4268 printf("AdminQ unknown event %x\n", opcode);
4273 } while (result && (loop++ < IXL_ADM_LIMIT));
// Re-arm the admin queue cause in ICR0_ENA before releasing the buffer.
4275 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4276 reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4277 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4278 free(event.msg_buf, M_DEVBUF);
4281 * If there are still messages to process, reschedule ourselves.
4282 * Otherwise, re-enable our interrupt and go to sleep.
4285 taskqueue_enqueue(pf->tq, &pf->adminq);
4287 ixl_enable_intr(vsi);
// Sysctl handler: any write to the node triggers a one-shot dump of
// driver/hardware debug state via ixl_print_debug_info(). Reads return
// without side effects (req->newptr == NULL path).
4293 ixl_debug_info(SYSCTL_HANDLER_ARGS)
4296 int error, input = 0;
4298 error = sysctl_handle_int(oidp, &input, 0, req);
4300 if (error || !req->newptr)
4304 pf = (struct ixl_pf *)arg1;
4305 ixl_print_debug_info(pf);
// Dump assorted debug counters to the console: queue-0 ring state, raw
// VSI and port register reads. NOTE(review): the 0xc passed to the
// I40E_GLV_* macros looks like a hard-coded VSI stat index — confirm it
// matches vsi->info.stat_counter_idx on this setup.
4312 ixl_print_debug_info(struct ixl_pf *pf)
4314 struct i40e_hw *hw = &pf->hw;
4315 struct ixl_vsi *vsi = &pf->vsi;
4316 struct ixl_queue *que = vsi->queues;
4317 struct rx_ring *rxr = &que->rxr;
4318 struct tx_ring *txr = &que->txr;
4322 printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4323 printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4324 printf("RX next check = %x\n", rxr->next_check);
4325 printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4326 printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4327 printf("TX desc avail = %x\n", txr->avail);
4329 reg = rd32(hw, I40E_GLV_GORCL(0xc));
4330 printf("RX Bytes = %x\n", reg);
4331 reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4332 printf("Port RX Bytes = %x\n", reg);
4333 reg = rd32(hw, I40E_GLV_RDPC(0xc));
4334 printf("RX discard = %x\n", reg);
4335 reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4336 printf("Port RX discard = %x\n", reg);
4338 reg = rd32(hw, I40E_GLV_TEPC(0xc));
4339 printf("TX errors = %x\n", reg);
4340 reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4341 printf("TX Bytes = %x\n", reg);
4343 reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4344 printf("RX undersize = %x\n", reg);
4345 reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4346 printf("RX fragments = %x\n", reg);
4347 reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4348 printf("RX oversize = %x\n", reg);
4349 reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4350 printf("RX length error = %x\n", reg);
4351 reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4352 printf("mac remote fault = %x\n", reg);
4353 reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4354 printf("mac local fault = %x\n", reg);
// Refresh the per-VSI ethernet counters using the VSI's hardware stat
// index (vsi->info.stat_counter_idx). Uses the same offset-subtraction
// scheme as the port stats; first call records baselines.
4358 * Update VSI-specific ethernet statistics counters.
4360 void ixl_update_eth_stats(struct ixl_vsi *vsi)
4362 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4363 struct i40e_hw *hw = &pf->hw;
4364 struct i40e_eth_stats *es;
4365 struct i40e_eth_stats *oes;
4366 struct i40e_hw_port_stats *nsd;
4367 u16 stat_idx = vsi->info.stat_counter_idx;
4369 es = &vsi->eth_stats;
4370 oes = &vsi->eth_stats_offsets;
4373 /* Gather up the stats that the hw collects */
4374 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4375 vsi->stat_offsets_loaded,
4376 &oes->tx_errors, &es->tx_errors);
4377 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4378 vsi->stat_offsets_loaded,
4379 &oes->rx_discards, &es->rx_discards);
4381 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4382 I40E_GLV_GORCL(stat_idx),
4383 vsi->stat_offsets_loaded,
4384 &oes->rx_bytes, &es->rx_bytes);
4385 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4386 I40E_GLV_UPRCL(stat_idx),
4387 vsi->stat_offsets_loaded,
4388 &oes->rx_unicast, &es->rx_unicast);
4389 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4390 I40E_GLV_MPRCL(stat_idx),
4391 vsi->stat_offsets_loaded,
4392 &oes->rx_multicast, &es->rx_multicast);
4393 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4394 I40E_GLV_BPRCL(stat_idx),
4395 vsi->stat_offsets_loaded,
4396 &oes->rx_broadcast, &es->rx_broadcast);
4398 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4399 I40E_GLV_GOTCL(stat_idx),
4400 vsi->stat_offsets_loaded,
4401 &oes->tx_bytes, &es->tx_bytes);
4402 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4403 I40E_GLV_UPTCL(stat_idx),
4404 vsi->stat_offsets_loaded,
4405 &oes->tx_unicast, &es->tx_unicast);
4406 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4407 I40E_GLV_MPTCL(stat_idx),
4408 vsi->stat_offsets_loaded,
4409 &oes->tx_multicast, &es->tx_multicast);
4410 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4411 I40E_GLV_BPTCL(stat_idx),
4412 vsi->stat_offsets_loaded,
4413 &oes->tx_broadcast, &es->tx_broadcast);
// Baselines captured; subsequent calls report deltas from first read.
4414 vsi->stat_offsets_loaded = true;
// Pull fresh hardware counters for the VSI, then fold them (plus software
// buf_ring drop counts from each TX queue) into the ifnet statistics via
// the IXL_SET_* accessor macros.
4418 ixl_update_vsi_stats(struct ixl_vsi *vsi)
4422 struct i40e_eth_stats *es;
4425 struct i40e_hw_port_stats *nsd;
4429 es = &vsi->eth_stats;
4432 ixl_update_eth_stats(vsi);
// Software drops: hw tx_discards + link-down drops + per-queue br_drops.
4434 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4435 for (int i = 0; i < vsi->num_queues; i++)
4436 tx_discards += vsi->queues[i].txr.br->br_drops;
4438 /* Update ifnet stats */
4439 IXL_SET_IPACKETS(vsi, es->rx_unicast +
4442 IXL_SET_OPACKETS(vsi, es->tx_unicast +
4445 IXL_SET_IBYTES(vsi, es->rx_bytes);
4446 IXL_SET_OBYTES(vsi, es->tx_bytes);
4447 IXL_SET_IMCASTS(vsi, es->rx_multicast);
4448 IXL_SET_OMCASTS(vsi, es->tx_multicast);
4450 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4451 nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4453 IXL_SET_OERRORS(vsi, es->tx_errors);
4454 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4455 IXL_SET_OQDROPS(vsi, tx_discards);
4456 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4457 IXL_SET_COLLISIONS(vsi, 0);
// Zero the PF's accumulated and offset stat blocks and force the next
// update pass to re-capture baseline offsets.
4461 * Reset all of the stats for the given pf
4463 void ixl_pf_reset_stats(struct ixl_pf *pf)
4465 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4466 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4467 pf->stat_offsets_loaded = false;
// VSI-level counterpart of ixl_pf_reset_stats: zero the ethernet stat
// blocks and re-arm baseline capture.
4471 * Resets all stats of the given vsi
4473 void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4475 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4476 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4477 vsi->stat_offsets_loaded = false;
// Read a 48-bit hardware counter (low 32 bits + 16 valid high bits) and
// report it relative to the first-read offset, handling one 48-bit wrap.
4481 * Read and update a 48 bit stat from the hw
4483 * Since the device stats are not reset at PFReset, they likely will not
4484 * be zeroed when the driver starts. We'll save the first values read
4485 * and use them as offsets to be subtracted from the raw values in order
4486 * to report stats that count from zero.
4489 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4490 bool offset_loaded, u64 *offset, u64 *stat)
// Single 8-byte read where supported; otherwise two 4-byte reads.
4494 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4495 new_data = rd64(hw, loreg);
4498 * Use two rd32's instead of one rd64; FreeBSD versions before
4499 * 10 don't support 8 byte bus reads/writes.
4501 new_data = rd32(hw, loreg);
// Only the low 16 bits of the high register are counter bits (48 total).
4502 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4507 if (new_data >= *offset)
4508 *stat = new_data - *offset;
// Counter wrapped since the offset was captured: add back 2^48.
4510 *stat = (new_data + ((u64)1 << 48)) - *offset;
4511 *stat &= 0xFFFFFFFFFFFFULL;
// 32-bit analogue of ixl_stat_update48: offset-relative read with a
// single 2^32 wrap correction.
4515 * Read and update a 32 bit stat from the hw
4518 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4519 bool offset_loaded, u64 *offset, u64 *stat)
4523 new_data = rd32(hw, reg);
4526 if (new_data >= *offset)
4527 *stat = (u32)(new_data - *offset);
4529 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
// Sysctl handler to set link flow control mode (0=none .. 3=full).
// Validates the range, refuses on 40GBase-CR4 media (known-broken with
// this firmware), then requests the mode via i40e_set_fc().
4533 ** Set flow control using sysctl:
4540 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4543 * TODO: ensure flow control is disabled if
4544 * priority flow control is enabled
4546 * TODO: ensure tx CRC by hardware should be enabled
4547 * if tx flow control is enabled.
4549 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4550 struct i40e_hw *hw = &pf->hw;
4551 device_t dev = pf->dev;
4553 enum i40e_status_code aq_error = 0;
// NOTE(review): sysctl_handle_int writes straight into pf->fc, so an
// out-of-range value is stored before validation rejects it — confirm
// whether the elided error path restores the previous value.
4557 error = sysctl_handle_int(oidp, &pf->fc, 0, req);
4558 if ((error) || (req->newptr == NULL))
4560 if (pf->fc < 0 || pf->fc > 3) {
4562 "Invalid fc mode; valid modes are 0 through 3\n");
4567 ** Changing flow control mode currently does not work on
4570 if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4571 || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4572 device_printf(dev, "Changing flow control mode unsupported"
4573 " on 40GBase-CR4 media.\n");
4577 /* Set fc ability for port */
4578 hw->fc.requested_mode = pf->fc;
4579 aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4582 "%s: Error setting new fc mode %d; fc_err %#x\n",
4583 __func__, aq_error, fc_aq_err);
// Read-only sysctl reporting the current negotiated link speed as a
// string, chosen by indexing a speeds[] table (declared in elided lines)
// from hw->phy.link_info.link_speed after refreshing link status.
4591 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4593 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4594 struct i40e_hw *hw = &pf->hw;
4595 int error = 0, index = 0;
4606 ixl_update_link_status(pf);
4608 switch (hw->phy.link_info.link_speed) {
4609 case I40E_LINK_SPEED_100MB:
4612 case I40E_LINK_SPEED_1GB:
4615 case I40E_LINK_SPEED_10GB:
4618 case I40E_LINK_SPEED_40GB:
4621 case I40E_LINK_SPEED_20GB:
4624 case I40E_LINK_SPEED_UNKNOWN:
4630 error = sysctl_handle_string(oidp, speeds[index],
4631 strlen(speeds[index]), req);
// Program the PHY's advertised speed set. Reads current PHY capabilities,
// builds a set_phy_config request preserving abilities/EEE/power fields,
// translates the 'speeds' bitmask (0x1=100M 0x2=1G 0x4=10G 0x8=20G) into
// aq link_speed bits, then reinitializes the interface to apply.
4636 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4638 struct i40e_hw *hw = &pf->hw;
4639 device_t dev = pf->dev;
4640 struct i40e_aq_get_phy_abilities_resp abilities;
4641 struct i40e_aq_set_phy_config config;
4642 enum i40e_status_code aq_error = 0;
4644 /* Get current capability information */
4645 aq_error = i40e_aq_get_phy_capabilities(hw,
4646 FALSE, FALSE, &abilities, NULL);
4649 "%s: Error getting phy capabilities %d,"
4650 " aq error: %d\n", __func__, aq_error,
4651 hw->aq.asq_last_status);
4655 /* Prepare new config */
4656 bzero(&config, sizeof(config));
4657 config.phy_type = abilities.phy_type;
4658 config.abilities = abilities.abilities
4659 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4660 config.eee_capability = abilities.eee_capability;
4661 config.eeer = abilities.eeer_val;
4662 config.low_power_ctrl = abilities.d3_lpan;
4663 /* Translate into aq cmd link_speed */
4665 config.link_speed |= I40E_LINK_SPEED_20GB;
4667 config.link_speed |= I40E_LINK_SPEED_10GB;
4669 config.link_speed |= I40E_LINK_SPEED_1GB;
4671 config.link_speed |= I40E_LINK_SPEED_100MB;
4673 /* Do aq command & restart link */
4674 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4677 "%s: Error setting new phy config %d,"
4678 " aq error: %d\n", __func__, aq_error,
4679 hw->aq.asq_last_status);
4684 ** This seems a bit heavy handed, but we
4685 ** need to get a reinit on some devices
4689 ixl_init_locked(pf);
// Sysctl handler for the advertised-speed bitmask. Rejects 40G devices
// (firmware cannot change their advertisement), range-checks the mask
// (0x1-0xE), applies per-device restrictions, then delegates to
// ixl_set_advertised_speeds() and refreshes link status on success.
4696 ** Control link advertise speed:
4698 ** 0x1 - advertise 100 Mb
4699 ** 0x2 - advertise 1G
4700 ** 0x4 - advertise 10G
4701 ** 0x8 - advertise 20G
4703 ** Does not work on 40G devices.
4706 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4708 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4709 struct i40e_hw *hw = &pf->hw;
4710 device_t dev = pf->dev;
4711 int requested_ls = 0;
4715 ** FW doesn't support changing advertised speed
4716 ** for 40G devices; speed is always 40G.
4718 if (i40e_is_40G_device(hw->device_id))
4721 /* Read in new mode */
4722 requested_ls = pf->advertised_speed;
4723 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4724 if ((error) || (req->newptr == NULL))
4726 /* Check for sane value */
4727 if (requested_ls < 0x1 || requested_ls > 0xE) {
4728 device_printf(dev, "Invalid advertised speed; "
4729 "valid modes are 0x1 through 0xE\n");
4732 /* Then check for validity based on adapter type */
4733 switch (hw->device_id) {
// 10GBase-T parts cannot advertise 20G; KR2 parts cannot advertise 100M;
// all other device ids are limited to the 1G/10G bits (0x6).
4734 case I40E_DEV_ID_10G_BASE_T:
4735 if (requested_ls & 0x8) {
4737 "20Gbs speed not supported on this device.\n");
4741 case I40E_DEV_ID_20G_KR2:
4742 if (requested_ls & 0x1) {
4744 "100Mbs speed not supported on this device.\n");
4749 if (requested_ls & ~0x6) {
4751 "Only 1/10Gbs speeds are supported on this device.\n");
4757 /* Exit if no change */
4758 if (pf->advertised_speed == requested_ls)
4761 error = ixl_set_advertised_speeds(pf, requested_ls);
4765 pf->advertised_speed = requested_ls;
4766 ixl_update_link_status(pf);
// Decode the PCIe Link Status register into hw->bus.width/speed, print
// the result, and warn when the slot provides less than x8 Gen3 (the
// bandwidth needed for full 40G throughput).
4771 ** Get the width and transaction speed of
4772 ** the bus this adapter is plugged into.
4775 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4781 /* Get the PCI Express Capabilities offset */
4782 pci_find_cap(dev, PCIY_EXPRESS, &offset);
4784 /* ...and read the Link Status Register */
4785 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4787 switch (link & I40E_PCI_LINK_WIDTH) {
4788 case I40E_PCI_LINK_WIDTH_1:
4789 hw->bus.width = i40e_bus_width_pcie_x1;
4791 case I40E_PCI_LINK_WIDTH_2:
4792 hw->bus.width = i40e_bus_width_pcie_x2;
4794 case I40E_PCI_LINK_WIDTH_4:
4795 hw->bus.width = i40e_bus_width_pcie_x4;
4797 case I40E_PCI_LINK_WIDTH_8:
4798 hw->bus.width = i40e_bus_width_pcie_x8;
4801 hw->bus.width = i40e_bus_width_unknown;
4805 switch (link & I40E_PCI_LINK_SPEED) {
4806 case I40E_PCI_LINK_SPEED_2500:
4807 hw->bus.speed = i40e_bus_speed_2500;
4809 case I40E_PCI_LINK_SPEED_5000:
4810 hw->bus.speed = i40e_bus_speed_5000;
4812 case I40E_PCI_LINK_SPEED_8000:
4813 hw->bus.speed = i40e_bus_speed_8000;
4816 hw->bus.speed = i40e_bus_speed_unknown;
4821 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4822 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4823 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4824 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4825 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4826 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4827 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
// NOTE(review): the width test below uses <= i40e_bus_width_pcie_x8,
// which is true for every PCIe width — the warning effectively keys on
// speed alone. Confirm intent before changing.
4830 if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4831 (hw->bus.speed < i40e_bus_speed_8000)) {
4832 device_printf(dev, "PCI-Express bandwidth available"
4833 " for this device\n may be insufficient for"
4834 " optimal performance.\n");
4835 device_printf(dev, "For expected performance a x8 "
4836 "PCIE Gen3 slot is required.\n");
// Read-only sysctl: format firmware ("f"), admin queue API ("a"), NVM
// ("n") and etrack ("e") versions into one string.
4843 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4845 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4846 struct i40e_hw *hw = &pf->hw;
4849 snprintf(buf, sizeof(buf),
4850 "f%d.%d a%d.%d n%02x.%02x e%08x",
4851 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
4852 hw->aq.api_maj_ver, hw->aq.api_min_ver,
4853 (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
4854 IXL_NVM_VERSION_HI_SHIFT,
4855 (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
4856 IXL_NVM_VERSION_LO_SHIFT,
4858 return (sysctl_handle_string(oidp, buf, strlen(buf), req));
// Debug-only sysctl (IXL_DEBUG_SYSCTL): fetch fresh link info from
// firmware and render the raw fields (phy type, speed, link/an/ext info).
4862 #ifdef IXL_DEBUG_SYSCTL
4864 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4866 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4867 struct i40e_hw *hw = &pf->hw;
4868 struct i40e_link_status link_status;
4871 enum i40e_status_code aq_error = 0;
4873 aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4875 printf("i40e_aq_get_link_info() error %d\n", aq_error);
4880 "PHY Type : %#04x\n"
4882 "Link info: %#04x\n"
4885 link_status.phy_type, link_status.link_speed,
4886 link_status.link_info, link_status.an_info,
4887 link_status.ext_info);
4889 return (sysctl_handle_string(oidp, buf, strlen(buf), req));
// Debug-only sysctl: query PHY capabilities from firmware and render the
// raw fields (phy type, speed, abilities, EEE, EEER, d3_lpan).
4893 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4895 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4896 struct i40e_hw *hw = &pf->hw;
4898 enum i40e_status_code aq_error = 0;
4900 struct i40e_aq_get_phy_abilities_resp abilities;
4902 aq_error = i40e_aq_get_phy_capabilities(hw,
4903 TRUE, FALSE, &abilities, NULL);
4905 printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4910 "PHY Type : %#010x\n"
4912 "Abilities: %#04x\n"
4914 "EEER reg : %#010x\n"
4916 abilities.phy_type, abilities.link_speed,
4917 abilities.abilities, abilities.eee_capability,
4918 abilities.eeer_val, abilities.d3_lpan);
4920 return (sysctl_handle_string(oidp, buf, strlen(buf), req));
// Debug-only sysctl: dump the VSI's software MAC filter list as text.
// Counts entries first to size a single M_NOWAIT buffer, then formats
// one "MAC, vlan, flags" line per entry. Returns "(none)" when empty.
4924 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4926 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4927 struct ixl_vsi *vsi = &pf->vsi;
4928 struct ixl_mac_filter *f;
4933 int ftl_counter = 0;
// First pass: count list entries (body elided) to size the buffer.
4937 SLIST_FOREACH(f, &vsi->ftl, next) {
4942 sysctl_handle_string(oidp, "(none)", 6, req);
4946 buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
4947 buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
// NOTE(review): malloc result is used immediately; NULL check for the
// M_NOWAIT allocation is not visible here — confirm in elided lines.
4949 sprintf(buf_i++, "\n");
4950 SLIST_FOREACH(f, &vsi->ftl, next) {
4952 MAC_FORMAT ", vlan %4d, flags %#06x",
4953 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4955 /* don't print '\n' for last entry */
4956 if (++ftl_counter != ftl_len) {
4957 sprintf(buf_i, "\n");
4962 error = sysctl_handle_string(oidp, buf, strlen(buf), req);
4964 printf("sysctl error: %d\n", error);
4965 free(buf, M_DEVBUF);
4969 #define IXL_SW_RES_SIZE 0x14
/*
 * qsort(3) comparator: orders switch-resource-allocation entries by
 * ascending resource_type so the sysctl output groups like resources.
 */
4971 ixl_res_alloc_cmp(const void *a, const void *b)
4973 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4974 one = (struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4975 two = (struct i40e_aqc_switch_resource_alloc_element_resp *)b;
/* resource_type is a small unsigned field, so the subtraction in int
 * cannot overflow and yields the usual negative/zero/positive result. */
4977 return ((int)one->resource_type - (int)two->resource_type);
/*
 * Sysctl handler: query the switch resource allocation table from
 * firmware, sort it by resource type and emit a formatted table
 * (guaranteed/total/used/unallocated per type) through an sbuf.
 */
4981 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4983 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4984 struct i40e_hw *hw = &pf->hw;
4985 device_t dev = pf->dev;
4990 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4992 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4994 device_printf(dev, "Could not allocate sbuf for output.\n");
4998 bzero(resp, sizeof(resp));
4999 error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5005 "%s: get_switch_resource_alloc() error %d, aq error %d\n",
5006 __func__, error, hw->aq.asq_last_status);
5011 /* Sort entries by type for display */
5012 qsort(resp, num_entries,
5013 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5014 &ixl_res_alloc_cmp);
5016 sbuf_cat(buf, "\n");
5017 sbuf_printf(buf, "# of entries: %d\n", num_entries);
5019 "Type | Guaranteed | Total | Used | Un-allocated\n"
5020 " | (this) | (all) | (this) | (all) \n");
5021 for (int i = 0; i < num_entries; i++) {
5023 "%#4x | %10d %5d %6d %12d",
5024 resp[i].resource_type,
5028 resp[i].total_unalloced);
/* Avoid a trailing newline after the final row. */
5029 if (i < num_entries - 1)
5030 sbuf_cat(buf, "\n");
5033 error = sbuf_finish(buf);
5035 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Translate a switch element SEID into a human-readable name written
 * into the caller-supplied sbuf; returns sbuf_data() of that sbuf.
 * SEID ranges (MAC/PF/VF/VSI/reserved) are decoded by the if/else
 * ladder below.
 */
5043 ** Caller must init and delete sbuf; this function will clear and
5044 ** finish it for caller.
5047 ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
/* SEID 0 means "Network" when looking upstream, "Host" otherwise. */
5051 if (seid == 0 && uplink)
5052 sbuf_cat(s, "Network");
5054 sbuf_cat(s, "Host");
5058 sbuf_printf(s, "MAC %d", seid - 2);
5059 else if (seid <= 15)
5060 sbuf_cat(s, "Reserved");
5061 else if (seid <= 31)
5062 sbuf_printf(s, "PF %d", seid - 16);
5063 else if (seid <= 159)
5064 sbuf_printf(s, "VF %d", seid - 32);
5065 else if (seid <= 287)
5066 sbuf_cat(s, "Reserved");
5067 else if (seid <= 511)
5068 sbuf_cat(s, "Other"); // for other structures
5069 else if (seid <= 895)
5070 sbuf_printf(s, "VSI %d", seid - 512);
5071 else if (seid <= 1023)
5072 sbuf_printf(s, "Reserved");
5074 sbuf_cat(s, "Invalid");
5077 return sbuf_data(s);
/*
 * Sysctl handler: fetch the switch configuration from firmware and
 * print one row per switch element: SEID (name), uplink, downlink and
 * connection type.  Uses a second auto sbuf (nmbuf) as scratch for
 * ixl_switch_element_string().
 */
5081 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5083 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5084 struct i40e_hw *hw = &pf->hw;
5085 device_t dev = pf->dev;
5089 u8 aq_buf[I40E_AQ_LARGE_BUF];
5092 struct i40e_aqc_get_switch_config_resp *sw_config;
5093 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5095 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5097 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
/* Single get_switch_config call; &next would be used for pagination
 * but no continuation loop is visible in this chunk. */
5101 error = i40e_aq_get_switch_config(hw, sw_config,
5102 sizeof(aq_buf), &next, NULL);
5105 "%s: aq_get_switch_config() error %d, aq error %d\n",
5106 __func__, error, hw->aq.asq_last_status);
5111 nmbuf = sbuf_new_auto();
5113 device_printf(dev, "Could not allocate sbuf for name output.\n");
5118 sbuf_cat(buf, "\n");
5119 // Assuming <= 255 elements in switch
5120 sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
5122 ** Revision -- all elements are revision 1 for now
5125 "SEID ( Name ) | Uplink | Downlink | Conn Type\n"
5126 " | | | (uplink)\n");
5127 for (int i = 0; i < sw_config->header.num_reported; i++) {
5128 // "%4d (%8s) | %8s %8s %#8x",
5129 sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5131 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5132 sw_config->element[i].seid, false));
5133 sbuf_cat(buf, " | ");
5134 sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5135 sw_config->element[i].uplink_seid, true));
5137 sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5138 sw_config->element[i].downlink_seid, false));
5140 sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
/* Suppress the newline after the last element. */
5141 if (i < sw_config->header.num_reported - 1)
5142 sbuf_cat(buf, "\n");
5146 error = sbuf_finish(buf);
5148 device_printf(dev, "Error finishing sbuf: %d\n", error);
5154 #endif /* IXL_DEBUG_SYSCTL */
/*
 * Allocate and configure a VSI for a VF: build the VSI context
 * (switch/security/VLAN/queue-map sections), add it via the admin
 * queue, record the resulting SEID/VSI number and queue range on the
 * VF, re-read the VSI parameters, and remove any BW limit.
 *
 * Returns 0 on success or an errno translated from the last admin
 * queue status.
 */
5159 ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5162 struct ixl_vsi *vsi;
5163 struct i40e_vsi_context vsi_ctx;
5165 uint16_t first_queue;
5166 enum i40e_status_code code;
5171 vsi_ctx.pf_num = hw->pf_id;
/* VF VSIs hang off the PF's VEB. */
5172 vsi_ctx.uplink_seid = pf->veb_seid;
5173 vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
5174 vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5175 vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
5177 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
5179 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5180 vsi_ctx.info.switch_id = htole16(0);
5182 vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
5183 vsi_ctx.info.sec_flags = 0;
/* Optionally enable hardware MAC anti-spoof for this VF. */
5184 if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
5185 vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
5187 vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
5188 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5189 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5191 vsi_ctx.info.valid_sections |=
5192 htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5193 vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
/* VF queues are carved out after the PF's queues, IXLV_MAX_QUEUES
 * per VF, indexed by vf_num. */
5194 first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
5195 for (i = 0; i < IXLV_MAX_QUEUES; i++)
5196 vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
/* Mark remaining queue-map slots unused. */
5197 for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
5198 vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
5200 vsi_ctx.info.tc_mapping[0] = htole16(
5201 (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5202 (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5204 code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
5205 if (code != I40E_SUCCESS)
5206 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5207 vf->vsi.seid = vsi_ctx.seid;
5208 vf->vsi.vsi_num = vsi_ctx.vsi_number;
5209 vf->vsi.first_queue = first_queue;
5210 vf->vsi.num_queues = IXLV_MAX_QUEUES;
5212 code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
5213 if (code != I40E_SUCCESS)
5214 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
/* Credit/max-BW of 0 disables the bandwidth limit on this VSI. */
5216 code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
5217 if (code != I40E_SUCCESS) {
5218 device_printf(pf->dev, "Failed to disable BW limit: %d\n",
5219 ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5220 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
/* Cache the final VSI info for later use (e.g. qs_handle). */
5223 memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
/*
 * Set up a VF's VSI: allocate it, reset the filter accounting, add a
 * broadcast filter and reapply any existing filters.
 */
5228 ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5235 error = ixl_vf_alloc_vsi(pf, vf);
5239 vf->vsi.hw_filters_add = 0;
5240 vf->vsi.hw_filters_del = 0;
5241 ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
5242 ixl_reconfigure_filters(&vf->vsi);
/*
 * Program one entry of the VSI queue table (VSILAN_QTABLE).  Two
 * queue indices share each 32-bit register, hence the index/shift
 * arithmetic below.
 */
5248 ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
5255 * Two queues are mapped in a single register, so we have to do some
5256 * gymnastics to convert the queue number into a register index and
/* Odd queues land in the upper half-word of the register. */
5260 shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
/* Read-modify-write: clear the target field, then insert val. */
5262 qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
5263 qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
5264 qtable |= val << shift;
5265 wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
/*
 * Build the VF's queue mappings: enable the non-contiguous VSI queue
 * table, enable TX/RX mapping for the VF, point each VF-relative
 * queue (VPLAN_QTABLE) at its global queue, map the VSI queue table,
 * and mark the remaining VSI queue slots unused.
 */
5269 ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
5278 * Contiguous mappings aren't actually supported by the hardware,
5279 * so we have to use non-contiguous mappings.
5281 wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
5282 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
5284 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
5285 I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
/* VF-relative queue i -> global queue (first_queue + i). */
5287 for (i = 0; i < vf->vsi.num_queues; i++) {
5288 qtable = (vf->vsi.first_queue + i) <<
5289 I40E_VPLAN_QTABLE_QINDEX_SHIFT;
5291 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
5294 /* Map queues allocated to VF to its VSI. */
5295 for (i = 0; i < vf->vsi.num_queues; i++)
5296 ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);
5298 /* Set rest of VSI queues as unused. */
5299 for (; i < IXL_MAX_VSI_QUEUES; i++)
5300 ixl_vf_map_vsi_queue(hw, vf, i,
5301 I40E_VSILAN_QTABLE_QINDEX_0_MASK);
/*
 * Release a VF's VSI by deleting the switch element for its SEID.
 * (Guard conditions, if any, are on lines not visible in this chunk.)
 */
5307 ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
5316 i40e_aq_delete_element(hw, vsi->seid, NULL);
/*
 * Disable a VF queue interrupt by writing only CLEARPBA to the given
 * VFINT_DYN_CTL* register (leaves the enable bit cleared).
 */
5320 ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
5323 wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
/*
 * Unhook an interrupt linked list: write all-ones FIRSTQ type/index,
 * i.e. the "end of list" sentinel, into the given VPINT_LNKLST* reg.
 */
5328 ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
5331 wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
5332 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
/*
 * Tear down everything a VF holds: release its VSI, disable all of
 * its queue interrupts, unhook all of its interrupt linked lists and
 * zero its queue count.  Vector 0 uses dedicated registers, so it is
 * handled separately from vectors 1..num_msix_vectors_vf-1.
 */
5337 ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
5340 uint32_t vfint_reg, vpint_reg;
5345 ixl_vf_vsi_release(pf, &vf->vsi);
5347 /* Index 0 has a special register. */
5348 ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
5350 for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5351 vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i , vf->vf_num);
5352 ixl_vf_disable_queue_intr(hw, vfint_reg);
5355 /* Index 0 has a special register. */
5356 ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
5358 for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5359 vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
5360 ixl_vf_unregister_intr(hw, vpint_reg);
5363 vf->vsi.num_queues = 0;
/*
 * Wait for a VF's pending PCIe transactions to drain: select the VF's
 * device-status register via PF_PCI_CIAA, then poll PF_PCI_CIAD until
 * the transactions-pending bit clears or IXL_VF_RESET_TIMEOUT
 * iterations elapse.  (The return value is on lines not visible here.)
 */
5367 ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
5371 uint16_t global_vf_num;
5375 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5377 wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
5378 (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
5379 for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5380 ciad = rd32(hw, I40E_PF_PCI_CIAD);
5381 if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
/*
 * Trigger a software reset of a VF by setting VFSWR in VPGEN_VFRTRIG,
 * then run the common reinitialization path (ixl_reinit_vf), which
 * also clears the trigger once the reset completes.
 */
5390 ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5397 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5398 vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5399 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5402 ixl_reinit_vf(pf, vf);
/*
 * Reinitialize a VF after reset: drain PCIe, poll VPGEN_VFRSTAT for
 * reset completion, publish COMPLETED status to the VF, clear the
 * reset trigger, disable its rings, rebuild its VSI and queue
 * mappings, and finally mark the VF ACTIVE.
 */
5406 ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5409 uint32_t vfrstat, vfrtrig;
/* Best effort: a timeout here is only logged, not fatal. */
5414 error = ixl_flush_pcie(pf, vf);
5416 device_printf(pf->dev,
5417 "Timed out waiting for PCIe activity to stop on VF-%d\n",
5420 for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5423 vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
5424 if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
5428 if (i == IXL_VF_RESET_TIMEOUT)
5429 device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
/* Let the VF see the reset as completed before we rebuild. */
5431 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
5433 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5434 vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5435 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
/* seid == 0 means the VF never had a VSI, so nothing to disable. */
5437 if (vf->vsi.seid != 0)
5438 ixl_disable_rings(&vf->vsi);
5440 ixl_vf_release_resources(pf, vf);
5441 ixl_vf_setup_vsi(pf, vf);
5442 ixl_vf_map_queues(pf, vf);
5444 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
/*
 * Map a virtual-channel opcode to a printable name for debug logging.
 * (The default case and a few return strings fall on lines not
 * visible in this chunk.)
 */
5449 ixl_vc_opcode_str(uint16_t op)
5453 case I40E_VIRTCHNL_OP_VERSION:
5455 case I40E_VIRTCHNL_OP_RESET_VF:
5456 return ("RESET_VF");
5457 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
5458 return ("GET_VF_RESOURCES");
5459 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
5460 return ("CONFIG_TX_QUEUE");
5461 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
5462 return ("CONFIG_RX_QUEUE");
5463 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
5464 return ("CONFIG_VSI_QUEUES");
5465 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
5466 return ("CONFIG_IRQ_MAP");
5467 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
5468 return ("ENABLE_QUEUES");
5469 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
5470 return ("DISABLE_QUEUES");
5471 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
5472 return ("ADD_ETHER_ADDRESS");
5473 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
5474 return ("DEL_ETHER_ADDRESS");
5475 case I40E_VIRTCHNL_OP_ADD_VLAN:
5476 return ("ADD_VLAN");
5477 case I40E_VIRTCHNL_OP_DEL_VLAN:
5478 return ("DEL_VLAN");
5479 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
5480 return ("CONFIG_PROMISCUOUS_MODE");
5481 case I40E_VIRTCHNL_OP_GET_STATS:
5482 return ("GET_STATS");
5483 case I40E_VIRTCHNL_OP_FCOE:
5485 case I40E_VIRTCHNL_OP_EVENT:
/*
 * Select a debug verbosity level for a virtchnl opcode; GET_STATS is
 * special-cased (it is periodic and would otherwise flood the log).
 * The level values themselves are on lines not visible here.
 */
5493 ixl_vc_opcode_level(uint16_t opcode)
5497 case I40E_VIRTCHNL_OP_GET_STATS:
/*
 * Send a virtchnl message (opcode + status + optional payload) to a
 * VF over the admin queue, logging it at the opcode's debug level.
 * The send's return value is not checked here.
 */
5505 ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5506 enum i40e_status_code status, void *msg, uint16_t len)
/* AQ addressing uses the absolute (global) VF id, not the PF-local one. */
5512 global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
5514 I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
5515 "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
5516 ixl_vc_opcode_str(op), op, status, vf->vf_num);
5518 i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
/* Convenience wrapper: ACK an opcode with SUCCESS and no payload. */
5522 ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
5525 ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
/*
 * NACK an opcode with the given error status, logging the call site
 * (file:line) that rejected the request.  Callers use the
 * i40e_send_vf_nack() macro, which supplies __FILE__/__LINE__.
 */
5529 ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5530 enum i40e_status_code status, const char *file, int line)
5533 I40E_VC_DEBUG(pf, 1,
5534 "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
5535 ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
5536 ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
/*
 * Handle VIRTCHNL_OP_VERSION: validate the message size, then reply
 * with the PF's supported virtchnl major/minor version.
 */
5540 ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5543 struct i40e_virtchnl_version_info reply;
5545 if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
5546 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
5551 reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
5552 reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
5553 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
/*
 * Handle VIRTCHNL_OP_RESET_VF: the message carries no payload, so any
 * non-zero size is NACKed; otherwise reset the VF.  Per the protocol
 * no response is sent for a reset.
 */
5558 ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5562 if (msg_size != 0) {
5563 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
5568 ixl_reset_vf(pf, vf);
5570 /* No response to a reset message. */
/*
 * Handle VIRTCHNL_OP_GET_VF_RESOURCES: reply with the VF's resource
 * description — offload capabilities, queue-pair count, MSI-X vector
 * count, VSI id/type and the assigned MAC address.
 */
5574 ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5577 struct i40e_virtchnl_vf_resource reply;
/* Request carries no payload. */
5579 if (msg_size != 0) {
5580 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5585 bzero(&reply, sizeof(reply));
5587 reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
5590 reply.num_queue_pairs = vf->vsi.num_queues;
5591 reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
5592 reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
5593 reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
5594 reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
5595 memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
5597 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5598 I40E_SUCCESS, &reply, sizeof(reply));
/*
 * Program one VF TX queue: clear then set the HMC TX queue context
 * from the virtchnl txq_info, and bind the global queue to the VF via
 * QTX_CTL.  Returns non-zero on HMC failure (exact paths are on lines
 * not visible here).
 */
5602 ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5603 struct i40e_virtchnl_txq_info *info)
5606 struct i40e_hmc_obj_txq txq;
5607 uint16_t global_queue_num, global_vf_num;
5608 enum i40e_status_code status;
/* Translate the VF-relative queue id into the global queue number. */
5612 global_queue_num = vf->vsi.first_queue + info->queue_id;
5613 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5614 bzero(&txq, sizeof(txq));
5616 status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
5617 if (status != I40E_SUCCESS)
/* HMC context stores the ring base in IXL_TX_CTX_BASE_UNITS units. */
5620 txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
5622 txq.head_wb_ena = info->headwb_enabled;
5623 txq.head_wb_addr = info->dma_headwb_addr;
5624 txq.qlen = info->ring_len;
5625 txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
5626 txq.rdylist_act = 0;
5628 status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
5629 if (status != I40E_SUCCESS)
/* Mark the queue as VF-owned and record PF index + global VF number. */
5632 qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
5633 (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
5634 (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
5635 wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
/*
 * Program one VF RX queue: validate buffer and frame sizes from the
 * virtchnl rxq_info, optionally configure header split, then clear
 * and set the HMC RX queue context.  Returns non-zero on validation
 * or HMC failure (exact error paths are on lines not visible here).
 */
5642 ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5643 struct i40e_virtchnl_rxq_info *info)
5646 struct i40e_hmc_obj_rxq rxq;
5647 uint16_t global_queue_num;
5648 enum i40e_status_code status;
5651 global_queue_num = vf->vsi.first_queue + info->queue_id;
5652 bzero(&rxq, sizeof(rxq));
/* Reject oversized data buffers and out-of-range frame sizes. */
5654 if (info->databuffer_size > IXL_VF_MAX_BUFFER)
5657 if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
5658 info->max_pkt_size < ETHER_MIN_LEN)
5661 if (info->splithdr_enabled) {
5662 if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
/* Only the supported split positions are honored; others masked off. */
5665 rxq.hsplit_0 = info->rx_split_pos &
5666 (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
5667 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
5668 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
5669 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
5670 rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
5675 status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
5676 if (status != I40E_SUCCESS)
5679 rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
5680 rxq.qlen = info->ring_len;
5682 rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
5688 rxq.rxmax = info->max_pkt_size;
/* Enable TPH (TLP processing hints) for descriptors, data and head WB. */
5689 rxq.tphrdesc_ena = 1;
5690 rxq.tphwdesc_ena = 1;
5691 rxq.tphdata_ena = 1;
5692 rxq.tphhead_ena = 1;
5696 status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
5697 if (status != I40E_SUCCESS)
/*
 * Handle VIRTCHNL_OP_CONFIG_VSI_QUEUES: validate the variable-length
 * message (header + num_queue_pairs pair descriptors), check each
 * pair's VSI/queue ids against this VF's VSI, then program each TX
 * and RX queue.  Any failure NACKs the request and stops.
 */
5704 ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5707 struct i40e_virtchnl_vsi_queue_config_info *info;
5708 struct i40e_virtchnl_queue_pair_info *pair;
/* Must at least hold the fixed-size header before trusting fields. */
5711 if (msg_size < sizeof(*info)) {
5712 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5718 if (info->num_queue_pairs == 0) {
5719 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
/* Total size must match the element count exactly. */
5724 if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
5725 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5730 if (info->vsi_id != vf->vsi.vsi_num) {
5731 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5736 for (i = 0; i < info->num_queue_pairs; i++) {
5737 pair = &info->qpair[i];
/* TX and RX halves must target the same, in-range VF queue. */
5739 if (pair->txq.vsi_id != vf->vsi.vsi_num ||
5740 pair->rxq.vsi_id != vf->vsi.vsi_num ||
5741 pair->txq.queue_id != pair->rxq.queue_id ||
5742 pair->txq.queue_id >= vf->vsi.num_queues) {
5744 i40e_send_vf_nack(pf, vf,
5745 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5749 if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
5750 i40e_send_vf_nack(pf, vf,
5751 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5755 if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
5756 i40e_send_vf_nack(pf, vf,
5757 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5762 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
/*
 * Link one queue into an interrupt chain: write the QINT_{R,T}QCTL
 * register for cur_queue so it points at the previously-linked
 * (*last_type/*last_queue) entry, then advance the last_* cursors.
 * RX and TX share the same field layout, so the RQCTL shift/mask
 * macros are reused for TQCTL.
 */
5766 ixl_vf_set_qctl(struct ixl_pf *pf,
5767 const struct i40e_virtchnl_vector_map *vector,
5768 enum i40e_queue_type cur_type, uint16_t cur_queue,
5769 enum i40e_queue_type *last_type, uint16_t *last_queue)
5771 uint32_t offset, qctl;
5774 if (cur_type == I40E_QUEUE_TYPE_RX) {
5775 offset = I40E_QINT_RQCTL(cur_queue);
5776 itr_indx = vector->rxitr_idx;
5778 offset = I40E_QINT_TQCTL(cur_queue);
5779 itr_indx = vector->txitr_idx;
/* NOTE(review): htole32() before wr32() is unusual — wr32 typically
 * takes host-order values; confirm this byte-swap is intentional on
 * big-endian targets. */
5782 qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5783 (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5784 (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5785 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5786 (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
5788 wr32(&pf->hw, offset, qctl);
5790 *last_type = cur_type;
5791 *last_queue = cur_queue;
/*
 * Build the interrupt linked list for one vector map: interleave the
 * vector's TX and RX queues (per datasheet guidance), chaining each
 * via ixl_vf_set_qctl(), then point the vector's VPINT_LNKLST*
 * register at the head of the resulting list.  The list is built
 * back-to-front, starting from the end-of-list sentinel.
 */
5795 ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
5796 const struct i40e_virtchnl_vector_map *vector)
5800 enum i40e_queue_type type, last_type;
5801 uint32_t lnklst_reg;
5802 uint16_t rxq_map, txq_map, cur_queue, last_queue;
5806 rxq_map = vector->rxq_map;
5807 txq_map = vector->txq_map;
/* Start the chain from the "no next queue" sentinel. */
5809 last_queue = IXL_END_OF_INTR_LNKLST;
5810 last_type = I40E_QUEUE_TYPE_RX;
5813 * The datasheet says to optimize performance, RX queues and TX queues
5814 * should be interleaved in the interrupt linked list, so we process
5815 * both at once here.
5817 while ((rxq_map != 0) || (txq_map != 0)) {
/* Consume the lowest set TX bit, then the lowest set RX bit. */
5819 qindex = ffs(txq_map) - 1;
5820 type = I40E_QUEUE_TYPE_TX;
5821 cur_queue = vf->vsi.first_queue + qindex;
5822 ixl_vf_set_qctl(pf, vector, type, cur_queue,
5823 &last_type, &last_queue);
5824 txq_map &= ~(1 << qindex);
5828 qindex = ffs(rxq_map) - 1;
5829 type = I40E_QUEUE_TYPE_RX;
5830 cur_queue = vf->vsi.first_queue + qindex;
5831 ixl_vf_set_qctl(pf, vector, type, cur_queue,
5832 &last_type, &last_queue);
5833 rxq_map &= ~(1 << qindex);
/* Vector 0 uses the dedicated LNKLST0 register. */
5837 if (vector->vector_id == 0)
5838 lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
5840 lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
5842 wr32(hw, lnklst_reg,
5843 (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5844 (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
/*
 * Handle VIRTCHNL_OP_CONFIG_IRQ_MAP: validate the variable-length
 * message, then for each vector map check vector id, VSI id, queue
 * bitmaps against the VF's queue count, and ITR indices, before
 * programming the vector via ixl_vf_config_vector().
 */
5850 ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5853 struct i40e_virtchnl_irq_map_info *map;
5854 struct i40e_virtchnl_vector_map *vector;
5856 int i, largest_txq, largest_rxq;
5860 if (msg_size < sizeof(*map)) {
5861 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5867 if (map->num_vectors == 0) {
5868 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
/* Total size must match the declared vector count exactly. */
5873 if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
5874 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5879 for (i = 0; i < map->num_vectors; i++) {
5880 vector = &map->vecmap[i];
5882 if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
5883 vector->vsi_id != vf->vsi.vsi_num) {
5884 i40e_send_vf_nack(pf, vf,
5885 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
/* The highest set bit in each queue map must be a valid VF queue. */
5889 if (vector->rxq_map != 0) {
5890 largest_rxq = fls(vector->rxq_map) - 1;
5891 if (largest_rxq >= vf->vsi.num_queues) {
5892 i40e_send_vf_nack(pf, vf,
5893 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5899 if (vector->txq_map != 0) {
5900 largest_txq = fls(vector->txq_map) - 1;
5901 if (largest_txq >= vf->vsi.num_queues) {
5902 i40e_send_vf_nack(pf, vf,
5903 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5909 if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
5910 vector->txitr_idx > IXL_MAX_ITR_IDX) {
5911 i40e_send_vf_nack(pf, vf,
5912 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5917 ixl_vf_config_vector(pf, vf, vector);
5920 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
/*
 * Handle VIRTCHNL_OP_ENABLE_QUEUES: validate the queue_select payload
 * (size, VSI id, non-empty queue masks) and enable the VF's rings.
 * Note the per-queue bitmaps are not honored individually — all of
 * the VSI's rings are enabled via ixl_enable_rings().
 */
5924 ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5927 struct i40e_virtchnl_queue_select *select;
5930 if (msg_size != sizeof(*select)) {
5931 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
5937 if (select->vsi_id != vf->vsi.vsi_num ||
5938 select->rx_queues == 0 || select->tx_queues == 0) {
5939 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
5944 error = ixl_enable_rings(&vf->vsi);
5946 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
5951 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
/*
 * Handle VIRTCHNL_OP_DISABLE_QUEUES: mirror image of the enable
 * handler — validate the queue_select payload and disable all of the
 * VF VSI's rings via ixl_disable_rings().
 */
5955 ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
5956 void *msg, uint16_t msg_size)
5958 struct i40e_virtchnl_queue_select *select;
5961 if (msg_size != sizeof(*select)) {
5962 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
5968 if (select->vsi_id != vf->vsi.vsi_num ||
5969 select->rx_queues == 0 || select->tx_queues == 0) {
5970 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
5975 error = ixl_disable_rings(&vf->vsi);
5977 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
5982 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
/* Return whether addr is the all-zeros Ethernet address. */
5986 ixl_zero_mac(const uint8_t *addr)
5988 uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
5990 return (cmp_etheraddr(addr, zero));
/* Return whether addr is the Ethernet broadcast address. */
5994 ixl_bcast_mac(const uint8_t *addr)
5997 return (cmp_etheraddr(addr, ixl_bcast_addr));
/*
 * Validate a MAC address a VF wants to filter on: reject zero and
 * broadcast outright, and — unless the VF has SET_MAC capability —
 * reject any unicast address other than the VF's assigned MAC.
 * (Return values are on lines not visible in this chunk.)
 */
6001 ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
6004 if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
6008 * If the VF is not allowed to change its MAC address, don't let it
6009 * set a MAC filter for an address that is not a multicast address and
6010 * is not its assigned MAC.
6012 if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
6013 !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
/*
 * Handle VIRTCHNL_OP_ADD_ETHER_ADDRESS: validate the variable-length
 * address list, vet every address with ixl_vf_mac_valid(), and only
 * then add all of them as filters on the VF's VSI (all-or-nothing).
 */
6020 ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6023 struct i40e_virtchnl_ether_addr_list *addr_list;
6024 struct i40e_virtchnl_ether_addr *addr;
6025 struct ixl_vsi *vsi;
6027 size_t expected_size;
6031 if (msg_size < sizeof(*addr_list)) {
6032 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6038 expected_size = sizeof(*addr_list) +
6039 addr_list->num_elements * sizeof(*addr);
6041 if (addr_list->num_elements == 0 ||
6042 addr_list->vsi_id != vsi->vsi_num ||
6043 msg_size != expected_size) {
6044 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
/* Validate every address before mutating any filter state. */
6049 for (i = 0; i < addr_list->num_elements; i++) {
6050 if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
6051 i40e_send_vf_nack(pf, vf,
6052 I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6057 for (i = 0; i < addr_list->num_elements; i++) {
6058 addr = &addr_list->list[i];
6059 ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
6062 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
6066 ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6069 struct i40e_virtchnl_ether_addr_list *addr_list;
6070 struct i40e_virtchnl_ether_addr *addr;
6071 size_t expected_size;
6074 if (msg_size < sizeof(*addr_list)) {
6075 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6081 expected_size = sizeof(*addr_list) +
6082 addr_list->num_elements * sizeof(*addr);
6084 if (addr_list->num_elements == 0 ||
6085 addr_list->vsi_id != vf->vsi.vsi_num ||
6086 msg_size != expected_size) {
6087 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6092 for (i = 0; i < addr_list->num_elements; i++) {
6093 addr = &addr_list->list[i];
6094 if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
6095 i40e_send_vf_nack(pf, vf,
6096 I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6101 for (i = 0; i < addr_list->num_elements; i++) {
6102 addr = &addr_list->list[i];
6103 ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
6106 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
/*
 * Enable VLAN stripping on a VF's VSI by updating only the VLAN
 * section of the VSI parameters (PVLAN mode ALL + strip-both emod).
 * Returns the admin queue status from the update call.
 */
6109 static enum i40e_status_code
6110 ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
6112 struct i40e_vsi_context vsi_ctx;
6114 vsi_ctx.seid = vf->vsi.seid;
6116 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
/* Only the VLAN section is marked valid, so nothing else changes. */
6117 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
6118 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
6119 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6120 return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
/*
 * Handle VIRTCHNL_OP_ADD_VLAN: validate the variable-length VLAN id
 * list and the VF's VLAN capability, range-check each VLAN id, enable
 * VLAN stripping on the VSI, then add a filter (VF MAC + VLAN) per id.
 */
6124 ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6127 struct i40e_virtchnl_vlan_filter_list *filter_list;
6128 enum i40e_status_code code;
6129 size_t expected_size;
6132 if (msg_size < sizeof(*filter_list)) {
6133 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6139 expected_size = sizeof(*filter_list) +
6140 filter_list->num_elements * sizeof(uint16_t);
6141 if (filter_list->num_elements == 0 ||
6142 filter_list->vsi_id != vf->vsi.vsi_num ||
6143 msg_size != expected_size) {
6144 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6149 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6150 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
/* VLAN ids must fit in the 12-bit VID field. */
6155 for (i = 0; i < filter_list->num_elements; i++) {
6156 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6157 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6163 code = ixl_vf_enable_vlan_strip(pf, vf);
6164 if (code != I40E_SUCCESS) {
6165 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6169 for (i = 0; i < filter_list->num_elements; i++)
6170 ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6172 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
6176 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6179 struct i40e_virtchnl_vlan_filter_list *filter_list;
6181 size_t expected_size;
6183 if (msg_size < sizeof(*filter_list)) {
6184 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6190 expected_size = sizeof(*filter_list) +
6191 filter_list->num_elements * sizeof(uint16_t);
6192 if (filter_list->num_elements == 0 ||
6193 filter_list->vsi_id != vf->vsi.vsi_num ||
6194 msg_size != expected_size) {
6195 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6200 for (i = 0; i < filter_list->num_elements; i++) {
6201 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6202 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6208 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6209 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6214 for (i = 0; i < filter_list->num_elements; i++)
6215 ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6217 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
6221 ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6222 void *msg, uint16_t msg_size)
6224 struct i40e_virtchnl_promisc_info *info;
6225 enum i40e_status_code code;
6227 if (msg_size != sizeof(*info)) {
6228 i40e_send_vf_nack(pf, vf,
6229 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6233 if (!vf->vf_flags & VF_FLAG_PROMISC_CAP) {
6234 i40e_send_vf_nack(pf, vf,
6235 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6240 if (info->vsi_id != vf->vsi.vsi_num) {
6241 i40e_send_vf_nack(pf, vf,
6242 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6246 code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
6247 info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
6248 if (code != I40E_SUCCESS) {
6249 i40e_send_vf_nack(pf, vf,
6250 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6254 code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
6255 info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
6256 if (code != I40E_SUCCESS) {
6257 i40e_send_vf_nack(pf, vf,
6258 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6262 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
/*
 * Handle VIRTCHNL_OP_GET_STATS: validate the queue_select payload and
 * VSI id, refresh the VSI's ethernet stats from hardware, then reply
 * with the eth_stats structure as the message payload.
 */
6266 ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6269 struct i40e_virtchnl_queue_select *queue;
6271 if (msg_size != sizeof(*queue)) {
6272 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6278 if (queue->vsi_id != vf->vsi.vsi_num) {
6279 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6284 ixl_update_eth_stats(&vf->vsi);
6286 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6287 I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
/*
 * Dispatch an incoming admin-queue event carrying a VF virtchnl
 * message: recover the PF-local VF number from desc.retval and the
 * opcode from desc.cookie_high, bounds-check the VF number, then
 * route to the per-opcode handler.  Unknown or superseded opcodes are
 * NACKed with NOT_IMPLEMENTED.
 */
6291 ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
6295 uint16_t vf_num, msg_size;
/* retval carries the global VF id; convert to PF-local index. */
6298 vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
6299 opcode = le32toh(event->desc.cookie_high);
6301 if (vf_num >= pf->num_vfs) {
6302 device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
6306 vf = &pf->vfs[vf_num];
6307 msg = event->msg_buf;
6308 msg_size = event->msg_len;
6310 I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
6311 "Got msg %s(%d) from VF-%d of size %d\n",
6312 ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);
6315 case I40E_VIRTCHNL_OP_VERSION:
6316 ixl_vf_version_msg(pf, vf, msg, msg_size);
6318 case I40E_VIRTCHNL_OP_RESET_VF:
6319 ixl_vf_reset_msg(pf, vf, msg, msg_size);
6321 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
6322 ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
6324 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
6325 ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
6327 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
6328 ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
6330 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
6331 ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
6333 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
6334 ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
6336 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
6337 ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
6339 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
6340 ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
6342 case I40E_VIRTCHNL_OP_ADD_VLAN:
6343 ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
6345 case I40E_VIRTCHNL_OP_DEL_VLAN:
6346 ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
6348 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
6349 ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
6351 case I40E_VIRTCHNL_OP_GET_STATS:
6352 ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
6355 /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
6356 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
6357 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
6359 i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
6364 /* Handle any VFs that have reset themselves via a Function Level Reset(FLR). */
/*
 * Taskqueue handler: scan the GLGEN_VFLRSTAT register bank for every VF
 * belonging to this PF, acknowledge any pending FLR indication
 * (presumably write-1-to-clear — the wr32 value line is not visible here;
 * TODO confirm against the datasheet), reinitialize the affected VF, and
 * finally re-arm the VFLR cause bit in PFINT_ICR0_ENA so subsequent FLR
 * interrupts are delivered again.
 */
6366 ixl_handle_vflr(void *arg, int pending)
6370 uint16_t global_vf_num;
6371 uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
6378 for (i = 0; i < pf->num_vfs; i++) {
/* VFLRSTAT is indexed by the device-global VF number, not the PF-relative one. */
6379 global_vf_num = hw->func_caps.vf_base_id + i;
6381 vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
6382 vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
6383 vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
6384 if (vflrstat & vflrstat_mask) {
/* Clear the pending bit before reinit so a new FLR during reinit is not lost. */
6385 wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
6388 ixl_reinit_vf(pf, &pf->vfs[i]);
/* Re-enable the VFLR interrupt cause now that all pending FLRs are handled. */
6392 icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
6393 icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
6394 wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
/*
 * Translate an i40e admin queue error code (enum i40e_admin_queue_err)
 * into the closest matching errno(2) value, so AQ failures can be
 * reported to the OS through standard error returns.  Most of the AQ_RC_*
 * names map to their like-named errno (EPERM, ENOENT, ...); codes with no
 * exact equivalent (e.g. EFLUSHED) collapse to EINVAL.
 */
6401 ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
6405 case I40E_AQ_RC_EPERM:
6407 case I40E_AQ_RC_ENOENT:
6409 case I40E_AQ_RC_ESRCH:
6411 case I40E_AQ_RC_EINTR:
6413 case I40E_AQ_RC_EIO:
6415 case I40E_AQ_RC_ENXIO:
6417 case I40E_AQ_RC_E2BIG:
6419 case I40E_AQ_RC_EAGAIN:
6421 case I40E_AQ_RC_ENOMEM:
6423 case I40E_AQ_RC_EACCES:
6425 case I40E_AQ_RC_EFAULT:
6427 case I40E_AQ_RC_EBUSY:
6429 case I40E_AQ_RC_EEXIST:
6431 case I40E_AQ_RC_EINVAL:
6433 case I40E_AQ_RC_ENOTTY:
6435 case I40E_AQ_RC_ENOSPC:
6437 case I40E_AQ_RC_ENOSYS:
6439 case I40E_AQ_RC_ERANGE:
6441 case I40E_AQ_RC_EFLUSHED:
6442 return (EINVAL); /* No exact equivalent in errno.h */
6443 case I40E_AQ_RC_BAD_ADDR:
6445 case I40E_AQ_RC_EMODE:
6447 case I40E_AQ_RC_EFBIG:
/*
 * PCI_IOV entry point: bring up SR-IOV support for this PF.
 *
 * Allocates the per-VF state array (zeroed, M_NOWAIT — failure is
 * handled), initializes a sysctl context per VF, creates a VEB hanging
 * off the PF VSI's uplink so VF VSIs can attach to it, and reprograms
 * MSI-X / re-enables the admin queue.  On add_veb failure the AQ status
 * is translated to an errno for the caller and the VF array is released
 * on the (not fully visible) error path.
 */
6455 ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t *params)
6459 struct ixl_vsi *pf_vsi;
6460 enum i40e_status_code ret;
6463 pf = device_get_softc(dev);
/* NOTE(review): no overflow check visible on num_vfs * sizeof — presumably
 * num_vfs is bounded by hardware caps upstream; confirm at the caller. */
6468 pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
6471 if (pf->vfs == NULL) {
/* Each VF gets its own sysctl context so its nodes can be torn down independently. */
6476 for (i = 0; i < num_vfs; i++)
6477 sysctl_ctx_init(&pf->vfs[i].ctx);
/* Create the VEB that will bridge VF VSIs to the PF VSI's uplink. */
6479 ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
6480 1, FALSE, FALSE, &pf->veb_seid, NULL);
6481 if (ret != I40E_SUCCESS) {
6482 error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
6483 device_printf(dev, "add_veb failed; code=%d error=%d", ret,
6488 ixl_configure_msix(pf);
6489 ixl_enable_adminq(hw);
6491 pf->num_vfs = num_vfs;
/* Error path: release the VF array allocated above. */
6496 free(pf->vfs, M_IXL);
/*
 * PCI_IOV teardown: undo ixl_init_iov and destroy all VF state.
 *
 * Deletes each VF's VSI element and the shared VEB via the admin queue,
 * disables interrupts if the interface is not running, then frees each
 * VF's sysctl context.  The sysctl_ctx_free loop runs after the (not
 * visible here) lock is dropped because sysctl_ctx_free may sleep.
 */
6503 ixl_uninit_iov(device_t dev)
6507 struct ixl_vsi *vsi;
6512 pf = device_get_softc(dev);
/* Remove every VF VSI that was actually created (seid 0 == never set up). */
6518 for (i = 0; i < pf->num_vfs; i++) {
6519 if (pf->vfs[i].vsi.seid != 0)
6520 i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
6523 if (pf->veb_seid != 0) {
6524 i40e_aq_delete_element(hw, pf->veb_seid, NULL);
6528 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
6529 ixl_disable_intr(vsi);
/* Snapshot the count (and presumably the vfs pointer, on a line not visible
 * here — note the bare `vfs` used below) before releasing the lock. */
6532 num_vfs = pf->num_vfs;
6538 /* Do this after the unlock as sysctl_ctx_free might sleep. */
6539 for (i = 0; i < num_vfs; i++)
6540 sysctl_ctx_free(&vfs[i].ctx);
/*
 * PCI_IOV per-VF entry point: configure VF number `vfnum` from the
 * iovctl(8)-supplied nvlist `params`.
 *
 * Sets up the VF's VSI, applies the optional fixed MAC address and the
 * capability flags (set-mac, anti-spoof, promiscuous), resets the VF so
 * the new configuration takes effect, and registers per-VF sysctl nodes
 * under "vfN".
 */
6545 ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
6547 char sysctl_name[QUEUE_NAME_LEN];
6554 pf = device_get_softc(dev);
6555 vf = &pf->vfs[vfnum];
6561 vf->vf_flags = VF_FLAG_ENABLED;
6562 SLIST_INIT(&vf->vsi.ftl);
6564 error = ixl_vf_setup_vsi(pf, vf);
6568 if (nvlist_exists_binary(params, "mac-addr")) {
/* NOTE(review): `size` returned by nvlist_get_binary is not checked against
 * ETHER_ADDR_LEN before the bcopy on any visible line — confirm the iov
 * schema validation upstream guarantees a 6-byte value. */
6569 mac = nvlist_get_binary(params, "mac-addr", &size);
6570 bcopy(mac, vf->mac, ETHER_ADDR_LEN);
6572 if (nvlist_get_bool(params, "allow-set-mac"))
6573 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6576 * If the administrator has not specified a MAC address then
6577 * we must allow the VF to choose one.
6579 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6581 if (nvlist_get_bool(params, "mac-anti-spoof"))
6582 vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
6584 if (nvlist_get_bool(params, "allow-promisc"))
6585 vf->vf_flags |= VF_FLAG_PROMISC_CAP;
/* VLAN capability is granted unconditionally (no visible nvlist knob for it). */
6587 vf->vf_flags |= VF_FLAG_VLAN_CAP;
/* Reset the VF so the flags/VSI configured above take effect in hardware. */
6589 ixl_reset_vf(pf, vf);
/* Expose per-VF statistics/sysctls under dev.ixl.N.vf<vfnum>. */
6593 snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
6594 ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
6599 #endif /* PCI_IOV */