1 /******************************************************************************
3 Copyright (c) 2013-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifndef IXL_STANDALONE_BUILD
37 #include "opt_inet6.h"
45 #include <net/rss_config.h>
48 /*********************************************************************
50 *********************************************************************/
51 char ixl_driver_version[] = "1.4.1";
53 /*********************************************************************
56 * Used by probe to select devices to load on
57 * Last field stores an index into ixl_strings
58 * Last entry must be all 0s
60 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61 *********************************************************************/
63 static ixl_vendor_info_t ixl_vendor_info_array[] =
/* Each entry: {vendor, device, subvendor, subdevice, index into ixl_strings}.
 * A subvendor/subdevice of 0 acts as a wildcard in ixl_probe(); the table is
 * walked until the all-zero sentinel entry (not visible in this excerpt). */
65 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
66 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
67 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
68 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
69 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
70 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
71 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
72 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
73 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2, 0, 0, 0},
74 /* required last entry */
78 /*********************************************************************
79 * Table of branding strings
80 *********************************************************************/
/* Indexed by the last field of ixl_vendor_info_array entries; used by
 * ixl_probe() to build the device description string. */
82 static char *ixl_strings[] = {
83 "Intel(R) Ethernet Connection XL710 Driver"
87 /*********************************************************************
89 *********************************************************************/
/* Forward declarations for all file-local (static) functions, grouped by
 * area: device lifecycle, VSI/queue setup, interrupt plumbing, filtering,
 * sysctl handlers, statistics, and (conditionally) SR-IOV support. */
90 static int ixl_probe(device_t);
91 static int ixl_attach(device_t);
92 static int ixl_detach(device_t);
93 static int ixl_shutdown(device_t);
94 static int ixl_get_hw_capabilities(struct ixl_pf *);
95 static void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
96 static int ixl_ioctl(struct ifnet *, u_long, caddr_t);
97 static void ixl_init(void *);
98 static void ixl_init_locked(struct ixl_pf *);
99 static void ixl_stop(struct ixl_pf *);
100 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
101 static int ixl_media_change(struct ifnet *);
102 static void ixl_update_link_status(struct ixl_pf *);
103 static int ixl_allocate_pci_resources(struct ixl_pf *);
104 static u16 ixl_get_bus_info(struct i40e_hw *, device_t);
105 static int ixl_setup_stations(struct ixl_pf *);
106 static int ixl_switch_config(struct ixl_pf *);
107 static int ixl_initialize_vsi(struct ixl_vsi *);
108 static int ixl_assign_vsi_msix(struct ixl_pf *);
109 static int ixl_assign_vsi_legacy(struct ixl_pf *);
110 static int ixl_init_msix(struct ixl_pf *);
111 static void ixl_configure_msix(struct ixl_pf *);
112 static void ixl_configure_itr(struct ixl_pf *);
113 static void ixl_configure_legacy(struct ixl_pf *);
114 static void ixl_free_pci_resources(struct ixl_pf *);
115 static void ixl_local_timer(void *);
116 static int ixl_setup_interface(device_t, struct ixl_vsi *);
117 static void ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
118 static void ixl_config_rss(struct ixl_vsi *);
119 static void ixl_set_queue_rx_itr(struct ixl_queue *);
120 static void ixl_set_queue_tx_itr(struct ixl_queue *);
121 static int ixl_set_advertised_speeds(struct ixl_pf *, int);
/* Ring / interrupt enable-disable helpers */
123 static int ixl_enable_rings(struct ixl_vsi *);
124 static int ixl_disable_rings(struct ixl_vsi *);
125 static void ixl_enable_intr(struct ixl_vsi *);
126 static void ixl_disable_intr(struct ixl_vsi *);
127 static void ixl_disable_rings_intr(struct ixl_vsi *);
129 static void ixl_enable_adminq(struct i40e_hw *);
130 static void ixl_disable_adminq(struct i40e_hw *);
131 static void ixl_enable_queue(struct i40e_hw *, int);
132 static void ixl_disable_queue(struct i40e_hw *, int);
133 static void ixl_enable_legacy(struct i40e_hw *);
134 static void ixl_disable_legacy(struct i40e_hw *);
/* MAC/VLAN/multicast filter management */
136 static void ixl_set_promisc(struct ixl_vsi *);
137 static void ixl_add_multi(struct ixl_vsi *);
138 static void ixl_del_multi(struct ixl_vsi *);
139 static void ixl_register_vlan(void *, struct ifnet *, u16);
140 static void ixl_unregister_vlan(void *, struct ifnet *, u16);
141 static void ixl_setup_vlan_filters(struct ixl_vsi *);
143 static void ixl_init_filters(struct ixl_vsi *);
144 static void ixl_reconfigure_filters(struct ixl_vsi *vsi);
145 static void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
146 static void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
147 static void ixl_add_hw_filters(struct ixl_vsi *, int, int);
148 static void ixl_del_hw_filters(struct ixl_vsi *, int);
149 static struct ixl_mac_filter *
150 ixl_find_filter(struct ixl_vsi *, u8 *, s16);
151 static void ixl_add_mc_filter(struct ixl_vsi *, u8 *);
152 static void ixl_free_mac_filters(struct ixl_vsi *vsi);
155 /* Sysctl debug interface */
156 static int ixl_debug_info(SYSCTL_HANDLER_ARGS);
157 static void ixl_print_debug_info(struct ixl_pf *);
159 /* The MSI/X Interrupt handlers */
160 static void ixl_intr(void *);
161 static void ixl_msix_que(void *);
162 static void ixl_msix_adminq(void *);
163 static void ixl_handle_mdd_event(struct ixl_pf *);
165 /* Deferred interrupt tasklets */
166 static void ixl_do_adminq(void *, int);
168 /* Sysctl handlers */
169 static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
170 static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
171 static int ixl_current_speed(SYSCTL_HANDLER_ARGS);
172 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
/* Statistics */
175 static void ixl_add_hw_stats(struct ixl_pf *);
176 static void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
177 struct sysctl_oid_list *, struct i40e_hw_port_stats *);
178 static void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
179 struct sysctl_oid_list *,
180 struct i40e_eth_stats *);
181 static void ixl_update_stats_counters(struct ixl_pf *);
182 static void ixl_update_eth_stats(struct ixl_vsi *);
183 static void ixl_update_vsi_stats(struct ixl_vsi *);
184 static void ixl_pf_reset_stats(struct ixl_pf *);
185 static void ixl_vsi_reset_stats(struct ixl_vsi *);
186 static void ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
188 static void ixl_stat_update32(struct i40e_hw *, u32, bool,
191 #ifdef IXL_DEBUG_SYSCTL
192 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
193 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
194 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
195 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
196 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
/* SR-IOV (PCI virtual function) support — guarded by PCI_IOV in the full
 * source; the guard lines are not visible in this excerpt. */
200 static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
202 static int ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t*);
203 static void ixl_uninit_iov(device_t dev);
204 static int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);
206 static void ixl_handle_vf_msg(struct ixl_pf *,
207 struct i40e_arq_event_info *);
208 static void ixl_handle_vflr(void *arg, int pending);
210 static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
211 static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
214 /*********************************************************************
215 * FreeBSD Device Interface Entry Points
216 *********************************************************************/
/* newbus method table: maps generic device/pci_iov operations onto the
 * driver-local handlers declared above. */
218 static device_method_t ixl_methods[] = {
219 /* Device interface */
220 DEVMETHOD(device_probe, ixl_probe),
221 DEVMETHOD(device_attach, ixl_attach),
222 DEVMETHOD(device_detach, ixl_detach),
223 DEVMETHOD(device_shutdown, ixl_shutdown),
/* SR-IOV methods (conditional on PCI_IOV in the full source) */
225 DEVMETHOD(pci_init_iov, ixl_init_iov),
226 DEVMETHOD(pci_uninit_iov, ixl_uninit_iov),
227 DEVMETHOD(pci_add_vf, ixl_add_vf),
/* Driver declaration: softc is the per-device struct ixl_pf. */
232 static driver_t ixl_driver = {
233 "ixl", ixl_methods, sizeof(struct ixl_pf),
236 devclass_t ixl_devclass;
237 DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
239 MODULE_DEPEND(ixl, pci, 1, 1, 1);
240 MODULE_DEPEND(ixl, ether, 1, 1, 1);
/* netmap dependency only when built with DEV_NETMAP */
243 MODULE_DEPEND(ixl, netmap, 1, 1, 1);
244 #endif /* DEV_NETMAP */
248 ** Global reset mutex
/* One-shot initialized in ixl_probe(); serializes PF resets across all
 * ixl instances in the system. */
250 static struct mtx ixl_reset_mtx;
253 ** TUNEABLE PARAMETERS:
/* Root sysctl node: hw.ixl */
256 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
257 "IXL driver parameters");
260 * MSIX should be the default for best performance,
261 * but this allows it to be forced off for testing.
263 static int ixl_enable_msix = 1;
264 TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
265 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
266 "Enable MSI-X interrupts");
269 ** Number of descriptors per ring:
270 ** - TX and RX are the same size
272 static int ixl_ringsz = DEFAULT_RING;
273 TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
274 SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
275 &ixl_ringsz, 0, "Descriptor Ring Size");
278 ** This can be set manually, if left as 0 the
279 ** number of queues will be calculated based
280 ** on cpus and msix vectors available.
282 int ixl_max_queues = 0;
283 TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
284 SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
285 &ixl_max_queues, 0, "Number of Queues");
288 ** Controls for Interrupt Throttling
289 ** - true/false for dynamic adjustment
290 ** - default values for static ITR
292 int ixl_dynamic_rx_itr = 0;
293 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
294 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
295 &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
297 int ixl_dynamic_tx_itr = 0;
298 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
299 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
300 &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
302 int ixl_rx_itr = IXL_ITR_8K;
303 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
304 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
305 &ixl_rx_itr, 0, "RX Interrupt Rate");
307 int ixl_tx_itr = IXL_ITR_4K;
308 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
309 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
310 &ixl_tx_itr, 0, "TX Interrupt Rate");
/* Flow-director / ATR tunables (guarded by IXL_FDIR in the full source) */
313 static int ixl_enable_fdir = 1;
314 TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
315 /* Rate at which we sample */
316 int ixl_atr_rate = 20;
317 TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
/* Pull in only the main-driver half of the netmap support code. */
321 #define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
322 #include <dev/netmap/if_ixl_netmap.h>
323 #endif /* DEV_NETMAP */
/* Human-readable flow-control mode names (remaining entries not visible
 * in this excerpt). */
325 static char *ixl_fc_string[6] = {
334 static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
/* Ethernet broadcast address, used when programming MAC filters. */
336 static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
337 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
339 /*********************************************************************
340 * Device identification routine
342 * ixl_probe determines if the driver should be loaded on
343 * the hardware based on PCI vendor/device id of the device.
345 * return BUS_PROBE_DEFAULT on success, positive on failure
346 *********************************************************************/
349 ixl_probe(device_t dev)
351 ixl_vendor_info_t *ent;
353 u16 pci_vendor_id, pci_device_id;
354 u16 pci_subvendor_id, pci_subdevice_id;
355 char device_name[256];
/* lock_init is function-static: the global reset mutex is initialized
 * exactly once, on the first successful probe. */
356 static bool lock_init = FALSE;
358 INIT_DEBUGOUT("ixl_probe: begin");
/* Fast reject: anything that is not an Intel device. */
360 pci_vendor_id = pci_get_vendor(dev);
361 if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
364 pci_device_id = pci_get_device(dev);
365 pci_subvendor_id = pci_get_subvendor(dev);
366 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk ixl_vendor_info_array until the zeroed sentinel entry; a
 * subvendor/subdevice of 0 in the table matches any value. */
368 ent = ixl_vendor_info_array;
369 while (ent->vendor_id != 0) {
370 if ((pci_vendor_id == ent->vendor_id) &&
371 (pci_device_id == ent->device_id) &&
373 ((pci_subvendor_id == ent->subvendor_id) ||
374 (ent->subvendor_id == 0)) &&
376 ((pci_subdevice_id == ent->subdevice_id) ||
377 (ent->subdevice_id == 0))) {
/* Build "<branding string>, Version - <driver version>" as the
 * device description shown by pciconf/dmesg. */
378 sprintf(device_name, "%s, Version - %s",
379 ixl_strings[ent->index],
381 device_set_desc_copy(dev, device_name);
382 /* One shot mutex init */
383 if (lock_init == FALSE) {
385 mtx_init(&ixl_reset_mtx,
387 "IXL RESET Lock", MTX_DEF);
389 return (BUS_PROBE_DEFAULT);
396 /*********************************************************************
397 * Device initialization routine
399 * The attach entry point is called when the driver is being loaded.
400 * This routine identifies the type of hardware, allocates all resources
401 * and initializes the hardware.
403 * return 0 on success, positive on failure
404 *********************************************************************/
407 ixl_attach(device_t dev)
415 nvlist_t *pf_schema, *vf_schema;
419 INIT_DEBUGOUT("ixl_attach: begin");
421 /* Allocate, clear, and link in our primary soft structure */
422 pf = device_get_softc(dev);
423 pf->dev = pf->osdep.dev = dev;
427 ** Note this assumes we have a single embedded VSI,
428 ** this could be enhanced later to allocate multiple
434 IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
436 /* Set up the timer callout */
437 callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
/* Register per-device sysctls: flow control, advertised/current speed,
 * firmware version, and the global ITR tunables re-exported per device. */
440 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
441 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
442 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
443 pf, 0, ixl_set_flowcntl, "I", "Flow Control");
445 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
446 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
447 OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
448 pf, 0, ixl_set_advertise, "I", "Advertised Speed");
450 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
451 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
452 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
453 pf, 0, ixl_current_speed, "A", "Current Port Speed");
455 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
456 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
457 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
458 pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
460 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
461 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
462 OID_AUTO, "rx_itr", CTLFLAG_RW,
463 &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
465 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
466 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
467 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
468 &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
470 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
471 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
472 OID_AUTO, "tx_itr", CTLFLAG_RW,
473 &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
475 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
476 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
477 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
478 &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
/* Extra read-only diagnostic sysctls, compiled in only for debug builds. */
480 #ifdef IXL_DEBUG_SYSCTL
481 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
482 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
483 OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
484 pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");
486 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
487 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
488 OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
489 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
491 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
492 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
493 OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
494 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
496 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
497 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
498 OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
499 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
501 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
502 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
503 OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
504 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
507 /* Save off the PCI information */
508 hw->vendor_id = pci_get_vendor(dev);
509 hw->device_id = pci_get_device(dev);
510 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
511 hw->subsystem_vendor_id =
512 pci_read_config(dev, PCIR_SUBVEND_0, 2);
513 hw->subsystem_device_id =
514 pci_read_config(dev, PCIR_SUBDEV_0, 2);
516 hw->bus.device = pci_get_slot(dev);
517 hw->bus.func = pci_get_function(dev);
519 pf->vc_debug_lvl = 1;
521 /* Do PCI setup - map BAR0, etc */
522 if (ixl_allocate_pci_resources(pf)) {
523 device_printf(dev, "Allocation of PCI resources failed\n");
528 /* Create for initial debugging use */
529 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
530 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
531 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
532 ixl_debug_info, "I", "Debug Information");
535 /* Establish a clean starting point */
537 error = i40e_pf_reset(hw);
539 device_printf(dev,"PF reset failure %x\n", error);
544 /* Set admin queue parameters */
545 hw->aq.num_arq_entries = IXL_AQ_LEN;
546 hw->aq.num_asq_entries = IXL_AQ_LEN;
547 hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
548 hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
550 /* Initialize the shared code */
551 error = i40e_init_shared_code(hw);
553 device_printf(dev,"Unable to initialize the shared code\n");
558 /* Set up the admin queue */
559 error = i40e_init_adminq(hw);
/* NOTE(review): this message path appears to fire when the adminq init
 * reports an NVM/driver version mismatch — the branch condition is not
 * visible in this excerpt. */
561 device_printf(dev, "The driver for the device stopped "
562 "because the NVM image is newer than expected.\n"
563 "You must install the most recent version of "
564 " the network driver.\n");
567 device_printf(dev, "%s\n", ixl_fw_version_str(hw));
/* Warn (without failing attach) when the firmware API version is newer
 * or older than the range this driver was built against. */
569 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
570 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
571 device_printf(dev, "The driver for the device detected "
572 "a newer version of the NVM image than expected.\n"
573 "Please install the most recent version of the network driver.\n");
574 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
575 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
576 device_printf(dev, "The driver for the device detected "
577 "an older version of the NVM image than expected.\n"
578 "Please update the NVM image.\n");
581 i40e_clear_pxe_mode(hw);
583 /* Get capabilities from the device */
584 error = ixl_get_hw_capabilities(pf);
586 device_printf(dev, "HW capabilities failure!\n");
590 /* Set up host memory cache */
591 error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
592 hw->func_caps.num_rx_qp, 0, 0);
594 device_printf(dev, "init_lan_hmc failed: %d\n", error);
598 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
600 device_printf(dev, "configure_lan_hmc failed: %d\n", error);
604 /* Disable LLDP from the firmware */
605 i40e_aq_stop_lldp(hw, TRUE, NULL);
607 i40e_get_mac_addr(hw, hw->mac.addr);
608 error = i40e_validate_mac_addr(hw->mac.addr);
610 device_printf(dev, "validate_mac_addr failed: %d\n", error);
613 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
614 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
616 /* Set up VSI and queues */
617 if (ixl_setup_stations(pf) != 0) {
618 device_printf(dev, "setup stations failed!\n");
623 /* Initialize mac filter list for VSI */
624 SLIST_INIT(&vsi->ftl);
626 /* Set up interrupt routing here */
/* MSI-X when available, otherwise fall back to legacy/MSI vectors. */
628 error = ixl_assign_vsi_msix(pf);
630 error = ixl_assign_vsi_legacy(pf);
/* Workaround: older firmware (< 4.33) needs an explicit link/AN restart. */
634 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
635 (hw->aq.fw_maj_ver < 4)) {
637 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
639 device_printf(dev, "link restart failed, aq_err=%d\n",
640 pf->hw.aq.asq_last_status);
643 /* Determine link state */
644 i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
645 pf->link_up = i40e_get_link_status(hw);
647 /* Setup OS specific network interface */
648 if (ixl_setup_interface(dev, vsi) != 0) {
649 device_printf(dev, "interface setup failed!\n");
654 error = ixl_switch_config(pf);
656 device_printf(dev, "Initial switch config failed: %d\n", error);
660 /* Limit phy interrupts to link and modules failure */
661 error = i40e_aq_set_phy_int_mask(hw,
662 I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
664 device_printf(dev, "set phy mask failed: %d\n", error);
666 /* Get the bus configuration and set the shared code */
667 bus = ixl_get_bus_info(hw, dev);
668 i40e_set_pci_config_data(hw, bus);
670 /* Initialize statistics */
671 ixl_pf_reset_stats(pf);
672 ixl_update_stats_counters(pf);
673 ixl_add_hw_stats(pf);
675 /* Register for VLAN events */
676 vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
677 ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
678 vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
679 ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
682 /* SR-IOV is only supported when MSI-X is in use. */
/* Build the IOV configuration schemas: per-VF MAC plus three boolean
 * policy knobs (anti-spoof on by default, set-mac/promisc off). */
684 pf_schema = pci_iov_schema_alloc_node();
685 vf_schema = pci_iov_schema_alloc_node();
686 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
687 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
688 IOV_SCHEMA_HASDEFAULT, TRUE);
689 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
690 IOV_SCHEMA_HASDEFAULT, FALSE);
691 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
692 IOV_SCHEMA_HASDEFAULT, FALSE);
694 iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
697 "Failed to initialize SR-IOV (error=%d)\n",
/* SR-IOV failure is non-fatal: attach continues without VFs. */
703 ixl_netmap_attach(vsi);
704 #endif /* DEV_NETMAP */
706 INIT_DEBUGOUT("ixl_attach: end");
/* Error-unwind path (labels not visible in this excerpt): release
 * resources in reverse order of acquisition. */
710 if (vsi->ifp != NULL)
713 i40e_shutdown_lan_hmc(hw);
715 i40e_shutdown_adminq(hw);
717 ixl_free_pci_resources(pf);
719 IXL_PF_LOCK_DESTROY(pf);
723 /*********************************************************************
724 * Device removal routine
726 * The detach entry point is called when the driver is being removed.
727 * This routine stops the adapter and deallocates all the resources
728 * that were allocated for driver operation.
730 * return 0 on success, positive on failure
731 *********************************************************************/
734 ixl_detach(device_t dev)
736 struct ixl_pf *pf = device_get_softc(dev);
737 struct i40e_hw *hw = &pf->hw;
738 struct ixl_vsi *vsi = &pf->vsi;
739 struct ixl_queue *que = vsi->queues;
745 INIT_DEBUGOUT("ixl_detach: begin");
747 /* Make sure VLANS are not using driver */
748 if (vsi->ifp->if_vlantrunk != NULL) {
749 device_printf(dev,"Vlan in use, detach first\n");
/* Refuse to detach while SR-IOV VFs are still attached. */
754 error = pci_iov_detach(dev);
756 device_printf(dev, "SR-IOV in use; detach first.\n");
/* Stop the interface, then drain and free each queue's taskqueue so no
 * deferred work runs after the rings are torn down. */
761 ether_ifdetach(vsi->ifp);
762 if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
768 for (int i = 0; i < vsi->num_queues; i++, que++) {
770 taskqueue_drain(que->tq, &que->task);
771 taskqueue_drain(que->tq, &que->tx_task);
772 taskqueue_free(que->tq);
776 /* Shutdown LAN HMC */
777 status = i40e_shutdown_lan_hmc(hw);
780 "Shutdown LAN HMC failed with code %d\n", status);
782 /* Shutdown admin queue */
783 status = i40e_shutdown_adminq(hw);
786 "Shutdown Admin queue failed with code %d\n", status);
788 /* Unregister VLAN events */
789 if (vsi->vlan_attach != NULL)
790 EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
791 if (vsi->vlan_detach != NULL)
792 EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
794 callout_drain(&pf->timer);
/* netmap teardown only when built with DEV_NETMAP */
796 netmap_detach(vsi->ifp);
797 #endif /* DEV_NETMAP */
798 ixl_free_pci_resources(pf);
799 bus_generic_detach(dev);
802 IXL_PF_LOCK_DESTROY(pf);
806 /*********************************************************************
808 * Shutdown entry point
810 **********************************************************************/
/* Called at system shutdown; stops the adapter (body largely not visible
 * in this excerpt). */
813 ixl_shutdown(device_t dev)
815 struct ixl_pf *pf = device_get_softc(dev);
823 /*********************************************************************
825 * Get the hardware capabilities
827 **********************************************************************/
830 ixl_get_hw_capabilities(struct ixl_pf *pf)
832 struct i40e_aqc_list_capabilities_element_resp *buf;
833 struct i40e_hw *hw = &pf->hw;
834 device_t dev = pf->dev;
/* Initial guess: room for 40 capability records; the admin queue reports
 * the size actually needed via 'needed' if this is too small. */
839 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
841 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
842 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
843 device_printf(dev, "Unable to allocate cap memory\n");
847 /* This populates the hw struct */
848 error = i40e_aq_discover_capabilities(hw, buf, len,
849 &needed, i40e_aqc_opc_list_func_capabilities, NULL);
851 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
853 /* retry once with a larger buffer */
857 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
858 device_printf(dev, "capability discovery failed: %d\n",
859 pf->hw.aq.asq_last_status);
863 /* Capture this PF's starting queue pair */
864 pf->qbase = hw->func_caps.base_queue;
/* Summarize the discovered function capabilities in the system log. */
867 device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
868 "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
869 hw->pf_id, hw->func_caps.num_vfs,
870 hw->func_caps.num_msix_vectors,
871 hw->func_caps.num_msix_vectors_vf,
872 hw->func_caps.fd_filters_guaranteed,
873 hw->func_caps.fd_filters_best_effort,
874 hw->func_caps.num_tx_qp,
875 hw->func_caps.num_rx_qp,
876 hw->func_caps.base_queue);
/* Toggle TXCSUM/TSO4 and TXCSUM_IPV6/TSO6 capabilities together, enforcing
 * the hardware invariant that TSO requires the matching TX checksum offload.
 * 'mask' holds the capability bits the user asked to flip (see ixl_ioctl
 * SIOCSIFCAP).  The IXL_FLAGS_KEEP_TSO{4,6} flags remember that TSO was
 * force-disabled alongside txcsum so it can be restored when txcsum is
 * re-enabled. */
882 ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
884 device_t dev = vsi->dev;
886 /* Enable/disable TXCSUM/TSO4 */
/* Case 1: both currently off — enabling either turns txcsum on, and
 * enabling TSO4 drags txcsum on with it. */
887 if (!(ifp->if_capenable & IFCAP_TXCSUM)
888 && !(ifp->if_capenable & IFCAP_TSO4)) {
889 if (mask & IFCAP_TXCSUM) {
890 ifp->if_capenable |= IFCAP_TXCSUM;
891 /* enable TXCSUM, restore TSO if previously enabled */
892 if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
893 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
894 ifp->if_capenable |= IFCAP_TSO4;
897 else if (mask & IFCAP_TSO4) {
898 ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
899 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
901 "TSO4 requires txcsum, enabling both...\n");
/* Case 2: txcsum on, TSO4 off — either bit may be toggled freely. */
903 } else if((ifp->if_capenable & IFCAP_TXCSUM)
904 && !(ifp->if_capenable & IFCAP_TSO4)) {
905 if (mask & IFCAP_TXCSUM)
906 ifp->if_capenable &= ~IFCAP_TXCSUM;
907 else if (mask & IFCAP_TSO4)
908 ifp->if_capenable |= IFCAP_TSO4;
/* Case 3: both on — disabling txcsum must also disable TSO4 (and
 * records that via IXL_FLAGS_KEEP_TSO4 for later restore). */
909 } else if((ifp->if_capenable & IFCAP_TXCSUM)
910 && (ifp->if_capenable & IFCAP_TSO4)) {
911 if (mask & IFCAP_TXCSUM) {
912 vsi->flags |= IXL_FLAGS_KEEP_TSO4;
913 ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
915 "TSO4 requires txcsum, disabling both...\n");
916 } else if (mask & IFCAP_TSO4)
917 ifp->if_capenable &= ~IFCAP_TSO4;
920 /* Enable/disable TXCSUM_IPV6/TSO6 */
/* Mirror of the three IPv4 cases above, for the IPv6 offload pair. */
921 if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
922 && !(ifp->if_capenable & IFCAP_TSO6)) {
923 if (mask & IFCAP_TXCSUM_IPV6) {
924 ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
925 if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
926 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
927 ifp->if_capenable |= IFCAP_TSO6;
929 } else if (mask & IFCAP_TSO6) {
930 ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
931 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
933 "TSO6 requires txcsum6, enabling both...\n");
935 } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
936 && !(ifp->if_capenable & IFCAP_TSO6)) {
937 if (mask & IFCAP_TXCSUM_IPV6)
938 ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
939 else if (mask & IFCAP_TSO6)
940 ifp->if_capenable |= IFCAP_TSO6;
941 } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
942 && (ifp->if_capenable & IFCAP_TSO6)) {
943 if (mask & IFCAP_TXCSUM_IPV6) {
944 vsi->flags |= IXL_FLAGS_KEEP_TSO6;
945 ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
947 "TSO6 requires txcsum6, disabling both...\n");
948 } else if (mask & IFCAP_TSO6)
949 ifp->if_capenable &= ~IFCAP_TSO6;
953 /*********************************************************************
956 * ixl_ioctl is called when the user wants to configure the
959 * return 0 on success, positive on failure
960 **********************************************************************/
963 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
965 struct ixl_vsi *vsi = ifp->if_softc;
966 struct ixl_pf *pf = vsi->back;
967 struct ifreq *ifr = (struct ifreq *) data;
968 #if defined(INET) || defined(INET6)
969 struct ifaddr *ifa = (struct ifaddr *)data;
/* avoid_reset: set for SIOCSIFADDR with an INET/INET6 address so we can
 * bring the interface up without a full reinit (link renegotiation). */
970 bool avoid_reset = FALSE;
/* SIOCSIFADDR handling (case label not visible in this excerpt). */
978 if (ifa->ifa_addr->sa_family == AF_INET)
982 if (ifa->ifa_addr->sa_family == AF_INET6)
985 #if defined(INET) || defined(INET6)
987 ** Calling init results in link renegotiation,
988 ** so we avoid doing it when possible.
991 ifp->if_flags |= IFF_UP;
992 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
995 if (!(ifp->if_flags & IFF_NOARP))
996 arp_ifinit(ifp, ifa);
999 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: validate against the hardware max frame, then reinit. */
1003 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1004 if (ifr->ifr_mtu > IXL_MAX_FRAME -
1005 ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
1009 ifp->if_mtu = ifr->ifr_mtu;
1010 vsi->max_frame_size =
1011 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1012 + ETHER_VLAN_ENCAP_LEN;
1013 ixl_init_locked(pf);
/* SIOCSIFFLAGS: only reprogram promisc/allmulti when those bits actually
 * changed; otherwise (re)init or stop as IFF_UP dictates. */
1018 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
1020 if (ifp->if_flags & IFF_UP) {
1021 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1022 if ((ifp->if_flags ^ pf->if_flags) &
1023 (IFF_PROMISC | IFF_ALLMULTI)) {
1024 ixl_set_promisc(vsi);
1027 ixl_init_locked(pf);
1029 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1031 pf->if_flags = ifp->if_flags;
/* Multicast list changes: quiesce interrupts while reprogramming. */
1035 IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
1036 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1038 ixl_disable_intr(vsi);
1040 ixl_enable_intr(vsi);
1045 IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
1046 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1048 ixl_disable_intr(vsi);
1050 ixl_enable_intr(vsi);
1056 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
1057 error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
/* SIOCSIFCAP: 'mask' is the set of capability bits being toggled. */
1061 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1062 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
/* TX csum/TSO pairs have coupled semantics — see ixl_cap_txcsum_tso(). */
1064 ixl_cap_txcsum_tso(vsi, ifp, mask);
1066 if (mask & IFCAP_RXCSUM)
1067 ifp->if_capenable ^= IFCAP_RXCSUM;
1068 if (mask & IFCAP_RXCSUM_IPV6)
1069 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1070 if (mask & IFCAP_LRO)
1071 ifp->if_capenable ^= IFCAP_LRO;
1072 if (mask & IFCAP_VLAN_HWTAGGING)
1073 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1074 if (mask & IFCAP_VLAN_HWFILTER)
1075 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1076 if (mask & IFCAP_VLAN_HWTSO)
1077 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1078 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1080 ixl_init_locked(pf);
1083 VLAN_CAPABILITIES(ifp);
/* Anything else falls through to the generic ethernet ioctl handler. */
1089 IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
1090 error = ether_ioctl(ifp, command, data);
1098 /*********************************************************************
1101 * This routine is used in two ways. It is used by the stack as
1102 * init entry point in network interface structure. It is also used
1103 * by the driver as a hw/sw initialization routine to get to a
1106 * return 0 on success, positive on failure
1107 **********************************************************************/
/*
 * ixl_init_locked - hw/sw (re)initialization path, PF mutex held.
 * Refreshes the MAC from the ifnet LLADDR, programs offload assist bits
 * from if_capenable, sets filter control, configures RSS, brings up the
 * VSI (rings/filters/vlans), starts the watchdog callout, plumbs MSIX or
 * legacy interrupts, enables rings/interrupts and marks the ifp RUNNING.
 * NOTE(review): listing is elided (gaps in original line numbers) —
 * braces, error paths and some statements are absent from this view.
 */
1110 ixl_init_locked(struct ixl_pf *pf)
1112 struct i40e_hw *hw = &pf->hw;
1113 struct ixl_vsi *vsi = &pf->vsi;
1114 struct ifnet *ifp = vsi->ifp;
1115 device_t dev = pf->dev;
1116 struct i40e_filter_control_settings filter;
1117 u8 tmpaddr[ETHER_ADDR_LEN];
/* Caller must hold the PF lock for the whole init sequence. */
1120 mtx_assert(&pf->pf_mtx, MA_OWNED);
1121 INIT_DEBUGOUT("ixl_init: begin");
1124 /* Get the latest mac address... User might use a LAA */
1125 bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
1126 I40E_ETH_LENGTH_OF_ADDRESS);
/* Only push a new address if it differs from hw->mac.addr and is valid. */
1127 if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
1128 i40e_validate_mac_addr(tmpaddr)) {
1129 bcopy(tmpaddr, hw->mac.addr,
1130 I40E_ETH_LENGTH_OF_ADDRESS);
1131 ret = i40e_aq_mac_address_write(hw,
1132 I40E_AQC_WRITE_TYPE_LAA_ONLY,
1133 hw->mac.addr, NULL);
/*
 * BUG(review): adjacent literals concatenate to
 * "LLA addresschange failed!!" — missing space between "address"
 * and "change".
 */
1135 device_printf(dev, "LLA address"
1136 "change failed!!\n");
1141 /* Set the various hardware offload abilities */
1142 ifp->if_hwassist = 0;
1143 if (ifp->if_capenable & IFCAP_TSO)
1144 ifp->if_hwassist |= CSUM_TSO;
1145 if (ifp->if_capenable & IFCAP_TXCSUM)
1146 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1147 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1148 ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
1150 /* Set up the device filtering */
1151 bzero(&filter, sizeof(filter));
1152 filter.enable_ethtype = TRUE;
1153 filter.enable_macvlan = TRUE;
1155 filter.enable_fdir = TRUE;
/* Filter-control failure is logged but not fatal to init. */
1157 if (i40e_set_filter_control(hw, &filter))
1158 device_printf(dev, "set_filter_control() failed\n");
1161 ixl_config_rss(vsi);
1164 ** Prepare the VSI: rings, hmc contexts, etc...
1166 if (ixl_initialize_vsi(vsi)) {
1167 device_printf(dev, "initialize vsi failed!!\n");
1171 /* Add protocol filters to list */
1172 ixl_init_filters(vsi);
1174 /* Setup vlan's if needed */
1175 ixl_setup_vlan_filters(vsi);
1177 /* Start the local timer */
1178 callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1180 /* Set up MSI/X routing and the ITR settings */
1181 if (ixl_enable_msix) {
1182 ixl_configure_msix(pf);
1183 ixl_configure_itr(pf);
/* Legacy/MSI fallback path (else branch — brace elided from view). */
1185 ixl_configure_legacy(pf);
1187 ixl_enable_rings(vsi);
/* Make this VSI the default target for unmatched traffic. */
1189 i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
1191 ixl_reconfigure_filters(vsi);
1193 /* Set MTU in hardware*/
1194 int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
1197 device_printf(vsi->dev,
1198 "aq_set_mac_config in init error, code %d\n",
1201 /* And now turn on interrupts */
1202 ixl_enable_intr(vsi);
1204 /* Now inform the stack we're ready */
1205 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1206 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/*
 * ixl_init - stack-facing init entry point (if_init).
 * NOTE(review): the function header and locking lines are elided from
 * this listing; visible body only casts the softc and calls
 * ixl_init_locked() — presumably under the PF lock, confirm upstream.
 */
1214 struct ixl_pf *pf = arg;
1217 ixl_init_locked(pf);
1224 ** MSIX Interrupt Handlers and Tasklets
/*
 * ixl_handle_que - deferred (taskqueue) per-queue service routine.
 * While the interface is RUNNING: clean RX up to IXL_RX_LIMIT, drain any
 * queued TX, and re-enqueue the task if more work remains; otherwise
 * re-enable the queue's interrupt. 'pending' is the taskqueue count
 * (unused here). Elided listing — locking/return lines absent from view.
 */
1228 ixl_handle_que(void *context, int pending)
1230 struct ixl_queue *que = context;
1231 struct ixl_vsi *vsi = que->vsi;
1232 struct i40e_hw *hw = vsi->hw;
1233 struct tx_ring *txr = &que->txr;
1234 struct ifnet *ifp = vsi->ifp;
1237 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1238 more = ixl_rxeof(que, IXL_RX_LIMIT);
1241 if (!drbr_empty(ifp, txr->br))
1242 ixl_mq_start_locked(ifp, txr);
/* RX limit hit: reschedule ourselves rather than spin in the handler. */
1245 taskqueue_enqueue(que->tq, &que->task);
1250 /* Reenable this interrupt - hmmm */
1251 ixl_enable_queue(hw, que->me);
1256 /*********************************************************************
1258 * Legacy Interrupt Service routine
1260 **********************************************************************/
/*
 * ixl_intr - legacy/MSI interrupt service routine (single vector).
 * Reads ICR0, acks via DYN_CTL0 CLEARPBA, dispatches VFLR/adminq causes
 * to the PF taskqueue, services queue 0 RX/TX, then re-arms the other
 * causes and queue-0 interrupts. Header line elided from this listing.
 */
1264 struct ixl_pf *pf = arg;
1265 struct i40e_hw *hw = &pf->hw;
1266 struct ixl_vsi *vsi = &pf->vsi;
1267 struct ixl_queue *que = vsi->queues;
1268 struct ifnet *ifp = vsi->ifp;
1269 struct tx_ring *txr = &que->txr;
1270 u32 reg, icr0, mask;
1271 bool more_tx, more_rx;
1275 /* Protect against spurious interrupts */
1276 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
/* Latch and clear pending interrupt causes. */
1279 icr0 = rd32(hw, I40E_PFINT_ICR0);
1281 reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1282 reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1283 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1285 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
/* VF reset and admin-queue events are handled in task context. */
1288 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
1289 taskqueue_enqueue(pf->tq, &pf->vflr_task);
1292 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
1293 taskqueue_enqueue(pf->tq, &pf->adminq);
1297 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1300 more_tx = ixl_txeof(que);
1301 if (!drbr_empty(vsi->ifp, txr->br))
1305 /* re-enable other interrupt causes */
1306 wr32(hw, I40E_PFINT_ICR0_ENA, mask);
1308 /* And now the queues */
1309 reg = rd32(hw, I40E_QINT_RQCTL(0));
1310 reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1311 wr32(hw, I40E_QINT_RQCTL(0), reg);
1313 reg = rd32(hw, I40E_QINT_TQCTL(0));
1314 reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
/* NOTE(review): clears an ICR0-named bit in a TQCTL register — the bit
 * positions may coincide, but confirm against the register map. */
1315 reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
1316 wr32(hw, I40E_QINT_TQCTL(0), reg);
1318 ixl_enable_legacy(hw);
1324 /*********************************************************************
1326 * MSIX VSI Interrupt Service routine
1328 **********************************************************************/
/*
 * ixl_msix_que - MSIX per-queue interrupt handler.
 * Cleans RX (bounded) and TX, restarts queued TX, updates the dynamic
 * ITR values, defers remaining work to the queue task, and re-enables
 * this queue's vector. Elided listing — locking lines absent from view.
 */
1330 ixl_msix_que(void *arg)
1332 struct ixl_queue *que = arg;
1333 struct ixl_vsi *vsi = que->vsi;
1334 struct i40e_hw *hw = vsi->hw;
1335 struct tx_ring *txr = &que->txr;
1336 bool more_tx, more_rx;
1338 /* Protect against spurious interrupts */
1339 if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
1344 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1347 more_tx = ixl_txeof(que);
1349 ** Make certain that if the stack
1350 ** has anything queued the task gets
1351 ** scheduled to handle it.
1353 if (!drbr_empty(vsi->ifp, txr->br))
/* Recompute adaptive interrupt throttling for both directions. */
1357 ixl_set_queue_rx_itr(que);
1358 ixl_set_queue_tx_itr(que);
1360 if (more_tx || more_rx)
1361 taskqueue_enqueue(que->tq, &que->task);
1363 ixl_enable_queue(hw, que->me);
1369 /*********************************************************************
1371 * MSIX Admin Queue Interrupt Service routine
1373 **********************************************************************/
/*
 * ixl_msix_adminq - MSIX vector-0 handler for admin queue and "other"
 * causes. Masks each detected cause in ICR0_ENA (MDD handled inline,
 * VFLR deferred), acks via DYN_CTL0 CLEARPBA, and kicks the adminq task.
 */
1375 ixl_msix_adminq(void *arg)
1377 struct ixl_pf *pf = arg;
1378 struct i40e_hw *hw = &pf->hw;
1383 reg = rd32(hw, I40E_PFINT_ICR0);
1384 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1386 /* Check on the cause */
1387 if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
1388 mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
/* Malicious-driver-detect: process now, keep cause masked. */
1390 if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
1391 ixl_handle_mdd_event(pf);
1392 mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1396 if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
1397 mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
1398 taskqueue_enqueue(pf->tq, &pf->vflr_task);
/* Ack: clear PBA so the vector can fire again. */
1402 reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1403 reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1404 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1406 taskqueue_enqueue(pf->tq, &pf->adminq);
1410 /*********************************************************************
1412 * Media Ioctl callback
1414 * This routine is called whenever the user queries the status of
1415 * the interface using ifconfig.
1417 **********************************************************************/
/*
 * ixl_media_status - ifmedia status callback (ifconfig queries).
 * Forces a fresh link query from firmware, then translates the PHY type
 * into an IFM_* active media word plus flow-control pause flags.
 * NOTE(review): 'break' statements between cases are elided from this
 * listing; each case presumably breaks — confirm against full source.
 */
1419 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1421 struct ixl_vsi *vsi = ifp->if_softc;
1422 struct ixl_pf *pf = vsi->back;
1423 struct i40e_hw *hw = &pf->hw;
1425 INIT_DEBUGOUT("ixl_media_status: begin");
/* Force a firmware refresh rather than trusting cached link info. */
1428 hw->phy.get_link_info = TRUE;
1429 pf->link_up = i40e_get_link_status(hw);
1430 ixl_update_link_status(pf);
1432 ifmr->ifm_status = IFM_AVALID;
1433 ifmr->ifm_active = IFM_ETHER;
1440 ifmr->ifm_status |= IFM_ACTIVE;
1441 /* Hardware is always full-duplex */
1442 ifmr->ifm_active |= IFM_FDX;
/* Map firmware PHY type to the closest IFM media word. */
1444 switch (hw->phy.link_info.phy_type) {
1446 case I40E_PHY_TYPE_100BASE_TX:
1447 ifmr->ifm_active |= IFM_100_TX;
1450 case I40E_PHY_TYPE_1000BASE_T:
1451 ifmr->ifm_active |= IFM_1000_T;
1453 case I40E_PHY_TYPE_1000BASE_SX:
1454 ifmr->ifm_active |= IFM_1000_SX;
1456 case I40E_PHY_TYPE_1000BASE_LX:
1457 ifmr->ifm_active |= IFM_1000_LX;
1460 case I40E_PHY_TYPE_10GBASE_CR1:
1461 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1462 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1463 /* Using this until a real KR media type */
1464 case I40E_PHY_TYPE_10GBASE_KR:
1465 case I40E_PHY_TYPE_10GBASE_KX4:
1466 ifmr->ifm_active |= IFM_10G_TWINAX;
1468 case I40E_PHY_TYPE_10GBASE_SR:
1469 ifmr->ifm_active |= IFM_10G_SR;
1471 case I40E_PHY_TYPE_10GBASE_LR:
1472 ifmr->ifm_active |= IFM_10G_LR;
1474 case I40E_PHY_TYPE_10GBASE_T:
1475 ifmr->ifm_active |= IFM_10G_T;
1478 case I40E_PHY_TYPE_40GBASE_CR4:
1479 case I40E_PHY_TYPE_40GBASE_CR4_CU:
1480 ifmr->ifm_active |= IFM_40G_CR4;
1482 case I40E_PHY_TYPE_40GBASE_SR4:
1483 ifmr->ifm_active |= IFM_40G_SR4;
1485 case I40E_PHY_TYPE_40GBASE_LR4:
1486 ifmr->ifm_active |= IFM_40G_LR4;
1489 ** Set these to CR4 because OS does not
1490 ** have types available yet.
1492 case I40E_PHY_TYPE_40GBASE_KR4:
1493 case I40E_PHY_TYPE_XLAUI:
1494 case I40E_PHY_TYPE_XLPPI:
1495 case I40E_PHY_TYPE_40GBASE_AOC:
1496 ifmr->ifm_active |= IFM_40G_CR4;
/* default: unrecognized PHY type from firmware. */
1499 ifmr->ifm_active |= IFM_UNKNOWN;
1502 /* Report flow control status as well */
1503 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1504 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1505 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1506 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1513 /*********************************************************************
1515 * Media Ioctl callback
1517 * This routine is called when the user changes speed/duplex using
1518 * media/mediopt option with ifconfig.
1520 **********************************************************************/
/*
 * ixl_media_change - ifmedia change callback.
 * Rejects non-Ethernet media words; otherwise just reports that manual
 * media selection is unsupported (firmware controls link).
 */
1522 ixl_media_change(struct ifnet * ifp)
1524 struct ixl_vsi *vsi = ifp->if_softc;
1525 struct ifmedia *ifm = &vsi->media;
1527 INIT_DEBUGOUT("ixl_media_change: begin");
1529 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1532 if_printf(ifp, "Media change is currently not supported.\n");
1540 ** ATR: Application Targetted Receive - creates a filter
1541 ** based on TX flow info that will keep the receive
1542 ** portion of the flow on the same queue. Based on the
1543 ** implementation this is only available for TCP connections
/*
 * ixl_atr - Application Targeted Receive: build a Flow Director filter
 * programming descriptor on the TX ring so the RX side of this TCP flow
 * lands on the same queue. Samples SYN/FIN always, otherwise at
 * txr->atr_rate. TH_FIN removes the filter; anything else adds/updates.
 * 'etype' selects the IPv4 vs IPv6 TCP packet-classifier type.
 */
1546 ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
1548 struct ixl_vsi *vsi = que->vsi;
1549 struct tx_ring *txr = &que->txr;
1550 struct i40e_filter_program_desc *FDIR;
1554 /* check if ATR is enabled and sample rate */
1555 if ((!ixl_enable_fdir) || (!txr->atr_rate))
1558 ** We sample all TCP SYN/FIN packets,
1559 ** or at the selected sample rate
1562 if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
1563 (txr->atr_count < txr->atr_rate))
1567 /* Get a descriptor to use */
1568 idx = txr->next_avail;
1569 FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
/* Ring wrap (reset-to-0 line elided from this listing). */
1570 if (++idx == que->num_desc)
1573 txr->next_avail = idx;
/* QW0: target queue index, packet classifier type, destination VSI. */
1575 ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1576 I40E_TXD_FLTR_QW0_QINDEX_MASK;
1578 ptype |= (etype == ETHERTYPE_IP) ?
1579 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1580 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1581 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1582 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1584 ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1586 dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
1589 ** We use the TCP TH_FIN as a trigger to remove
1590 ** the filter, otherwise its an update.
1592 dtype |= (th->th_flags & TH_FIN) ?
1593 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1594 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1595 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1596 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1598 dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1599 I40E_TXD_FLTR_QW1_DEST_SHIFT;
1601 dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1602 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
/* Descriptors are little-endian on the wire. */
1604 FDIR->qindex_flex_ptype_vsi = htole32(ptype);
1605 FDIR->dtype_cmd_cntindex = htole32(dtype);
/*
 * ixl_set_promisc - sync unicast/multicast promiscuous state with the
 * interface flags. Multicast promisc is forced on when IFF_ALLMULTI is
 * set or the multicast address count reaches MAX_MULTICAST_ADDR;
 * unicast promisc follows IFF_PROMISC.
 * NOTE(review): 'err' from the unicast AQ call is overwritten by the
 * multicast call — the first result is never checked.
 */
1612 ixl_set_promisc(struct ixl_vsi *vsi)
1614 struct ifnet *ifp = vsi->ifp;
1615 struct i40e_hw *hw = vsi->hw;
1617 bool uni = FALSE, multi = FALSE;
1619 if (ifp->if_flags & IFF_ALLMULTI)
1621 else { /* Need to count the multicast addresses */
1622 struct ifmultiaddr *ifma;
1623 if_maddr_rlock(ifp);
1624 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1625 if (ifma->ifma_addr->sa_family != AF_LINK)
/* Stop counting at the hw filter limit — promisc takes over. */
1627 if (mcnt == MAX_MULTICAST_ADDR)
1631 if_maddr_runlock(ifp);
1634 if (mcnt >= MAX_MULTICAST_ADDR)
1636 if (ifp->if_flags & IFF_PROMISC)
1639 err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1640 vsi->seid, uni, NULL);
1641 err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1642 vsi->seid, multi, NULL);
1646 /*********************************************************************
1649 * Routines for multicast and vlan filter management.
1651 *********************************************************************/
/*
 * ixl_add_multi - program the hardware with the ifnet multicast list.
 * First counts AF_LINK entries; if the count reaches MAX_MULTICAST_ADDR
 * the per-address filters are dropped in favor of multicast promiscuous
 * mode. Otherwise each address is added to the SW list and pushed to hw.
 */
1653 ixl_add_multi(struct ixl_vsi *vsi)
1655 struct ifmultiaddr *ifma;
1656 struct ifnet *ifp = vsi->ifp;
1657 struct i40e_hw *hw = vsi->hw;
1658 int mcnt = 0, flags;
1660 IOCTL_DEBUGOUT("ixl_add_multi: begin");
1662 if_maddr_rlock(ifp);
1664 ** First just get a count, to decide if we
1665 ** we simply use multicast promiscuous.
1667 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1668 if (ifma->ifma_addr->sa_family != AF_LINK)
1672 if_maddr_runlock(ifp);
1674 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1675 /* delete existing MC filters */
1676 ixl_del_hw_filters(vsi, mcnt);
1677 i40e_aq_set_vsi_multicast_promiscuous(hw,
1678 vsi->seid, TRUE, NULL);
/* Under the limit: mirror the ifnet list into the filter list. */
1683 if_maddr_rlock(ifp);
1684 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1685 if (ifma->ifma_addr->sa_family != AF_LINK)
1687 ixl_add_mc_filter(vsi,
1688 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1691 if_maddr_runlock(ifp);
1693 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1694 ixl_add_hw_filters(vsi, flags, mcnt);
1697 IOCTL_DEBUGOUT("ixl_add_multi: end");
/*
 * ixl_del_multi - reconcile SW multicast filters against the ifnet
 * list. Any MC filter no longer present in if_multiaddrs is marked
 * IXL_FILTER_DEL and then purged from hardware in one batch.
 * Note the O(filters x addrs) nested scan — acceptable at these sizes.
 */
1702 ixl_del_multi(struct ixl_vsi *vsi)
1704 struct ifnet *ifp = vsi->ifp;
1705 struct ifmultiaddr *ifma;
1706 struct ixl_mac_filter *f;
1710 IOCTL_DEBUGOUT("ixl_del_multi: begin");
1712 /* Search for removed multicast addresses */
1713 if_maddr_rlock(ifp);
1714 SLIST_FOREACH(f, &vsi->ftl, next) {
1715 if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1717 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1718 if (ifma->ifma_addr->sa_family != AF_LINK)
1720 u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1721 if (cmp_etheraddr(f->macaddr, mc_addr)) {
/* No ifnet entry matched this filter: schedule it for deletion. */
1726 if (match == FALSE) {
1727 f->flags |= IXL_FILTER_DEL;
1732 if_maddr_runlock(ifp);
1735 ixl_del_hw_filters(vsi, mcnt);
1739 /*********************************************************************
1742 * This routine checks for link status,updates statistics,
1743 * and runs the watchdog check.
1745 **********************************************************************/
/*
 * ixl_local_timer - periodic (1 Hz) watchdog, PF mutex held.
 * Kicks the adminq task, refreshes stats, fires a SW interrupt at each
 * queue, tracks per-queue 'busy' counts to detect hung TX queues, and
 * reinitializes the interface if every queue appears hung.
 */
1748 ixl_local_timer(void *arg)
1750 struct ixl_pf *pf = arg;
1751 struct i40e_hw *hw = &pf->hw;
1752 struct ixl_vsi *vsi = &pf->vsi;
1753 struct ixl_queue *que = vsi->queues;
1754 device_t dev = pf->dev;
1758 mtx_assert(&pf->pf_mtx, MA_OWNED);
1760 /* Fire off the adminq task */
1761 taskqueue_enqueue(pf->tq, &pf->adminq);
1764 ixl_update_stats_counters(pf);
1767 ** Check status of the queues
1769 mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1770 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
1772 for (int i = 0; i < vsi->num_queues; i++,que++) {
1773 /* Any queues with outstanding work get a sw irq */
1775 wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1777 ** Each time txeof runs without cleaning, but there
1778 ** are uncleaned descriptors it increments busy. If
1779 ** we get to 5 we declare it hung.
1781 if (que->busy == IXL_QUEUE_HUNG) {
1783 /* Mark the queue as inactive */
1784 vsi->active_queues &= ~((u64)1 << que->me);
1787 /* Check if we've come back from hung */
1788 if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1789 vsi->active_queues |= ((u64)1 << que->me);
1791 if (que->busy >= IXL_MAX_TX_BUSY) {
1793 device_printf(dev,"Warning queue %d "
1794 "appears to be hung!\n", i);
1796 que->busy = IXL_QUEUE_HUNG;
1800 /* Only reinit if all queues show hung */
1801 if (hung == vsi->num_queues)
/* Re-arm ourselves for the next tick. */
1804 callout_reset(&pf->timer, hz, ixl_local_timer, pf);
/* Hang path (label/goto elided from view): full reinit. */
1808 device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1809 ixl_init_locked(pf);
1813 ** Note: this routine updates the OS on the link state
1814 ** the real check of the hardware only happens with
1815 ** a link interrupt.
/*
 * ixl_update_link_status - push cached link state to the OS.
 * Only the edge transitions are reported: on up, log speed/duplex/flow
 * control, warn if an NPAR partition links below 10G, and notify the
 * stack; on down, log and notify. The actual hardware check happens in
 * the link interrupt path, not here.
 */
1818 ixl_update_link_status(struct ixl_pf *pf)
1820 struct ixl_vsi *vsi = &pf->vsi;
1821 struct i40e_hw *hw = &pf->hw;
1822 struct ifnet *ifp = vsi->ifp;
1823 device_t dev = pf->dev;
1826 if (vsi->link_active == FALSE) {
1827 pf->fc = hw->fc.current_mode;
/* Speed printed as 40 or 10 Gbps only (other speeds collapse to 10). */
1829 device_printf(dev,"Link is up %d Gbps %s,"
1830 " Flow Control: %s\n",
1832 I40E_LINK_SPEED_40GB)? 40:10),
1833 "Full Duplex", ixl_fc_string[pf->fc]);
1835 vsi->link_active = TRUE;
1837 ** Warn user if link speed on NPAR enabled
1838 ** partition is not at least 10GB
1840 if (hw->func_caps.npar_enable &&
1841 (hw->phy.link_info.link_speed ==
1842 I40E_LINK_SPEED_1GB ||
1843 hw->phy.link_info.link_speed ==
1844 I40E_LINK_SPEED_100MB))
/* BUG(review): literals concatenate to "detectedlink" — missing space. */
1845 device_printf(dev, "The partition detected"
1846 "link speed that is less than 10Gbps\n");
1847 if_link_state_change(ifp, LINK_STATE_UP);
1849 } else { /* Link down */
1850 if (vsi->link_active == TRUE) {
1852 device_printf(dev,"Link is Down\n");
1853 if_link_state_change(ifp, LINK_STATE_DOWN);
1854 vsi->link_active = FALSE;
1861 /*********************************************************************
1863 * This routine disables all traffic on the adapter by issuing a
1864 * global reset on the MAC and deallocates TX/RX buffers.
1866 **********************************************************************/
/*
 * ixl_stop - quiesce the adapter, PF mutex held.
 * Disables interrupts (all of them when no VFs exist, otherwise only
 * ring interrupts so VF causes keep working), stops the rings, clears
 * RUNNING/OACTIVE so the stack stops transmitting, and cancels the
 * watchdog callout.
 */
1869 ixl_stop(struct ixl_pf *pf)
1871 struct ixl_vsi *vsi = &pf->vsi;
1872 struct ifnet *ifp = vsi->ifp;
1874 mtx_assert(&pf->pf_mtx, MA_OWNED);
1876 INIT_DEBUGOUT("ixl_stop: begin\n");
1877 if (pf->num_vfs == 0)
1878 ixl_disable_intr(vsi);
1880 ixl_disable_rings_intr(vsi);
1881 ixl_disable_rings(vsi);
1883 /* Tell the stack that the interface is no longer active */
1884 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1886 /* Stop the local timer */
1887 callout_stop(&pf->timer);
1893 /*********************************************************************
1895 * Setup MSIX Interrupt resources and handlers for the VSI
1897 **********************************************************************/
/*
 * ixl_assign_vsi_legacy - allocate and wire up a single shared legacy/
 * MSI interrupt: one IRQ resource, the ixl_intr handler, a fast
 * taskqueue for the (single) queue, and a second taskqueue for adminq
 * and VFLR work. Error-return lines are elided from this listing.
 */
1899 ixl_assign_vsi_legacy(struct ixl_pf *pf)
1901 device_t dev = pf->dev;
1902 struct ixl_vsi *vsi = &pf->vsi;
1903 struct ixl_queue *que = vsi->queues;
1908 pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1909 &rid, RF_SHAREABLE | RF_ACTIVE);
1910 if (pf->res == NULL) {
1911 device_printf(dev,"Unable to allocate"
1912 " bus resource: vsi legacy/msi interrupt\n");
1916 /* Set the handler function */
1917 error = bus_setup_intr(dev, pf->res,
1918 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1919 ixl_intr, pf, &pf->tag);
1922 device_printf(dev, "Failed to register legacy/msi handler");
1925 bus_describe_intr(dev, pf->res, pf->tag, "irq0");
/* Deferred contexts for the single queue pair. */
1926 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1927 TASK_INIT(&que->task, 0, ixl_handle_que, que);
1928 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1929 taskqueue_thread_enqueue, &que->tq);
1930 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1931 device_get_nameunit(dev));
1932 TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1935 TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
1938 pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1939 taskqueue_thread_enqueue, &pf->tq);
1940 taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1941 device_get_nameunit(dev));
1947 /*********************************************************************
1949 * Setup MSIX Interrupt resources and handlers for the VSI
1951 **********************************************************************/
/*
 * ixl_assign_vsi_msix - allocate MSIX resources and handlers:
 * vector 0 for the admin queue (with adminq/VFLR taskqueue), then one
 * vector per queue pair, each with its own fast taskqueue. With RSS the
 * queue interrupts and taskqueue threads are pinned to the RSS bucket
 * CPUs. Error-return and M_NOWAIT-check lines elided from this listing.
 */
1953 ixl_assign_vsi_msix(struct ixl_pf *pf)
1955 device_t dev = pf->dev;
1956 struct ixl_vsi *vsi = &pf->vsi;
1957 struct ixl_queue *que = vsi->queues;
1958 struct tx_ring *txr;
1959 int error, rid, vector = 0;
1964 /* Admin Que is vector 0*/
1966 pf->res = bus_alloc_resource_any(dev,
1967 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1969 device_printf(dev,"Unable to allocate"
1970 " bus resource: Adminq interrupt [%d]\n", rid);
1973 /* Set the adminq vector and handler */
1974 error = bus_setup_intr(dev, pf->res,
1975 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1976 ixl_msix_adminq, pf, &pf->tag);
1979 device_printf(dev, "Failed to register Admin que handler");
1982 bus_describe_intr(dev, pf->res, pf->tag, "aq");
1983 pf->admvec = vector;
1984 /* Tasklet for Admin Queue */
1985 TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1988 TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
1991 pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1992 taskqueue_thread_enqueue, &pf->tq);
1993 taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1994 device_get_nameunit(pf->dev));
1997 /* Now set up the stations */
1998 for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
2002 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2003 RF_SHAREABLE | RF_ACTIVE);
2004 if (que->res == NULL) {
2005 device_printf(dev,"Unable to allocate"
2006 " bus resource: que interrupt [%d]\n", vector);
2009 /* Set the handler function */
2010 error = bus_setup_intr(dev, que->res,
2011 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2012 ixl_msix_que, que, &que->tag);
2015 device_printf(dev, "Failed to register que handler");
2018 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
2019 /* Bind the vector to a CPU */
/* RSS build: pin queue i to the CPU owning RSS bucket (i mod buckets). */
2021 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2023 bus_bind_intr(dev, que->res, cpu_id);
2025 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
2026 TASK_INIT(&que->task, 0, ixl_handle_que, que);
2027 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
2028 taskqueue_thread_enqueue, &que->tq);
2030 CPU_SETOF(cpu_id, &cpu_mask);
2031 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2032 &cpu_mask, "%s (bucket %d)",
2033 device_get_nameunit(dev), cpu_id);
/* Non-RSS path: unpinned taskqueue thread. */
2035 taskqueue_start_threads(&que->tq, 1, PI_NET,
2036 "%s que", device_get_nameunit(dev));
2045 * Allocate MSI/X vectors
/*
 * ixl_init_msix - negotiate the interrupt mode and vector count.
 * Returns the number of vectors obtained (queues + 1 admin for MSIX,
 * 1 for MSI, 0 meaning fall back to legacy). Handles the virtualized
 * case where BUSMASTER/MSIX-enable must be forced, sizes queue count
 * from ncpus / tunables / RSS buckets, and sets pf->vsi.num_queues.
 * Elided listing — return statements and some braces absent from view.
 */
2048 ixl_init_msix(struct ixl_pf *pf)
2050 device_t dev = pf->dev;
2051 int rid, want, vectors, queues, available;
2053 /* Override by tuneable */
2054 if (ixl_enable_msix == 0)
2058 ** When used in a virtualized environment
2059 ** PCI BUSMASTER capability may not be set
2060 ** so explicity set it here and rewrite
2061 ** the ENABLE in the MSIX control register
2062 ** at this point to cause the host to
2063 ** successfully initialize us.
2068 pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2069 pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
2070 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
2071 pci_find_cap(dev, PCIY_MSIX, &rid);
2072 rid += PCIR_MSIX_CTRL;
2073 msix_ctrl = pci_read_config(dev, rid, 2);
2074 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
2075 pci_write_config(dev, rid, msix_ctrl, 2);
2078 /* First try MSI/X */
2079 rid = PCIR_BAR(IXL_BAR);
2080 pf->msix_mem = bus_alloc_resource_any(dev,
2081 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2082 if (!pf->msix_mem) {
2083 /* May not be enabled */
2084 device_printf(pf->dev,
2085 "Unable to map MSIX table \n");
2089 available = pci_msix_count(dev);
2090 if (available == 0) { /* system has msix disabled */
2091 bus_release_resource(dev, SYS_RES_MEMORY,
2093 pf->msix_mem = NULL;
2097 /* Figure out a reasonable auto config value */
/* One vector is reserved for the admin queue, hence (available - 1). */
2098 queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
2100 /* Override with hardcoded value if sane */
2101 if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
2102 queues = ixl_max_queues;
2105 /* If we're doing RSS, clamp at the number of RSS buckets */
2106 if (queues > rss_getnumbuckets())
2107 queues = rss_getnumbuckets();
2111 ** Want one vector (RX/TX pair) per queue
2112 ** plus an additional for the admin queue.
2115 if (want <= available) /* Have enough */
2118 device_printf(pf->dev,
2119 "MSIX Configuration Problem, "
2120 "%d vectors available but %d wanted!\n",
2122 return (0); /* Will go to Legacy setup */
2125 if (pci_alloc_msix(dev, &vectors) == 0) {
2126 device_printf(pf->dev,
2127 "Using MSIX interrupts with %d vectors\n", vectors);
2129 pf->vsi.num_queues = queues;
2132 * If we're doing RSS, the number of queues needs to
2133 * match the number of RSS buckets that are configured.
2135 * + If there's more queues than RSS buckets, we'll end
2136 * up with queues that get no traffic.
2138 * + If there's more RSS buckets than queues, we'll end
2139 * up having multiple RSS buckets map to the same queue,
2140 * so there'll be some contention.
2142 if (queues != rss_getnumbuckets()) {
2144 "%s: queues (%d) != RSS buckets (%d)"
2145 "; performance will be impacted.\n",
2146 __func__, queues, rss_getnumbuckets());
/* MSIX failed: fall back to single-queue MSI, then legacy. */
2152 vectors = pci_msi_count(dev);
2153 pf->vsi.num_queues = 1;
2156 ixl_enable_msix = 0;
2157 if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2158 device_printf(pf->dev,"Using an MSI interrupt\n");
2161 device_printf(pf->dev,"Using a Legacy interrupt\n");
2168 * Plumb MSI/X vectors
/*
 * ixl_configure_msix - program interrupt cause routing for MSIX mode.
 * Vector 0 handles adminq/"other" causes; each queue pair gets its own
 * vector with RX and TX causes linked (RX -> TX -> next RX ...), the
 * chain terminated with IXL_QUEUE_EOL on the last queue's TX entry.
 */
2171 ixl_configure_msix(struct ixl_pf *pf)
2173 struct i40e_hw *hw = &pf->hw;
2174 struct ixl_vsi *vsi = &pf->vsi;
2178 /* First set up the adminq - vector 0 */
2179 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2180 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
2182 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2183 I40E_PFINT_ICR0_ENA_GRST_MASK |
2184 I40E_PFINT_ICR0_HMC_ERR_MASK |
2185 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2186 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2187 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2188 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2189 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/* 0x7FF terminates vector 0's queue list; ITR0 set to ~62us (0x3E). */
2191 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2192 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2194 wr32(hw, I40E_PFINT_DYN_CTL0,
2195 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2196 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2198 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2200 /* Next configure the queues */
2201 for (int i = 0; i < vsi->num_queues; i++, vector++) {
2202 wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2203 wr32(hw, I40E_PFINT_LNKLSTN(i), i);
/* RX cause: enable, ITR index, owning vector, next = TX queue i. */
2205 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2206 (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2207 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2208 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2209 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2210 wr32(hw, I40E_QINT_RQCTL(i), reg);
/* TX cause: next = RX queue i+1, or EOL on the last queue. */
2212 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2213 (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2214 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2215 ((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2216 (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2217 if (i == (vsi->num_queues - 1))
2218 reg |= (IXL_QUEUE_EOL
2219 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2220 wr32(hw, I40E_QINT_TQCTL(i), reg);
2225 * Configure for MSI single vector operation
/*
 * ixl_configure_legacy - program interrupt routing for single-vector
 * (MSI/legacy) operation: ITR0 zeroed, all "other" causes enabled on
 * ICR0, queue pair 0 linked to vector 0 (RX -> TX -> EOL), then the
 * TX/RX queue enables are requested.
 */
2228 ixl_configure_legacy(struct ixl_pf *pf)
2230 struct i40e_hw *hw = &pf->hw;
2234 wr32(hw, I40E_PFINT_ITR0(0), 0);
2235 wr32(hw, I40E_PFINT_ITR0(1), 0);
2238 /* Setup "other" causes */
2239 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2240 | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2241 | I40E_PFINT_ICR0_ENA_GRST_MASK
2242 | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2243 | I40E_PFINT_ICR0_ENA_GPIO_MASK
2244 | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2245 | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2246 | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2247 | I40E_PFINT_ICR0_ENA_VFLR_MASK
2248 | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2250 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2252 /* SW_ITR_IDX = 0, but don't change INTENA */
/* NOTE(review): uses DYN_CTLN masks on DYN_CTL0 — field layouts are
 * presumably identical across the two registers; confirm in datasheet. */
2253 wr32(hw, I40E_PFINT_DYN_CTL0,
2254 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2255 I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2256 /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2257 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2259 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2260 wr32(hw, I40E_PFINT_LNKLST0, 0);
2262 /* Associate the queue pair to the vector and enable the q int */
/* NOTE(review): NEXTQ_TYPE here uses the TQCTL shift macro in an RQCTL
 * value — shift values likely match, but verify against the reg map. */
2263 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2264 | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2265 | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2266 wr32(hw, I40E_QINT_RQCTL(0), reg);
2268 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2269 | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2270 | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2271 wr32(hw, I40E_QINT_TQCTL(0), reg);
2273 /* Next enable the queue pair */
2274 reg = rd32(hw, I40E_QTX_ENA(0));
2275 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2276 wr32(hw, I40E_QTX_ENA(0), reg);
2278 reg = rd32(hw, I40E_QRX_ENA(0));
2279 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2280 wr32(hw, I40E_QRX_ENA(0), reg);
2285 * Set the Initial ITR state
/*
 * ixl_configure_itr - set initial interrupt throttle rates.
 * Seeds VSI-wide RX/TX ITR from the ixl_rx_itr/ixl_tx_itr tunables
 * (OR-ing in IXL_ITR_DYNAMIC when adaptive ITR is enabled), then writes
 * the value to each queue's ITRN register and mirrors it, plus an
 * average-latency hint, into the per-ring soft state.
 */
2288 ixl_configure_itr(struct ixl_pf *pf)
2290 struct i40e_hw *hw = &pf->hw;
2291 struct ixl_vsi *vsi = &pf->vsi;
2292 struct ixl_queue *que = vsi->queues;
2294 vsi->rx_itr_setting = ixl_rx_itr;
2295 if (ixl_dynamic_rx_itr)
2296 vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2297 vsi->tx_itr_setting = ixl_tx_itr;
2298 if (ixl_dynamic_tx_itr)
2299 vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2301 for (int i = 0; i < vsi->num_queues; i++, que++) {
2302 struct tx_ring *txr = &que->txr;
2303 struct rx_ring *rxr = &que->rxr;
2305 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2306 vsi->rx_itr_setting);
2307 rxr->itr = vsi->rx_itr_setting;
2308 rxr->latency = IXL_AVE_LATENCY;
2309 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2310 vsi->tx_itr_setting);
2311 txr->itr = vsi->tx_itr_setting;
2312 txr->latency = IXL_AVE_LATENCY;
/*
 * ixl_allocate_pci_resources - map the device's register BAR, record
 * bus-space tag/handle/size in the osdep so rd32/wr32 work, then
 * negotiate MSI/MSIX via ixl_init_msix() (result stored in pf->msix).
 * Error-return lines elided from this listing.
 */
2318 ixl_allocate_pci_resources(struct ixl_pf *pf)
2321 device_t dev = pf->dev;
2324 pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2327 if (!(pf->pci_mem)) {
2328 device_printf(dev,"Unable to allocate bus resource: memory\n");
2332 pf->osdep.mem_bus_space_tag =
2333 rman_get_bustag(pf->pci_mem);
2334 pf->osdep.mem_bus_space_handle =
2335 rman_get_bushandle(pf->pci_mem);
2336 pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
/* flush_reg is read after writes to force posted-write flush. */
2337 pf->osdep.flush_reg = I40E_GLGEN_STAT;
2338 pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2340 pf->hw.back = &pf->osdep;
2343 ** Now setup MSI or MSI/X, should
2344 ** return us the number of supported
2345 ** vectors. (Will be 1 for MSI)
2347 pf->msix = ixl_init_msix(pf);
2352 ixl_free_pci_resources(struct ixl_pf * pf)
2354 struct ixl_vsi *vsi = &pf->vsi;
2355 struct ixl_queue *que = vsi->queues;
2356 device_t dev = pf->dev;
2359 memrid = PCIR_BAR(IXL_BAR);
2361 /* We may get here before stations are setup */
2362 if ((!ixl_enable_msix) || (que == NULL))
2366 ** Release all msix VSI resources:
2368 for (int i = 0; i < vsi->num_queues; i++, que++) {
2369 rid = que->msix + 1;
2370 if (que->tag != NULL) {
2371 bus_teardown_intr(dev, que->res, que->tag);
2374 if (que->res != NULL)
2375 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2379 /* Clean the AdminQ interrupt last */
2380 if (pf->admvec) /* we are doing MSIX */
2381 rid = pf->admvec + 1;
2383 (pf->msix != 0) ? (rid = 1):(rid = 0);
2385 if (pf->tag != NULL) {
2386 bus_teardown_intr(dev, pf->res, pf->tag);
2389 if (pf->res != NULL)
2390 bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2393 pci_release_msi(dev);
2395 if (pf->msix_mem != NULL)
2396 bus_release_resource(dev, SYS_RES_MEMORY,
2397 memrid, pf->msix_mem);
2399 if (pf->pci_mem != NULL)
2400 bus_release_resource(dev, SYS_RES_MEMORY,
2401 PCIR_BAR(0), pf->pci_mem);
/*
 * ixl_add_ifmedia - register the ifmedia entries for each PHY type the
 * device reports supported. 'phy_type' is a bitmask indexed by
 * I40E_PHY_TYPE_* values; backplane/attach types without an OS media
 * word are collapsed onto TWINAX (10G) or CR4 (40G).
 */
2407 ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2409 /* Display supported media types */
2410 if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2411 ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2413 if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2414 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2415 if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
2416 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2417 if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
2418 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
/* All 10G copper/backplane/attach variants map to IFM_10G_TWINAX. */
2420 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2421 phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4) ||
2422 phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR) ||
2423 phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
2424 phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
2425 phy_type & (1 << I40E_PHY_TYPE_XFI) ||
2426 phy_type & (1 << I40E_PHY_TYPE_SFI) ||
2427 phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2428 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2430 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2431 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2432 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2433 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2434 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2435 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2437 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
2438 phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2439 phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
2440 phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
2441 phy_type & (1 << I40E_PHY_TYPE_XLPPI) ||
2442 /* KR4 uses CR4 until the OS has the real media type */
2443 phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2444 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2446 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2447 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2448 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2449 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2452 /*********************************************************************
2454 * Setup networking device structure and register an interface.
2456 **********************************************************************/
/*
 * ixl_setup_interface - allocate and configure the ifnet, then attach it.
 *
 * Allocates the ifnet for this VSI, fills in the standard driver entry
 * points (init/ioctl/transmit/qflush), advertises checksum/TSO/LRO/VLAN
 * capabilities, queries the PHY for supported media (retrying once after
 * a delay for slow fiber detection), builds the ifmedia list, and finally
 * calls ether_ifattach() with the MAC address from the hardware struct.
 *
 * NOTE(review): excerpt has gaps (e.g. the if_alloc failure return and
 * some braces are not visible here).
 */
2458 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2461 struct i40e_hw *hw = vsi->hw;
2462 struct ixl_queue *que = vsi->queues;
2463 struct i40e_aq_get_phy_abilities_resp abilities;
2464 enum i40e_status_code aq_error = 0;
2466 INIT_DEBUGOUT("ixl_setup_interface: begin");
2468 ifp = vsi->ifp = if_alloc(IFT_ETHER);
2470 device_printf(dev, "can not allocate ifnet structure\n");
2473 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2474 ifp->if_mtu = ETHERMTU;
/* Placeholder baudrate; original author was unsure (see "??") */
2475 ifp->if_baudrate = 4000000000; // ??
2476 ifp->if_init = ixl_init;
2477 ifp->if_softc = vsi;
2478 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2479 ifp->if_ioctl = ixl_ioctl;
2481 #if __FreeBSD_version >= 1100036
2482 if_setgetcounterfn(ifp, ixl_get_counter);
2485 ifp->if_transmit = ixl_mq_start;
2487 ifp->if_qflush = ixl_qflush;
/* Leave some slack so the legacy ifq never completely fills */
2489 ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2491 vsi->max_frame_size =
2492 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2493 + ETHER_VLAN_ENCAP_LEN;
2496 * Tell the upper layer(s) we support long frames.
2498 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2500 ifp->if_capabilities |= IFCAP_HWCSUM;
2501 ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2502 ifp->if_capabilities |= IFCAP_TSO;
2503 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2504 ifp->if_capabilities |= IFCAP_LRO;
2506 /* VLAN capabilities */
2507 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2510 | IFCAP_VLAN_HWCSUM;
2511 ifp->if_capenable = ifp->if_capabilities;
2514 ** Don't turn this on by default, if vlans are
2515 ** created on another pseudo device (eg. lagg)
2516 ** then vlan events are not passed thru, breaking
2517 ** operation, but with HW FILTER off it works. If
2518 ** using vlans directly on the ixl driver you can
2519 ** enable this and get full hardware tag filtering.
2521 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2524 * Specify the media types supported by this adapter and register
2525 * callbacks to update media and link information
2527 ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2530 aq_error = i40e_aq_get_phy_capabilities(hw,
2531 FALSE, TRUE, &abilities, NULL);
2532 /* May need delay to detect fiber correctly */
2533 if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2534 i40e_msec_delay(200);
2535 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2536 TRUE, &abilities, NULL);
2539 if (aq_error == I40E_ERR_UNKNOWN_PHY)
2540 device_printf(dev, "Unknown PHY type detected!\n");
2543 "Error getting supported media types, err %d,"
2544 " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2548 ixl_add_ifmedia(vsi, abilities.phy_type);
2550 /* Use autoselect media by default */
2551 ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2552 ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2554 ether_ifattach(ifp, hw->mac.addr);
2560 ** Run when the Admin Queue gets a
2561 ** link transition interrupt.
/*
 * ixl_link_event - handle a link-status-change event from the Admin Queue.
 *
 * Forces a fresh link query (get_link_info = TRUE), caches the result in
 * pf->link_up, and warns if link failed because the SFP/QSFP module is
 * present but unqualified.
 */
2564 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
2566 struct i40e_hw *hw = &pf->hw;
2567 struct i40e_aqc_get_link_status *status =
2568 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
/* Invalidate the cached link info so the query goes to firmware */
2571 hw->phy.get_link_info = TRUE;
2572 check = i40e_get_link_status(hw);
2573 pf->link_up = check;
2575 printf("Link is %s\n", check ? "up":"down");
2577 /* Report if Unqualified modules are found */
2578 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2579 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
2580 (!(status->link_info & I40E_AQ_LINK_UP)))
2581 device_printf(pf->dev, "Link failed because "
2582 "an unqualified module was detected\n");
2587 /*********************************************************************
2589 * Get Firmware Switch configuration
2590 * - this will need to be more robust when more complex
2591 * switch configurations are enabled.
2593 **********************************************************************/
/*
 * ixl_switch_config - query the firmware switch configuration.
 *
 * Issues an AQ get_switch_config into a stack buffer, optionally dumps
 * each reported element, then records the uplink/downlink/element SEIDs
 * of element[0] in the VSI.  Simplified for the single-VSI case (see the
 * inline comment); will need rework for multi-VSI switch topologies.
 */
2595 ixl_switch_config(struct ixl_pf *pf)
2597 struct i40e_hw *hw = &pf->hw;
2598 struct ixl_vsi *vsi = &pf->vsi;
2599 device_t dev = vsi->dev;
2600 struct i40e_aqc_get_switch_config_resp *sw_config;
2601 u8 aq_buf[I40E_AQ_LARGE_BUF];
2605 memset(&aq_buf, 0, sizeof(aq_buf));
2606 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2607 ret = i40e_aq_get_switch_config(hw, sw_config,
2608 sizeof(aq_buf), &next, NULL);
2610 device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n",
2616 "Switch config: header reported: %d in structure, %d total\n",
2617 sw_config->header.num_reported, sw_config->header.num_total);
2618 for (int i = 0; i < sw_config->header.num_reported; i++) {
2620 "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2621 sw_config->element[i].element_type,
2622 sw_config->element[i].seid,
2623 sw_config->element[i].uplink_seid,
2624 sw_config->element[i].downlink_seid);
2627 /* Simplified due to a single VSI at the moment */
2628 vsi->uplink_seid = sw_config->element[0].uplink_seid;
2629 vsi->downlink_seid = sw_config->element[0].downlink_seid;
2630 vsi->seid = sw_config->element[0].seid;
2634 /*********************************************************************
2636 * Initialize the VSI: this handles contexts, which means things
2637 * like the number of descriptors, buffer size,
2638 * plus we init the rings thru this function.
2640 **********************************************************************/
/*
 * ixl_initialize_vsi - program the VSI context and per-queue HMC contexts.
 *
 * Fetches the current VSI parameters from firmware, sets the queue map,
 * traffic-class mapping and VLAN stripping mode, pushes the updated
 * context back via the AQ, then for every queue pair: programs the HMC
 * TX context (with head writeback enabled), programs the HMC RX context
 * (buffer size chosen from max_frame_size), and (re)initializes the
 * software rings.  The RX tail is seeded differently under netmap.
 *
 * NOTE(review): excerpt has gaps; error-path returns and some braces
 * are not visible here.
 */
2642 ixl_initialize_vsi(struct ixl_vsi *vsi)
2644 struct ixl_pf *pf = vsi->back;
2645 struct ixl_queue *que = vsi->queues;
2646 device_t dev = vsi->dev;
2647 struct i40e_hw *hw = vsi->hw;
2648 struct i40e_vsi_context ctxt;
2651 memset(&ctxt, 0, sizeof(ctxt));
2652 ctxt.seid = vsi->seid;
2653 if (pf->veb_seid != 0)
2654 ctxt.uplink_seid = pf->veb_seid;
2655 ctxt.pf_num = hw->pf_id;
2656 err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2658 device_printf(dev,"get vsi params failed %x!!\n", err);
2662 printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2663 "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2664 "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2665 ctxt.uplink_seid, ctxt.vsi_number,
2666 ctxt.vsis_allocated, ctxt.vsis_unallocated,
2667 ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2668 ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2671 ** Set the queue and traffic class bits
2672 ** - when multiple traffic classes are supported
2673 ** this will need to be more robust.
2675 ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2676 ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2677 ctxt.info.queue_mapping[0] = 0;
/* 0x0800: queue count encoded for TC0 — TODO confirm field encoding */
2678 ctxt.info.tc_mapping[0] = 0x0800;
2680 /* Set VLAN receive stripping mode */
2681 ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2682 ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2683 if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2684 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2686 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2688 /* Keep copy of VSI info in VSI for statistic counters */
2689 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2691 /* Reset VSI statistics */
2692 ixl_vsi_reset_stats(vsi);
2693 vsi->hw_filters_add = 0;
2694 vsi->hw_filters_del = 0;
2696 ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
2698 err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2700 device_printf(dev,"update vsi params failed %x!!\n",
2701 hw->aq.asq_last_status);
2705 for (int i = 0; i < vsi->num_queues; i++, que++) {
2706 struct tx_ring *txr = &que->txr;
2707 struct rx_ring *rxr = &que->rxr;
2708 struct i40e_hmc_obj_txq tctx;
2709 struct i40e_hmc_obj_rxq rctx;
2714 /* Setup the HMC TX Context */
2715 size = que->num_desc * sizeof(struct i40e_tx_desc);
2716 memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2717 tctx.new_context = 1;
/* HMC base is expressed in IXL_TX_CTX_BASE_UNITS-sized units */
2718 tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2719 tctx.qlen = que->num_desc;
2721 tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2722 /* Enable HEAD writeback */
2723 tctx.head_wb_ena = 1;
/* Head writeback location is just past the descriptor ring */
2724 tctx.head_wb_addr = txr->dma.pa +
2725 (que->num_desc * sizeof(struct i40e_tx_desc));
2726 tctx.rdylist_act = 0;
2727 err = i40e_clear_lan_tx_queue_context(hw, i);
2729 device_printf(dev, "Unable to clear TX context\n");
2732 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2734 device_printf(dev, "Unable to set TX context\n");
2737 /* Associate the ring with this PF */
2738 txctl = I40E_QTX_CTL_PF_QUEUE;
2739 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2740 I40E_QTX_CTL_PF_INDX_MASK);
2741 wr32(hw, I40E_QTX_CTL(i), txctl);
2744 /* Do ring (re)init */
2745 ixl_init_tx_ring(que);
2747 /* Next setup the HMC RX Context */
2748 if (vsi->max_frame_size <= MCLBYTES)
2749 rxr->mbuf_sz = MCLBYTES;
2751 rxr->mbuf_sz = MJUMPAGESIZE;
/* Largest frame HW may chain given the buffer size */
2753 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2755 /* Set up an RX context for the HMC */
2756 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2757 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2758 /* ignore header split for now */
2759 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2760 rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2761 vsi->max_frame_size : max_rxmax;
2763 rctx.dsize = 1; /* do 32byte descriptors */
2764 rctx.hsplit_0 = 0; /* no HDR split initially */
2765 rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
2766 rctx.qlen = que->num_desc;
2767 rctx.tphrdesc_ena = 1;
2768 rctx.tphwdesc_ena = 1;
2769 rctx.tphdata_ena = 0;
2770 rctx.tphhead_ena = 0;
2771 rctx.lrxqthresh = 2;
2778 err = i40e_clear_lan_rx_queue_context(hw, i);
2781 "Unable to clear RX context %d\n", i);
2784 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2786 device_printf(dev, "Unable to set RX context %d\n", i);
2789 err = ixl_init_rx_ring(que);
2791 device_printf(dev, "Fail in init_rx_ring %d\n", i);
2794 wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2796 /* preserve queue */
2797 if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2798 struct netmap_adapter *na = NA(vsi->ifp);
2799 struct netmap_kring *kring = &na->rx_rings[i];
/* Leave room for the slots netmap has reserved */
2800 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2801 wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2803 #endif /* DEV_NETMAP */
2804 wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2810 /*********************************************************************
2812 * Free all VSI structs.
2814 **********************************************************************/
/*
 * ixl_free_vsi - tear down all queue resources and the filter list.
 *
 * For each queue pair: skip rings whose mutex was never initialized
 * (partial attach), free the soft TX/RX structures, release the
 * descriptor DMA memory, and destroy the ring locks.  Finally frees the
 * queue array and the MAC filter list.
 */
2816 ixl_free_vsi(struct ixl_vsi *vsi)
2818 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2819 struct ixl_queue *que = vsi->queues;
2821 /* Free station queues */
2822 for (int i = 0; i < vsi->num_queues; i++, que++) {
2823 struct tx_ring *txr = &que->txr;
2824 struct rx_ring *rxr = &que->rxr;
2826 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2829 ixl_free_que_tx(que);
2831 i40e_free_dma_mem(&pf->hw, &txr->dma);
2833 IXL_TX_LOCK_DESTROY(txr);
2835 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2838 ixl_free_que_rx(que);
2840 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2842 IXL_RX_LOCK_DESTROY(rxr);
2845 free(vsi->queues, M_DEVBUF);
2847 /* Free VSI filter list */
2848 ixl_free_mac_filters(vsi);
/*
 * ixl_free_mac_filters - drain and free the VSI's MAC filter list.
 *
 * Pops each entry off the SLIST head until empty.  NOTE(review): the
 * free() of each entry falls in a gap of this excerpt (original lines
 * 2859-2863 are missing) — confirm against the full source.
 */
2852 ixl_free_mac_filters(struct ixl_vsi *vsi)
2854 struct ixl_mac_filter *f;
2856 while (!SLIST_EMPTY(&vsi->ftl)) {
2857 f = SLIST_FIRST(&vsi->ftl);
2858 SLIST_REMOVE_HEAD(&vsi->ftl, next);
2864 /*********************************************************************
2866 * Allocate memory for the VSI (virtual station interface) and their
2867 * associated queues, rings and the descriptors associated with each,
2868 * called only once at attach.
2870 **********************************************************************/
/*
 * ixl_setup_stations - allocate the VSI's queues, rings and descriptors.
 *
 * Called once at attach.  Allocates the queue array, then per queue:
 * initializes the TX lock, allocates the TX descriptor DMA (ring plus a
 * u32 for head writeback), TX soft state and buf_ring, then the RX lock,
 * RX descriptor DMA and RX soft state.  On failure the tail of the
 * function walks the queues freeing the DMA areas (unwind path).
 *
 * NOTE(review): excerpt has gaps; several error gotos/labels are not
 * visible here.
 */
2872 ixl_setup_stations(struct ixl_pf *pf)
2874 device_t dev = pf->dev;
2875 struct ixl_vsi *vsi;
2876 struct ixl_queue *que;
2877 struct tx_ring *txr;
2878 struct rx_ring *rxr;
2880 int error = I40E_SUCCESS;
2883 vsi->back = (void *)pf;
2889 /* Get memory for the station queues */
2891 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2892 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2893 device_printf(dev, "Unable to allocate queue memory\n");
2898 for (int i = 0; i < vsi->num_queues; i++) {
2899 que = &vsi->queues[i];
2900 que->num_desc = ixl_ringsz;
2903 /* mark the queue as active */
2904 vsi->active_queues |= (u64)1 << que->me;
2907 txr->tail = I40E_QTX_TAIL(que->me);
2909 /* Initialize the TX lock */
2910 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2911 device_get_nameunit(dev), que->me);
2912 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2913 /* Create the TX descriptor ring */
/* Extra u32 holds the head-writeback value; round to DBA_ALIGN */
2914 tsize = roundup2((que->num_desc *
2915 sizeof(struct i40e_tx_desc)) +
2916 sizeof(u32), DBA_ALIGN);
2917 if (i40e_allocate_dma_mem(&pf->hw,
2918 &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2920 "Unable to allocate TX Descriptor memory\n");
2924 txr->base = (struct i40e_tx_desc *)txr->dma.va;
2925 bzero((void *)txr->base, tsize);
2926 /* Now allocate transmit soft structs for the ring */
2927 if (ixl_allocate_tx_data(que)) {
2929 "Critical Failure setting up TX structures\n");
2933 /* Allocate a buf ring */
2934 txr->br = buf_ring_alloc(4096, M_DEVBUF,
2935 M_WAITOK, &txr->mtx);
2936 if (txr->br == NULL) {
2938 "Critical Failure setting up TX buf ring\n");
2944 * Next the RX queues...
2946 rsize = roundup2(que->num_desc *
2947 sizeof(union i40e_rx_desc), DBA_ALIGN);
2950 rxr->tail = I40E_QRX_TAIL(que->me);
2952 /* Initialize the RX side lock */
2953 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2954 device_get_nameunit(dev), que->me);
2955 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2957 if (i40e_allocate_dma_mem(&pf->hw,
2958 &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2960 "Unable to allocate RX Descriptor memory\n");
2964 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2965 bzero((void *)rxr->base, rsize);
2967 /* Allocate receive soft structs for the ring*/
2968 if (ixl_allocate_rx_data(que)) {
2970 "Critical Failure setting up receive structs\n");
/* Error unwind: free DMA for every queue allocated so far */
2979 for (int i = 0; i < vsi->num_queues; i++) {
2980 que = &vsi->queues[i];
2984 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2986 i40e_free_dma_mem(&pf->hw, &txr->dma);
2994 ** Provide an update to the queue RX
2995 ** interrupt moderation value.
/*
 * ixl_set_queue_rx_itr - update the RX interrupt throttling rate.
 *
 * In dynamic mode, classifies the queue's recent byte rate into a
 * latency band (LOW/AVE/BULK), picks the matching ITR (100K/20K/8K),
 * exponentially smooths toward it, and writes the register.  In static
 * mode, just syncs the ring's ITR with the VSI setting when it changed.
 */
2998 ixl_set_queue_rx_itr(struct ixl_queue *que)
3000 struct ixl_vsi *vsi = que->vsi;
3001 struct i40e_hw *hw = vsi->hw;
3002 struct rx_ring *rxr = &que->rxr;
3008 /* Idle, do nothing */
3009 if (rxr->bytes == 0)
3012 if (ixl_dynamic_rx_itr) {
/* Normalize byte count by the current ITR to estimate load */
3013 rx_bytes = rxr->bytes/rxr->itr;
3016 /* Adjust latency range */
3017 switch (rxr->latency) {
3018 case IXL_LOW_LATENCY:
3019 if (rx_bytes > 10) {
3020 rx_latency = IXL_AVE_LATENCY;
3021 rx_itr = IXL_ITR_20K;
3024 case IXL_AVE_LATENCY:
3025 if (rx_bytes > 20) {
3026 rx_latency = IXL_BULK_LATENCY;
3027 rx_itr = IXL_ITR_8K;
3028 } else if (rx_bytes <= 10) {
3029 rx_latency = IXL_LOW_LATENCY;
3030 rx_itr = IXL_ITR_100K;
3033 case IXL_BULK_LATENCY:
3034 if (rx_bytes <= 20) {
3035 rx_latency = IXL_AVE_LATENCY;
3036 rx_itr = IXL_ITR_20K;
3041 rxr->latency = rx_latency;
3043 if (rx_itr != rxr->itr) {
3044 /* do an exponential smoothing */
3045 rx_itr = (10 * rx_itr * rxr->itr) /
3046 ((9 * rx_itr) + rxr->itr);
3047 rxr->itr = rx_itr & IXL_MAX_ITR;
3048 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3049 que->me), rxr->itr);
3051 } else { /* We may have toggled to non-dynamic */
3052 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
3053 vsi->rx_itr_setting = ixl_rx_itr;
3054 /* Update the hardware if needed */
3055 if (rxr->itr != vsi->rx_itr_setting) {
3056 rxr->itr = vsi->rx_itr_setting;
3057 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3058 que->me), rxr->itr);
3068 ** Provide an update to the queue TX
3069 ** interrupt moderation value.
/*
 * ixl_set_queue_tx_itr - update the TX interrupt throttling rate.
 *
 * Mirror image of ixl_set_queue_rx_itr for the transmit side: dynamic
 * latency-band classification with exponential smoothing, or static
 * sync with the VSI-wide TX ITR setting.
 */
3072 ixl_set_queue_tx_itr(struct ixl_queue *que)
3074 struct ixl_vsi *vsi = que->vsi;
3075 struct i40e_hw *hw = vsi->hw;
3076 struct tx_ring *txr = &que->txr;
3082 /* Idle, do nothing */
3083 if (txr->bytes == 0)
3086 if (ixl_dynamic_tx_itr) {
/* Normalize byte count by the current ITR to estimate load */
3087 tx_bytes = txr->bytes/txr->itr;
3090 switch (txr->latency) {
3091 case IXL_LOW_LATENCY:
3092 if (tx_bytes > 10) {
3093 tx_latency = IXL_AVE_LATENCY;
3094 tx_itr = IXL_ITR_20K;
3097 case IXL_AVE_LATENCY:
3098 if (tx_bytes > 20) {
3099 tx_latency = IXL_BULK_LATENCY;
3100 tx_itr = IXL_ITR_8K;
3101 } else if (tx_bytes <= 10) {
3102 tx_latency = IXL_LOW_LATENCY;
3103 tx_itr = IXL_ITR_100K;
3106 case IXL_BULK_LATENCY:
3107 if (tx_bytes <= 20) {
3108 tx_latency = IXL_AVE_LATENCY;
3109 tx_itr = IXL_ITR_20K;
3114 txr->latency = tx_latency;
3116 if (tx_itr != txr->itr) {
3117 /* do an exponential smoothing */
3118 tx_itr = (10 * tx_itr * txr->itr) /
3119 ((9 * tx_itr) + txr->itr);
3120 txr->itr = tx_itr & IXL_MAX_ITR;
3121 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3122 que->me), txr->itr);
3125 } else { /* We may have toggled to non-dynamic */
3126 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
3127 vsi->tx_itr_setting = ixl_tx_itr;
3128 /* Update the hardware if needed */
3129 if (txr->itr != vsi->tx_itr_setting) {
3130 txr->itr = vsi->tx_itr_setting;
3131 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3132 que->me), txr->itr);
3140 #define QUEUE_NAME_LEN 32
/*
 * ixl_add_vsi_sysctls - create the per-VSI sysctl node and its
 * ethernet statistics children under the device's sysctl tree.
 */
3143 ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
3144 struct sysctl_ctx_list *ctx, const char *sysctl_name)
3146 struct sysctl_oid *tree;
3147 struct sysctl_oid_list *child;
3148 struct sysctl_oid_list *vsi_list;
3150 tree = device_get_sysctl_tree(pf->dev);
3151 child = SYSCTL_CHILDREN(tree);
3152 vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
3153 CTLFLAG_RD, NULL, "VSI Number");
3154 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
3156 ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
/*
 * ixl_add_hw_stats - register all driver statistics sysctls.
 *
 * Adds driver-level counters (watchdog events, admin IRQs, VC debug
 * level), the PF VSI node, a "queN" node per queue with TX/RX counters,
 * and finally the port-level MAC statistics.
 */
3160 ixl_add_hw_stats(struct ixl_pf *pf)
3162 device_t dev = pf->dev;
3163 struct ixl_vsi *vsi = &pf->vsi;
3164 struct ixl_queue *queues = vsi->queues;
3165 struct i40e_hw_port_stats *pf_stats = &pf->stats;
3167 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3168 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3169 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3170 struct sysctl_oid_list *vsi_list;
3172 struct sysctl_oid *queue_node;
3173 struct sysctl_oid_list *queue_list;
3175 struct tx_ring *txr;
3176 struct rx_ring *rxr;
3177 char queue_namebuf[QUEUE_NAME_LEN];
3179 /* Driver statistics */
3180 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3181 CTLFLAG_RD, &pf->watchdog_events,
3182 "Watchdog timeouts");
3183 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3184 CTLFLAG_RD, &pf->admin_irq,
3185 "Admin Queue IRQ Handled");
3187 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "vc_debug_level",
3188 CTLFLAG_RW, &pf->vc_debug_lvl, 0,
3189 "PF/VF Virtual Channel debug logging level");
3191 ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
3192 vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
3194 /* Queue statistics */
3195 for (int q = 0; q < vsi->num_queues; q++) {
3196 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3197 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
3198 OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
3199 queue_list = SYSCTL_CHILDREN(queue_node);
3201 txr = &(queues[q].txr);
3202 rxr = &(queues[q].rxr);
3204 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3205 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3206 "m_defrag() failed");
3207 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3208 CTLFLAG_RD, &(queues[q].dropped_pkts),
3209 "Driver dropped packets");
3210 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3211 CTLFLAG_RD, &(queues[q].irqs),
3212 "irqs on this queue");
3213 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3214 CTLFLAG_RD, &(queues[q].tso),
3216 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3217 CTLFLAG_RD, &(queues[q].tx_dma_setup),
3218 "Driver tx dma failure in xmit");
3219 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3220 CTLFLAG_RD, &(txr->no_desc),
3221 "Queue No Descriptor Available");
3222 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3223 CTLFLAG_RD, &(txr->total_packets),
3224 "Queue Packets Transmitted");
3225 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3226 CTLFLAG_RD, &(txr->tx_bytes),
3227 "Queue Bytes Transmitted");
3228 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3229 CTLFLAG_RD, &(rxr->rx_packets),
3230 "Queue Packets Received");
3231 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3232 CTLFLAG_RD, &(rxr->rx_bytes),
3233 "Queue Bytes Received");
3237 ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
/*
 * ixl_add_sysctls_eth_stats - register ethernet statistics sysctls.
 *
 * Walks a table of {counter pointer, name, description} entries and adds
 * a read-only UQUAD sysctl for each under 'child'.  The table is
 * terminated by a zero 'stat' pointer (sentinel entry lies in a gap of
 * this excerpt).
 *
 * FIX(review): the address-of expressions had been corrupted by HTML
 * entity decoding ("&eth" -> the eth ligature character); restored to
 * valid C: {&eth_stats->...}.
 */
3241 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3242 struct sysctl_oid_list *child,
3243 struct i40e_eth_stats *eth_stats)
3245 struct ixl_sysctl_info ctls[] =
3247 {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3248 {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3249 "Unicast Packets Received"},
3250 {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3251 "Multicast Packets Received"},
3252 {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3253 "Broadcast Packets Received"},
3254 {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3255 {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3256 {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3257 {&eth_stats->tx_multicast, "mcast_pkts_txd",
3258 "Multicast Packets Transmitted"},
3259 {&eth_stats->tx_broadcast, "bcast_pkts_txd",
3260 "Broadcast Packets Transmitted"},
3265 struct ixl_sysctl_info *entry = ctls;
3266 while (entry->stat != 0)
3268 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3269 CTLFLAG_RD, entry->stat,
3270 entry->description);
/*
 * ixl_add_sysctls_mac_stats - register port-level MAC statistics sysctls.
 *
 * Creates a "mac" node, registers the embedded ethernet stats under it,
 * then walks a sentinel-terminated table adding a read-only UQUAD sysctl
 * per hardware port counter (errors, size histograms, flow control).
 */
3276 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3277 struct sysctl_oid_list *child,
3278 struct i40e_hw_port_stats *stats)
3280 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3281 CTLFLAG_RD, NULL, "Mac Statistics");
3282 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3284 struct i40e_eth_stats *eth_stats = &stats->eth;
3285 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3287 struct ixl_sysctl_info ctls[] =
3289 {&stats->crc_errors, "crc_errors", "CRC Errors"},
3290 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3291 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3292 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3293 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3294 /* Packet Reception Stats */
3295 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3296 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3297 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3298 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3299 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3300 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3301 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3302 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3303 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3304 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3305 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3306 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3307 /* Packet Transmission Stats */
3308 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3309 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3310 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3311 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3312 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3313 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3314 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3316 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3317 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3318 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3319 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3324 struct ixl_sysctl_info *entry = ctls;
3325 while (entry->stat != 0)
3327 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3328 CTLFLAG_RD, entry->stat,
3329 entry->description);
3335 ** ixl_config_rss - setup RSS
3336 ** - note this is done for the single vsi
/*
 * ixl_config_rss - program RSS key, hash-enable bits, and the LUT.
 *
 * With kernel RSS (opt_rss) the key and enabled hash types come from
 * rss_getkey()/rss_gethashconfig(); otherwise a fixed seed and the full
 * default PCTYPE set are used.  The hash-enable bits are merged into the
 * existing PFQF_HENA registers, then the 512-entry lookup table is
 * filled round-robin across the VSI's queues, packed four 8-bit entries
 * per 32-bit HLUT register write.
 */
3338 static void ixl_config_rss(struct ixl_vsi *vsi)
3340 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3341 struct i40e_hw *hw = vsi->hw;
3343 u64 set_hena = 0, hena;
3346 u32 rss_hash_config;
3347 u32 rss_seed[IXL_KEYSZ];
/* Non-RSS build: fixed hash seed */
3349 u32 rss_seed[IXL_KEYSZ] = {0x41b01687,
3350 0x183cfd8c, 0xce880440, 0x580cbc3c,
3351 0x35897377, 0x328b25e1, 0x4fa98922,
3352 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
3356 /* Fetch the configured RSS key */
3357 rss_getkey((uint8_t *) &rss_seed);
3360 /* Fill out hash function seed */
3361 for (i = 0; i < IXL_KEYSZ; i++)
3362 wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
3364 /* Enable PCTYPES for RSS: */
3366 rss_hash_config = rss_gethashconfig();
3367 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3368 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3369 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3370 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
3371 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3372 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
3373 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3374 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
/* NOTE(review): IPV6_EX mapped to FRAG_IPV6 — confirm intended */
3375 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3376 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
3377 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3378 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
3379 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3380 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
/* Non-RSS build: enable the default set of packet types */
3383 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3384 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3385 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3386 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3387 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3388 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3389 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3390 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3391 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3392 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3393 ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
/* Merge with what's already enabled (HENA is split across 2 regs) */
3395 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3396 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3398 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3399 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3401 /* Populate the LUT with max no. of queues in round robin fashion */
3402 for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3403 if (j == vsi->num_queues)
3407 * Fetch the RSS bucket id for the given indirection entry.
3408 * Cap it at the number of configured buckets (which is
3411 que_id = rss_get_indirection_to_bucket(i);
3412 que_id = que_id % vsi->num_queues;
3416 /* lut = 4-byte sliding window of 4 lut entries */
3417 lut = (lut << 8) | (que_id &
3418 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3419 /* On i = 3, we have 4 entries in lut; write to the register */
3421 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3428 ** This routine is run via a vlan config EVENT,
3429 ** it enables us to use the HW Filter table since
3430 ** we can get the vlan id. This just creates the
3431 ** entry in the soft version of the VFTA, init will
3432 ** repopulate the real table.
/*
 * ixl_register_vlan - vlan_config event handler.
 *
 * Validates that the event is for this interface and the tag is in
 * range (1-4095), then adds a MAC+VLAN filter for the port MAC so the
 * hardware accepts frames with this tag.
 */
3435 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3437 struct ixl_vsi *vsi = ifp->if_softc;
3438 struct i40e_hw *hw = vsi->hw;
3439 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3441 if (ifp->if_softc != arg) /* Not our event */
3444 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3449 ixl_add_filter(vsi, hw->mac.addr, vtag);
3454 ** This routine is run via a vlan
3455 ** unconfig EVENT, remove our entry
3456 ** in the soft vfta.
/*
 * ixl_unregister_vlan - vlan_unconfig event handler.
 *
 * Mirror of ixl_register_vlan: after the same ownership and range
 * checks, removes the MAC+VLAN filter for this tag.
 */
3459 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3461 struct ixl_vsi *vsi = ifp->if_softc;
3462 struct i40e_hw *hw = vsi->hw;
3463 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3465 if (ifp->if_softc != arg)
3468 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3473 ixl_del_filter(vsi, hw->mac.addr, vtag);
3478 ** This routine updates vlan filters, called by init
3479 ** it scans the filter table and then updates the hw
3480 ** after a soft reset.
/*
 * ixl_setup_vlan_filters - re-push VLAN filters to hardware after reset.
 *
 * Called from init: scans the soft filter list for VLAN entries, marks
 * them (counting as it goes — the marking statements fall in excerpt
 * gaps), and issues one batched AQ add for all of them.
 */
3483 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3485 struct ixl_mac_filter *f;
3488 if (vsi->num_vlans == 0)
3491 ** Scan the filter list for vlan entries,
3492 ** mark them for addition and then call
3493 ** for the AQ update.
3495 SLIST_FOREACH(f, &vsi->ftl, next) {
3496 if (f->flags & IXL_FILTER_VLAN) {
3504 printf("setup vlan: no filters found!\n");
3507 flags = IXL_FILTER_VLAN;
3508 flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3509 ixl_add_hw_filters(vsi, flags, cnt);
3514 ** Initialize filter list and add filters that the hardware
3515 ** needs to know about.
/*
 * ixl_init_filters - seed the filter list with the broadcast address
 * (VLAN-agnostic), so broadcast RX works from the start.
 */
3518 ixl_init_filters(struct ixl_vsi *vsi)
3520 /* Add broadcast address */
3521 ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3525 ** This routine adds multicast filters
/*
 * ixl_add_mc_filter - add a multicast MAC filter (soft list only).
 *
 * No-op if a filter for this MAC already exists; otherwise takes a free
 * filter entry and marks it ADD|USED with VLAN_ANY.  The hardware push
 * happens later in a batched AQ call.
 */
3528 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3530 struct ixl_mac_filter *f;
3532 /* Does one already exist */
3533 f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3537 f = ixl_get_filter(vsi);
3539 printf("WARNING: no filter available!!\n");
3542 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3543 f->vlan = IXL_VLAN_ANY;
3544 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
/*
 * ixl_reconfigure_filters - re-push every USED soft filter to hardware
 * in one batched AQ call (e.g. after a reset wiped the HW tables).
 */
3551 ixl_reconfigure_filters(struct ixl_vsi *vsi)
3554 ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3558 ** This routine adds macvlan filters
/*
 * ixl_add_filter - add a MAC+VLAN filter and push it to hardware.
 *
 * Skips duplicates.  On the first real VLAN registration, replaces the
 * existing VLAN_ANY filter for this MAC with a vlan-0 filter (so
 * untagged traffic keeps working once VLAN filtering is active), then
 * allocates a soft entry, flags it ADD|USED (and VLAN if tagged), and
 * issues a single-entry hardware add.
 */
3561 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3563 struct ixl_mac_filter *f, *tmp;
3567 DEBUGOUT("ixl_add_filter: begin");
3572 /* Does one already exist */
3573 f = ixl_find_filter(vsi, macaddr, vlan);
3577 ** Is this the first vlan being registered, if so we
3578 ** need to remove the ANY filter that indicates we are
3579 ** not in a vlan, and replace that with a 0 filter.
3581 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3582 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3584 ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3585 ixl_add_filter(vsi, macaddr, 0);
3589 f = ixl_get_filter(vsi);
3591 device_printf(dev, "WARNING: no filter available!!\n");
3594 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3596 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3597 if (f->vlan != IXL_VLAN_ANY)
3598 f->flags |= IXL_FILTER_VLAN;
3602 ixl_add_hw_filters(vsi, f->flags, 1);
/*
 * ixl_del_filter - remove a MAC+VLAN filter from soft list and hardware.
 *
 * Marks the entry DEL and issues the hardware delete.  When the last
 * VLAN is removed, swaps the vlan-0 filter back to a VLAN_ANY filter
 * (inverse of the replacement done in ixl_add_filter).
 */
3607 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3609 struct ixl_mac_filter *f;
3611 f = ixl_find_filter(vsi, macaddr, vlan);
3615 f->flags |= IXL_FILTER_DEL;
3616 ixl_del_hw_filters(vsi, 1);
3619 /* Check if this is the last vlan removal */
3620 if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3621 /* Switch back to a non-vlan filter */
3622 ixl_del_filter(vsi, macaddr, 0);
3623 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3629 ** Find the filter with both matching mac addr and vlan id
3631 static struct ixl_mac_filter *
3632 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3634 struct ixl_mac_filter *f;
/* Linear scan of the VSI's singly-linked filter list; both the MAC
 * (via cmp_etheraddr) and the VLAN id must match. */
3637 SLIST_FOREACH(f, &vsi->ftl, next) {
3638 if (!cmp_etheraddr(f->macaddr, macaddr))
3640 if (f->vlan == vlan) {
3652 ** This routine takes additions to the vsi filter
3653 ** table and creates an Admin Queue call to create
3654 ** the filters in the hardware.
3657 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3659 struct i40e_aqc_add_macvlan_element_data *a, *b;
3660 struct ixl_mac_filter *f;
/* Caller must hold the PF lock — the AQ and the filter list are
 * shared state. */
3669 IXL_PF_LOCK_ASSERT(pf);
/* One AQ element per filter expected; M_NOWAIT because we may be in
 * a context that cannot sleep. */
3671 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3672 M_DEVBUF, M_NOWAIT | M_ZERO);
3674 device_printf(dev, "add_hw_filters failed to get memory\n");
3679 ** Scan the filter list, each time we find one
3680 ** we add it to the admin queue array and turn off
/* NOTE(review): exact equality (f->flags == flags), not a mask test —
 * only entries whose flags match the caller's value exactly are sent. */
3683 SLIST_FOREACH(f, &vsi->ftl, next) {
3684 if (f->flags == flags) {
3685 b = &a[j]; // a pox on fvl long names :)
3686 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3687 if (f->vlan == IXL_VLAN_ANY) {
3689 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3691 b->vlan_tag = f->vlan;
3694 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
/* Clear the ADD flag so this entry is not re-sent next pass. */
3695 f->flags &= ~IXL_FILTER_ADD;
/* Single AQ command carrying all j gathered elements. */
3702 err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3704 device_printf(dev, "aq_add_macvlan err %d, "
3705 "aq_error %d\n", err, hw->aq.asq_last_status);
3707 vsi->hw_filters_add += j;
3714 ** This routine takes removals in the vsi filter
3715 ** table and creates an Admin Queue call to delete
3716 ** the filters in the hardware.
3719 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3721 struct i40e_aqc_remove_macvlan_element_data *d, *e;
3725 struct ixl_mac_filter *f, *f_temp;
3728 DEBUGOUT("ixl_del_hw_filters: begin\n");
3734 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3735 M_DEVBUF, M_NOWAIT | M_ZERO);
3737 printf("del hw filter failed to get memory\n");
/* SAFE variant because entries are unlinked inside the loop. */
3741 SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3742 if (f->flags & IXL_FILTER_DEL) {
3743 e = &d[j]; // a pox on fvl long names :)
3744 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
/* Wildcard entries were added with IGNORE_VLAN; deletion maps them
 * to vlan_tag 0 with a perfect-match delete. */
3745 e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3746 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3747 /* delete entry from vsi list */
3748 SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3756 err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3757 /* NOTE: returns ENOENT every time but seems to work fine,
3758 so we'll ignore that specific error. */
3759 // TODO: Does this still occur on current firmwares?
3760 if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
/* Count per-element successes (error_code == 0) so the stats only
 * reflect filters the firmware actually removed. */
3762 for (int i = 0; i < j; i++)
3763 sc += (!d[i].error_code);
3764 vsi->hw_filters_del += sc;
3766 "Failed to remove %d/%d filters, aq error %d\n",
3767 j - sc, j, hw->aq.asq_last_status);
3769 vsi->hw_filters_del += j;
3773 DEBUGOUT("ixl_del_hw_filters: end\n");
/* Enable every TX and RX queue belonging to this VSI, polling each
 * queue's STAT bit to confirm the enable actually took effect. */
3778 ixl_enable_rings(struct ixl_vsi *vsi)
3780 struct ixl_pf *pf = vsi->back;
3781 struct i40e_hw *hw = &pf->hw;
3786 for (int i = 0; i < vsi->num_queues; i++) {
3787 index = vsi->first_queue + i;
/* Pre-enable hook required before touching QTX_ENA (TRUE = enable). */
3788 i40e_pre_tx_queue_cfg(hw, index, TRUE);
3790 reg = rd32(hw, I40E_QTX_ENA(index));
3791 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3792 I40E_QTX_ENA_QENA_STAT_MASK;
3793 wr32(hw, I40E_QTX_ENA(index), reg);
3794 /* Verify the enable took */
/* Poll up to 10 times, 10 ms apart (~100 ms max per queue). */
3795 for (int j = 0; j < 10; j++) {
3796 reg = rd32(hw, I40E_QTX_ENA(index));
3797 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3799 i40e_msec_delay(10);
3801 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3802 device_printf(pf->dev, "TX queue %d disabled!\n",
/* Same request-then-poll sequence for the RX side of the queue pair. */
3807 reg = rd32(hw, I40E_QRX_ENA(index));
3808 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3809 I40E_QRX_ENA_QENA_STAT_MASK;
3810 wr32(hw, I40E_QRX_ENA(index), reg);
3811 /* Verify the enable took */
3812 for (int j = 0; j < 10; j++) {
3813 reg = rd32(hw, I40E_QRX_ENA(index));
3814 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3816 i40e_msec_delay(10);
3818 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3819 device_printf(pf->dev, "RX queue %d disabled!\n",
/* Disable every TX and RX queue of this VSI — mirror image of
 * ixl_enable_rings(): clear the REQ bit, then poll until the STAT
 * bit drops. */
3829 ixl_disable_rings(struct ixl_vsi *vsi)
3831 struct ixl_pf *pf = vsi->back;
3832 struct i40e_hw *hw = &pf->hw;
3837 for (int i = 0; i < vsi->num_queues; i++) {
3838 index = vsi->first_queue + i;
3840 i40e_pre_tx_queue_cfg(hw, index, FALSE);
/* Settle time after the pre-disable hook before clearing QTX_ENA. */
3841 i40e_usec_delay(500);
3843 reg = rd32(hw, I40E_QTX_ENA(index));
3844 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3845 wr32(hw, I40E_QTX_ENA(index), reg);
3846 /* Verify the disable took */
3847 for (int j = 0; j < 10; j++) {
3848 reg = rd32(hw, I40E_QTX_ENA(index));
3849 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3851 i40e_msec_delay(10);
3853 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3854 device_printf(pf->dev, "TX queue %d still enabled!\n",
/* Same clear-then-poll sequence for the RX half. */
3859 reg = rd32(hw, I40E_QRX_ENA(index));
3860 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3861 wr32(hw, I40E_QRX_ENA(index), reg);
3862 /* Verify the disable took */
3863 for (int j = 0; j < 10; j++) {
3864 reg = rd32(hw, I40E_QRX_ENA(index));
3865 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3867 i40e_msec_delay(10);
3869 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3870 device_printf(pf->dev, "RX queue %d still enabled!\n",
3880 * ixl_handle_mdd_event
3882 * Called from interrupt handler to identify possibly malicious vfs
3883 * (But also detects events from the PF, as well)
3885 static void ixl_handle_mdd_event(struct ixl_pf *pf)
3887 struct i40e_hw *hw = &pf->hw;
3888 device_t dev = pf->dev;
3889 bool mdd_detected = false;
3890 bool pf_mdd_detected = false;
3893 /* find what triggered the MDD event */
/* Global TX MDD register: decode offending PF number, event type and
 * queue from their bit fields, then write-1-to-clear the register. */
3894 reg = rd32(hw, I40E_GL_MDET_TX);
3895 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3896 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3897 I40E_GL_MDET_TX_PF_NUM_SHIFT;
3898 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3899 I40E_GL_MDET_TX_EVENT_SHIFT;
3900 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3901 I40E_GL_MDET_TX_QUEUE_SHIFT;
3903 "Malicious Driver Detection event 0x%02x"
3904 " on TX queue %d pf number 0x%02x\n",
3905 event, queue, pf_num);
3906 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3907 mdd_detected = true;
/* Same decode for the global RX MDD register. */
3909 reg = rd32(hw, I40E_GL_MDET_RX);
3910 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3911 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3912 I40E_GL_MDET_RX_FUNCTION_SHIFT;
3913 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3914 I40E_GL_MDET_RX_EVENT_SHIFT;
3915 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3916 I40E_GL_MDET_RX_QUEUE_SHIFT;
3918 "Malicious Driver Detection event 0x%02x"
3919 " on RX queue %d of function 0x%02x\n",
3920 event, queue, func);
3921 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3922 mdd_detected = true;
/* Per-PF registers tell us whether the event was ours, not a VF's. */
3926 reg = rd32(hw, I40E_PF_MDET_TX);
3927 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3928 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3930 "MDD TX event is for this function 0x%08x",
3932 pf_mdd_detected = true;
3934 reg = rd32(hw, I40E_PF_MDET_RX);
3935 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3936 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3938 "MDD RX event is for this function 0x%08x",
3940 pf_mdd_detected = true;
3944 /* re-enable mdd interrupt cause */
3945 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3946 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3947 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/* Enable interrupts for the VSI: in MSI-X mode, unmask the admin
 * queue vector plus one vector per queue; otherwise use the single
 * legacy/MSI vector. */
3952 ixl_enable_intr(struct ixl_vsi *vsi)
3954 struct i40e_hw *hw = vsi->hw;
3955 struct ixl_queue *que = vsi->queues;
3957 if (ixl_enable_msix) {
3958 ixl_enable_adminq(hw);
3959 for (int i = 0; i < vsi->num_queues; i++, que++)
3960 ixl_enable_queue(hw, que->me);
3962 ixl_enable_legacy(hw);
/* Mask the per-queue interrupt vectors only; the admin queue vector
 * is left alone (see ixl_disable_intr for that). */
3966 ixl_disable_rings_intr(struct ixl_vsi *vsi)
3968 struct i40e_hw *hw = vsi->hw;
3969 struct ixl_queue *que = vsi->queues;
3971 for (int i = 0; i < vsi->num_queues; i++, que++)
3972 ixl_disable_queue(hw, que->me);
/* Disable the remaining interrupt source: the admin queue vector in
 * MSI-X mode, or the legacy vector otherwise. */
3976 ixl_disable_intr(struct ixl_vsi *vsi)
3978 struct i40e_hw *hw = vsi->hw;
3980 if (ixl_enable_msix)
3981 ixl_disable_adminq(hw);
3983 ixl_disable_legacy(hw);
/* Unmask the admin queue interrupt (vector 0): set INTENA + CLEARPBA
 * with the no-ITR index in PFINT_DYN_CTL0. */
3987 ixl_enable_adminq(struct i40e_hw *hw)
3991 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3992 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3993 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3994 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/* Mask the admin queue interrupt: write PFINT_DYN_CTL0 with INTENA
 * clear (only the ITR index is set). */
4000 ixl_disable_adminq(struct i40e_hw *hw)
4004 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4005 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/* Unmask the MSI-X vector for queue `id` via PFINT_DYN_CTLN(id). */
4011 ixl_enable_queue(struct i40e_hw *hw, int id)
4015 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
4016 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
4017 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
4018 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/* Mask the MSI-X vector for queue `id` (INTENA bit left clear). */
4022 ixl_disable_queue(struct i40e_hw *hw, int id)
4026 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4027 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/* Legacy/MSI mode: single vector, same PFINT_DYN_CTL0 write as
 * ixl_enable_adminq(). */
4033 ixl_enable_legacy(struct i40e_hw *hw)
4036 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4037 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4038 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4039 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/* Legacy/MSI mode: mask the single vector (INTENA clear). */
4043 ixl_disable_legacy(struct i40e_hw *hw)
4047 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4048 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/* Refresh the PF-wide hardware statistics. Each ixl_stat_update32/48
 * call reads a GLPRT port-level register and reports it relative to
 * the offset captured on the first pass (stat_offsets_loaded). Ends
 * by refreshing the main VSI stats and each enabled VF's VSI stats. */
4054 ixl_update_stats_counters(struct ixl_pf *pf)
4056 struct i40e_hw *hw = &pf->hw;
4057 struct ixl_vsi *vsi = &pf->vsi;
4060 struct i40e_hw_port_stats *nsd = &pf->stats;
4061 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
4063 /* Update hw stats */
4064 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
4065 pf->stat_offsets_loaded,
4066 &osd->crc_errors, &nsd->crc_errors);
4067 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
4068 pf->stat_offsets_loaded,
4069 &osd->illegal_bytes, &nsd->illegal_bytes);
4070 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
4071 I40E_GLPRT_GORCL(hw->port),
4072 pf->stat_offsets_loaded,
4073 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
4074 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
4075 I40E_GLPRT_GOTCL(hw->port),
4076 pf->stat_offsets_loaded,
4077 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
4078 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
4079 pf->stat_offsets_loaded,
4080 &osd->eth.rx_discards,
4081 &nsd->eth.rx_discards);
4082 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
4083 I40E_GLPRT_UPRCL(hw->port),
4084 pf->stat_offsets_loaded,
4085 &osd->eth.rx_unicast,
4086 &nsd->eth.rx_unicast);
4087 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
4088 I40E_GLPRT_UPTCL(hw->port),
4089 pf->stat_offsets_loaded,
4090 &osd->eth.tx_unicast,
4091 &nsd->eth.tx_unicast);
4092 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
4093 I40E_GLPRT_MPRCL(hw->port),
4094 pf->stat_offsets_loaded,
4095 &osd->eth.rx_multicast,
4096 &nsd->eth.rx_multicast);
4097 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
4098 I40E_GLPRT_MPTCL(hw->port),
4099 pf->stat_offsets_loaded,
4100 &osd->eth.tx_multicast,
4101 &nsd->eth.tx_multicast);
4102 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
4103 I40E_GLPRT_BPRCL(hw->port),
4104 pf->stat_offsets_loaded,
4105 &osd->eth.rx_broadcast,
4106 &nsd->eth.rx_broadcast);
4107 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
4108 I40E_GLPRT_BPTCL(hw->port),
4109 pf->stat_offsets_loaded,
4110 &osd->eth.tx_broadcast,
4111 &nsd->eth.tx_broadcast);
4113 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
4114 pf->stat_offsets_loaded,
4115 &osd->tx_dropped_link_down,
4116 &nsd->tx_dropped_link_down);
4117 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
4118 pf->stat_offsets_loaded,
4119 &osd->mac_local_faults,
4120 &nsd->mac_local_faults);
4121 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
4122 pf->stat_offsets_loaded,
4123 &osd->mac_remote_faults,
4124 &nsd->mac_remote_faults);
4125 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
4126 pf->stat_offsets_loaded,
4127 &osd->rx_length_errors,
4128 &nsd->rx_length_errors);
4130 /* Flow control (LFC) stats */
4131 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
4132 pf->stat_offsets_loaded,
4133 &osd->link_xon_rx, &nsd->link_xon_rx);
4134 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
4135 pf->stat_offsets_loaded,
4136 &osd->link_xon_tx, &nsd->link_xon_tx);
4137 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
4138 pf->stat_offsets_loaded,
4139 &osd->link_xoff_rx, &nsd->link_xoff_rx);
4140 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
4141 pf->stat_offsets_loaded,
4142 &osd->link_xoff_tx, &nsd->link_xoff_tx);
4144 /* Packet size stats rx */
4145 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
4146 I40E_GLPRT_PRC64L(hw->port),
4147 pf->stat_offsets_loaded,
4148 &osd->rx_size_64, &nsd->rx_size_64);
4149 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
4150 I40E_GLPRT_PRC127L(hw->port),
4151 pf->stat_offsets_loaded,
4152 &osd->rx_size_127, &nsd->rx_size_127);
4153 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
4154 I40E_GLPRT_PRC255L(hw->port),
4155 pf->stat_offsets_loaded,
4156 &osd->rx_size_255, &nsd->rx_size_255);
4157 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
4158 I40E_GLPRT_PRC511L(hw->port),
4159 pf->stat_offsets_loaded,
4160 &osd->rx_size_511, &nsd->rx_size_511);
4161 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
4162 I40E_GLPRT_PRC1023L(hw->port),
4163 pf->stat_offsets_loaded,
4164 &osd->rx_size_1023, &nsd->rx_size_1023);
4165 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
4166 I40E_GLPRT_PRC1522L(hw->port),
4167 pf->stat_offsets_loaded,
4168 &osd->rx_size_1522, &nsd->rx_size_1522);
4169 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
4170 I40E_GLPRT_PRC9522L(hw->port),
4171 pf->stat_offsets_loaded,
4172 &osd->rx_size_big, &nsd->rx_size_big);
4174 /* Packet size stats tx */
4175 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
4176 I40E_GLPRT_PTC64L(hw->port),
4177 pf->stat_offsets_loaded,
4178 &osd->tx_size_64, &nsd->tx_size_64);
4179 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
4180 I40E_GLPRT_PTC127L(hw->port),
4181 pf->stat_offsets_loaded,
4182 &osd->tx_size_127, &nsd->tx_size_127);
4183 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
4184 I40E_GLPRT_PTC255L(hw->port),
4185 pf->stat_offsets_loaded,
4186 &osd->tx_size_255, &nsd->tx_size_255);
4187 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
4188 I40E_GLPRT_PTC511L(hw->port),
4189 pf->stat_offsets_loaded,
4190 &osd->tx_size_511, &nsd->tx_size_511);
4191 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
4192 I40E_GLPRT_PTC1023L(hw->port),
4193 pf->stat_offsets_loaded,
4194 &osd->tx_size_1023, &nsd->tx_size_1023);
4195 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
4196 I40E_GLPRT_PTC1522L(hw->port),
4197 pf->stat_offsets_loaded,
4198 &osd->tx_size_1522, &nsd->tx_size_1522);
4199 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
4200 I40E_GLPRT_PTC9522L(hw->port),
4201 pf->stat_offsets_loaded,
4202 &osd->tx_size_big, &nsd->tx_size_big);
4204 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
4205 pf->stat_offsets_loaded,
4206 &osd->rx_undersize, &nsd->rx_undersize);
4207 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
4208 pf->stat_offsets_loaded,
4209 &osd->rx_fragments, &nsd->rx_fragments);
4210 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
4211 pf->stat_offsets_loaded,
4212 &osd->rx_oversize, &nsd->rx_oversize);
4213 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
4214 pf->stat_offsets_loaded,
4215 &osd->rx_jabber, &nsd->rx_jabber);
/* First pass is done: subsequent calls subtract the saved offsets. */
4216 pf->stat_offsets_loaded = true;
4219 /* Update vsi stats */
4220 ixl_update_vsi_stats(vsi);
/* SR-IOV: also refresh stats for each enabled VF's VSI. */
4222 for (int i = 0; i < pf->num_vfs; i++) {
4224 if (vf->vf_flags & VF_FLAG_ENABLED)
4225 ixl_update_eth_stats(&pf->vfs[i].vsi);
4230 ** Tasklet handler for MSIX Adminq interrupts
4231 ** - do outside interrupt since it might sleep
4234 ixl_do_adminq(void *context, int pending)
4236 struct ixl_pf *pf = context;
4237 struct i40e_hw *hw = &pf->hw;
4238 struct ixl_vsi *vsi = &pf->vsi;
4239 struct i40e_arq_event_info event;
/* Temporary buffer for one ARQ event's payload. */
4244 event.buf_len = IXL_AQ_BUF_SZ;
4245 event.msg_buf = malloc(event.buf_len,
4246 M_DEVBUF, M_NOWAIT | M_ZERO);
4247 if (!event.msg_buf) {
4248 printf("Unable to allocate adminq memory\n");
4253 /* clean and process any events */
/* Drain the ARQ one event at a time, dispatching on the AQ opcode. */
4255 ret = i40e_clean_arq_element(hw, &event, &result);
4258 opcode = LE16_TO_CPU(event.desc.opcode);
4260 case i40e_aqc_opc_get_link_status:
4261 ixl_link_event(pf, &event);
4262 ixl_update_link_status(pf);
4264 case i40e_aqc_opc_send_msg_to_pf:
/* VF-to-PF mailbox message (SR-IOV path). */
4266 ixl_handle_vf_msg(pf, &event);
4269 case i40e_aqc_opc_event_lan_overflow:
4273 printf("AdminQ unknown event %x\n", opcode);
/* Bail out after IXL_ADM_LIMIT iterations to bound time spent here. */
4278 } while (result && (loop++ < IXL_ADM_LIMIT));
/* Re-arm the admin queue interrupt cause before leaving. */
4280 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4281 reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4282 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4283 free(event.msg_buf, M_DEVBUF);
4286 * If there are still messages to process, reschedule ourselves.
4287 * Otherwise, re-enable our interrupt and go to sleep.
4290 taskqueue_enqueue(pf->tq, &pf->adminq);
4292 ixl_enable_intr(vsi);
/* Sysctl handler: any write to the node triggers a one-shot dump of
 * driver debug state via ixl_print_debug_info(). */
4298 ixl_debug_info(SYSCTL_HANDLER_ARGS)
4301 int error, input = 0;
4303 error = sysctl_handle_int(oidp, &input, 0, req);
/* Read-only access (no newptr) or an error: nothing to do. */
4305 if (error || !req->newptr)
4309 pf = (struct ixl_pf *)arg1;
4310 ixl_print_debug_info(pf);
/* Dump assorted queue counters and raw statistics registers to the
 * console. Debug aid only — reads only queue 0's rings, and the 0xc
 * stat index in the GLV_* reads appears hard-coded (TODO(review):
 * confirm it matches this VSI's stat_counter_idx). */
4317 ixl_print_debug_info(struct ixl_pf *pf)
4319 struct i40e_hw *hw = &pf->hw;
4320 struct ixl_vsi *vsi = &pf->vsi;
4321 struct ixl_queue *que = vsi->queues;
4322 struct rx_ring *rxr = &que->rxr;
4323 struct tx_ring *txr = &que->txr;
4327 printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4328 printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4329 printf("RX next check = %x\n", rxr->next_check);
4330 printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4331 printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4332 printf("TX desc avail = %x\n", txr->avail);
4334 reg = rd32(hw, I40E_GLV_GORCL(0xc));
4335 printf("RX Bytes = %x\n", reg);
4336 reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4337 printf("Port RX Bytes = %x\n", reg);
4338 reg = rd32(hw, I40E_GLV_RDPC(0xc));
4339 printf("RX discard = %x\n", reg);
4340 reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4341 printf("Port RX discard = %x\n", reg);
4343 reg = rd32(hw, I40E_GLV_TEPC(0xc));
4344 printf("TX errors = %x\n", reg);
4345 reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4346 printf("TX Bytes = %x\n", reg);
4348 reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4349 printf("RX undersize = %x\n", reg);
4350 reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4351 printf("RX fragments = %x\n", reg);
4352 reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4353 printf("RX oversize = %x\n", reg);
4354 reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4355 printf("RX length error = %x\n", reg);
4356 reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4357 printf("mac remote fault = %x\n", reg);
4358 reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4359 printf("mac local fault = %x\n", reg);
4363 * Update VSI-specific ethernet statistics counters.
4365 void ixl_update_eth_stats(struct ixl_vsi *vsi)
4367 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4368 struct i40e_hw *hw = &pf->hw;
4369 struct i40e_eth_stats *es;
4370 struct i40e_eth_stats *oes;
4371 struct i40e_hw_port_stats *nsd;
/* Per-VSI stat registers are indexed by the VSI's assigned counter. */
4372 u16 stat_idx = vsi->info.stat_counter_idx;
4374 es = &vsi->eth_stats;
4375 oes = &vsi->eth_stats_offsets;
4378 /* Gather up the stats that the hw collects */
4379 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4380 vsi->stat_offsets_loaded,
4381 &oes->tx_errors, &es->tx_errors);
4382 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4383 vsi->stat_offsets_loaded,
4384 &oes->rx_discards, &es->rx_discards);
4386 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4387 I40E_GLV_GORCL(stat_idx),
4388 vsi->stat_offsets_loaded,
4389 &oes->rx_bytes, &es->rx_bytes);
4390 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4391 I40E_GLV_UPRCL(stat_idx),
4392 vsi->stat_offsets_loaded,
4393 &oes->rx_unicast, &es->rx_unicast);
4394 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4395 I40E_GLV_MPRCL(stat_idx),
4396 vsi->stat_offsets_loaded,
4397 &oes->rx_multicast, &es->rx_multicast);
4398 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4399 I40E_GLV_BPRCL(stat_idx),
4400 vsi->stat_offsets_loaded,
4401 &oes->rx_broadcast, &es->rx_broadcast);
4403 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4404 I40E_GLV_GOTCL(stat_idx),
4405 vsi->stat_offsets_loaded,
4406 &oes->tx_bytes, &es->tx_bytes);
4407 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4408 I40E_GLV_UPTCL(stat_idx),
4409 vsi->stat_offsets_loaded,
4410 &oes->tx_unicast, &es->tx_unicast);
4411 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4412 I40E_GLV_MPTCL(stat_idx),
4413 vsi->stat_offsets_loaded,
4414 &oes->tx_multicast, &es->tx_multicast);
4415 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4416 I40E_GLV_BPTCL(stat_idx),
4417 vsi->stat_offsets_loaded,
4418 &oes->tx_broadcast, &es->tx_broadcast);
/* First pass done — later calls report relative to these offsets. */
4419 vsi->stat_offsets_loaded = true;
/* Refresh the VSI's ethernet stats and fold them, together with
 * port-level error counters, into the ifnet statistics via the
 * IXL_SET_* macros. */
4423 ixl_update_vsi_stats(struct ixl_vsi *vsi)
4427 struct i40e_eth_stats *es;
4430 struct i40e_hw_port_stats *nsd;
4434 es = &vsi->eth_stats;
4437 ixl_update_eth_stats(vsi);
/* Total TX drops = hw-counted discards + per-queue buf_ring drops. */
4439 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4440 for (int i = 0; i < vsi->num_queues; i++)
4441 tx_discards += vsi->queues[i].txr.br->br_drops;
4443 /* Update ifnet stats */
4444 IXL_SET_IPACKETS(vsi, es->rx_unicast +
4447 IXL_SET_OPACKETS(vsi, es->tx_unicast +
4450 IXL_SET_IBYTES(vsi, es->rx_bytes);
4451 IXL_SET_OBYTES(vsi, es->tx_bytes);
4452 IXL_SET_IMCASTS(vsi, es->rx_multicast);
4453 IXL_SET_OMCASTS(vsi, es->tx_multicast);
/* Input errors aggregate the port-level malformed-frame counters. */
4455 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4456 nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4458 IXL_SET_OERRORS(vsi, es->tx_errors);
4459 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4460 IXL_SET_OQDROPS(vsi, tx_discards);
4461 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
/* Full-duplex MAC: collisions are always zero. */
4462 IXL_SET_COLLISIONS(vsi, 0);
4466 * Reset all of the stats for the given pf
/* Zeroes both the live counters and the saved offsets, and clears the
 * offsets-loaded flag so the next update re-captures baselines. */
4468 void ixl_pf_reset_stats(struct ixl_pf *pf)
4470 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4471 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4472 pf->stat_offsets_loaded = false;
4476 * Resets all stats of the given vsi
/* Per-VSI analogue of ixl_pf_reset_stats(): zero counters and offsets,
 * force baseline re-capture on the next update. */
4478 void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4480 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4481 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4482 vsi->stat_offsets_loaded = false;
4486 * Read and update a 48 bit stat from the hw
4488 * Since the device stats are not reset at PFReset, they likely will not
4489 * be zeroed when the driver starts. We'll save the first values read
4490 * and use them as offsets to be subtracted from the raw values in order
4491 * to report stats that count from zero.
4494 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4495 bool offset_loaded, u64 *offset, u64 *stat)
4499 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4500 new_data = rd64(hw, loreg);
4503 * Use two rd32's instead of one rd64; FreeBSD versions before
4504 * 10 don't support 8 byte bus reads/writes.
4506 new_data = rd32(hw, loreg);
/* Only the low 16 bits of the high register are stat bits (48-bit
 * counter total). */
4507 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
/* Handle 48-bit counter wrap: if the raw value fell below the saved
 * offset, add 2^48 before subtracting, then mask back to 48 bits. */
4512 if (new_data >= *offset)
4513 *stat = new_data - *offset;
4515 *stat = (new_data + ((u64)1 << 48)) - *offset;
4516 *stat &= 0xFFFFFFFFFFFFULL;
4520 * Read and update a 32 bit stat from the hw
/* 32-bit analogue of ixl_stat_update48(): report the counter relative
 * to the saved offset, compensating for a single 2^32 wrap. */
4523 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4524 bool offset_loaded, u64 *offset, u64 *stat)
4528 new_data = rd32(hw, reg);
4531 if (new_data >= *offset)
4532 *stat = (u32)(new_data - *offset);
4534 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4538 ** Set flow control using sysctl:
4545 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4548 * TODO: ensure flow control is disabled if
4549 * priority flow control is enabled
4551 * TODO: ensure tx CRC by hardware should be enabled
4552 * if tx flow control is enabled.
4554 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4555 struct i40e_hw *hw = &pf->hw;
4556 device_t dev = pf->dev;
4558 enum i40e_status_code aq_error = 0;
/* NOTE(review): sysctl_handle_int writes the new value straight into
 * pf->fc before validation — on invalid input the stored value has
 * already changed. */
4562 error = sysctl_handle_int(oidp, &pf->fc, 0, req);
4563 if ((error) || (req->newptr == NULL))
/* Valid modes: 0=none, 1/2=rx or tx only, 3=full (presumed mapping —
 * confirm against i40e_fc_mode). */
4565 if (pf->fc < 0 || pf->fc > 3) {
4567 "Invalid fc mode; valid modes are 0 through 3\n");
4572 ** Changing flow control mode currently does not work on
4575 if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4576 || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4577 device_printf(dev, "Changing flow control mode unsupported"
4578 " on 40GBase-CR4 media.\n");
4582 /* Set fc ability for port */
4583 hw->fc.requested_mode = pf->fc;
4584 aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4587 "%s: Error setting new fc mode %d; fc_err %#x\n",
4588 __func__, aq_error, fc_aq_err);
/* Read-only sysctl: report the current link speed as a string chosen
 * from a speeds[] table by the PHY's reported link_speed. */
4596 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4598 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4599 struct i40e_hw *hw = &pf->hw;
4600 int error = 0, index = 0;
/* Refresh link state before reading the speed. */
4611 ixl_update_link_status(pf);
4613 switch (hw->phy.link_info.link_speed) {
4614 case I40E_LINK_SPEED_100MB:
4617 case I40E_LINK_SPEED_1GB:
4620 case I40E_LINK_SPEED_10GB:
4623 case I40E_LINK_SPEED_40GB:
4626 case I40E_LINK_SPEED_20GB:
4629 case I40E_LINK_SPEED_UNKNOWN:
4635 error = sysctl_handle_string(oidp, speeds[index],
4636 strlen(speeds[index]), req);
/* Apply a new advertised-speed bitmask (see ixl_set_advertise for the
 * bit meanings): read current PHY abilities, build a phy_config that
 * keeps all abilities but replaces link_speed, then send it via the
 * admin queue and reinitialize. */
4641 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4643 struct i40e_hw *hw = &pf->hw;
4644 device_t dev = pf->dev;
4645 struct i40e_aq_get_phy_abilities_resp abilities;
4646 struct i40e_aq_set_phy_config config;
4647 enum i40e_status_code aq_error = 0;
4649 /* Get current capability information */
4650 aq_error = i40e_aq_get_phy_capabilities(hw,
4651 FALSE, FALSE, &abilities, NULL);
4654 "%s: Error getting phy capabilities %d,"
4655 " aq error: %d\n", __func__, aq_error,
4656 hw->aq.asq_last_status);
4660 /* Prepare new config */
4661 bzero(&config, sizeof(config));
4662 config.phy_type = abilities.phy_type;
4663 config.abilities = abilities.abilities
4664 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4665 config.eee_capability = abilities.eee_capability;
4666 config.eeer = abilities.eeer_val;
4667 config.low_power_ctrl = abilities.d3_lpan;
4668 /* Translate into aq cmd link_speed */
4670 config.link_speed |= I40E_LINK_SPEED_20GB;
4672 config.link_speed |= I40E_LINK_SPEED_10GB;
4674 config.link_speed |= I40E_LINK_SPEED_1GB;
4676 config.link_speed |= I40E_LINK_SPEED_100MB;
4678 /* Do aq command & restart link */
4679 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4682 "%s: Error setting new phy config %d,"
4683 " aq error: %d\n", __func__, aq_error,
4684 hw->aq.asq_last_status);
4689 ** This seems a bit heavy handed, but we
4690 ** need to get a reinit on some devices
4694 ixl_init_locked(pf);
4701 ** Control link advertise speed:
4703 ** 0x1 - advertise 100 Mb
4704 ** 0x2 - advertise 1G
4705 ** 0x4 - advertise 10G
4706 ** 0x8 - advertise 20G
4708 ** Does not work on 40G devices.
4711 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4713 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4714 struct i40e_hw *hw = &pf->hw;
4715 device_t dev = pf->dev;
4716 int requested_ls = 0;
4720 ** FW doesn't support changing advertised speed
4721 ** for 40G devices; speed is always 40G.
4723 if (i40e_is_40G_device(hw->device_id))
4726 /* Read in new mode */
4727 requested_ls = pf->advertised_speed;
4728 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4729 if ((error) || (req->newptr == NULL))
4731 /* Check for sane value */
4732 if (requested_ls < 0x1 || requested_ls > 0xE) {
4733 device_printf(dev, "Invalid advertised speed; "
4734 "valid modes are 0x1 through 0xE\n");
4737 /* Then check for validity based on adapter type */
4738 switch (hw->device_id) {
4739 case I40E_DEV_ID_10G_BASE_T:
/* 10GBase-T parts cannot advertise 20G (bit 0x8). */
4740 if (requested_ls & 0x8) {
4742 "20Gbs speed not supported on this device.\n");
4746 case I40E_DEV_ID_20G_KR2:
/* 20G KR2 parts cannot advertise 100Mb (bit 0x1). */
4747 if (requested_ls & 0x1) {
4749 "100Mbs speed not supported on this device.\n");
/* Default case: only the 1G (0x2) and 10G (0x4) bits are allowed. */
4754 if (requested_ls & ~0x6) {
4756 "Only 1/10Gbs speeds are supported on this device.\n");
4762 /* Exit if no change */
4763 if (pf->advertised_speed == requested_ls)
4766 error = ixl_set_advertised_speeds(pf, requested_ls);
/* Commit only after the hardware accepted the new configuration. */
4770 pf->advertised_speed = requested_ls;
4771 ixl_update_link_status(pf);
4776 ** Get the width and transaction speed of
4777 ** the bus this adapter is plugged into.
4780 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4786 /* Get the PCI Express Capabilities offset */
4787 pci_find_cap(dev, PCIY_EXPRESS, &offset);
4789 /* ...and read the Link Status Register */
4790 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
/* Decode negotiated link width from the Link Status register. */
4792 switch (link & I40E_PCI_LINK_WIDTH) {
4793 case I40E_PCI_LINK_WIDTH_1:
4794 hw->bus.width = i40e_bus_width_pcie_x1;
4796 case I40E_PCI_LINK_WIDTH_2:
4797 hw->bus.width = i40e_bus_width_pcie_x2;
4799 case I40E_PCI_LINK_WIDTH_4:
4800 hw->bus.width = i40e_bus_width_pcie_x4;
4802 case I40E_PCI_LINK_WIDTH_8:
4803 hw->bus.width = i40e_bus_width_pcie_x8;
4806 hw->bus.width = i40e_bus_width_unknown;
/* Decode negotiated link speed (Gen1/Gen2/Gen3). */
4810 switch (link & I40E_PCI_LINK_SPEED) {
4811 case I40E_PCI_LINK_SPEED_2500:
4812 hw->bus.speed = i40e_bus_speed_2500;
4814 case I40E_PCI_LINK_SPEED_5000:
4815 hw->bus.speed = i40e_bus_speed_5000;
4817 case I40E_PCI_LINK_SPEED_8000:
4818 hw->bus.speed = i40e_bus_speed_8000;
4821 hw->bus.speed = i40e_bus_speed_unknown;
4826 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4827 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4828 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4829 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4830 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4831 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4832 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
/* Warn when the slot is slower than Gen3 — the adapter may be
 * bandwidth-limited. */
4835 if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4836 (hw->bus.speed < i40e_bus_speed_8000)) {
4837 device_printf(dev, "PCI-Express bandwidth available"
4838 " for this device\n may be insufficient for"
4839 " optimal performance.\n");
4840 device_printf(dev, "For expected performance a x8 "
4841 "PCIE Gen3 slot is required.\n");
/* Read-only sysctl: format firmware / AQ API / NVM / eetrack versions
 * into one short string ("fX.Y aX.Y nXX.XX eXXXXXXXX"). */
4848 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4850 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4851 struct i40e_hw *hw = &pf->hw;
4854 snprintf(buf, sizeof(buf),
4855 "f%d.%d a%d.%d n%02x.%02x e%08x",
4856 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
4857 hw->aq.api_maj_ver, hw->aq.api_min_ver,
/* NVM version packs major/minor into one word; unpack via the
 * IXL_NVM_VERSION_* masks and shifts. */
4858 (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
4859 IXL_NVM_VERSION_HI_SHIFT,
4860 (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
4861 IXL_NVM_VERSION_LO_SHIFT,
4863 return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4867 #ifdef IXL_DEBUG_SYSCTL
/* Debug sysctl (IXL_DEBUG_SYSCTL): query link status from firmware
 * and report the raw fields as formatted text. */
4869 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4871 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4872 struct i40e_hw *hw = &pf->hw;
4873 struct i40e_link_status link_status;
4876 enum i40e_status_code aq_error = 0;
/* TRUE = ask firmware for fresh link info rather than cached data. */
4878 aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4880 printf("i40e_aq_get_link_info() error %d\n", aq_error);
4885 "PHY Type : %#04x\n"
4887 "Link info: %#04x\n"
4890 link_status.phy_type, link_status.link_speed,
4891 link_status.link_info, link_status.an_info,
4892 link_status.ext_info);
4894 return (sysctl_handle_string(oidp, buf, strlen(buf), req));
/* Debug sysctl (IXL_DEBUG_SYSCTL): dump the PHY abilities reported by
 * firmware (type, speeds, EEE/EEER, D3 low-power) as text. */
4898 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4900 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4901 struct i40e_hw *hw = &pf->hw;
4903 enum i40e_status_code aq_error = 0;
4905 struct i40e_aq_get_phy_abilities_resp abilities;
4907 aq_error = i40e_aq_get_phy_capabilities(hw,
4908 TRUE, FALSE, &abilities, NULL);
4910 printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4915 "PHY Type : %#010x\n"
4917 "Abilities: %#04x\n"
4919 "EEER reg : %#010x\n"
4921 abilities.phy_type, abilities.link_speed,
4922 abilities.abilities, abilities.eee_capability,
4923 abilities.eeer_val, abilities.d3_lpan);
4925 return (sysctl_handle_string(oidp, buf, strlen(buf), req));
/* Debug sysctl (IXL_DEBUG_SYSCTL): render the VSI's software MAC/VLAN
 * filter list as one newline-separated string. Two passes over the
 * list: first to count entries (to size the buffer), then to format. */
4929 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4931 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4932 struct ixl_vsi *vsi = &pf->vsi;
4933 struct ixl_mac_filter *f;
4938 int ftl_counter = 0;
4942 SLIST_FOREACH(f, &vsi->ftl, next) {
/* Empty list: report a placeholder instead of allocating. */
4947 sysctl_handle_string(oidp, "(none)", 6, req);
4951 buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
4952 buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
4954 sprintf(buf_i++, "\n");
4955 SLIST_FOREACH(f, &vsi->ftl, next) {
4957 MAC_FORMAT ", vlan %4d, flags %#06x",
4958 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4960 /* don't print '\n' for last entry */
4961 if (++ftl_counter != ftl_len) {
4962 sprintf(buf_i, "\n");
4967 error = sysctl_handle_string(oidp, buf, strlen(buf), req);
4969 printf("sysctl error: %d\n", error);
4970 free(buf, M_DEVBUF);
4974 #define IXL_SW_RES_SIZE 0x14
4976 ixl_res_alloc_cmp(const void *a, const void *b)
4978 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4979 one = (struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4980 two = (struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4982 return ((int)one->resource_type - (int)two->resource_type);
/*
 * Sysctl handler: fetch the hardware switch resource allocation table via
 * the admin queue, sort it by resource type, and render it as an sbuf
 * table. (Fragment — error returns and sbuf_delete are missing lines.)
 */
4986 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4988 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4989 struct i40e_hw *hw = &pf->hw;
4990 device_t dev = pf->dev;
4995 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4997 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4999 device_printf(dev, "Could not allocate sbuf for output.\n");
5003 bzero(resp, sizeof(resp));
5004 error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5010 "%s: get_switch_resource_alloc() error %d, aq error %d\n",
5011 __func__, error, hw->aq.asq_last_status);
5016 /* Sort entries by type for display */
5017 qsort(resp, num_entries,
5018 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5019 &ixl_res_alloc_cmp);
5021 sbuf_cat(buf, "\n");
5022 sbuf_printf(buf, "# of entries: %d\n", num_entries);
5024 "Type | Guaranteed | Total | Used | Un-allocated\n"
5025 " | (this) | (all) | (this) | (all) \n");
5026 for (int i = 0; i < num_entries; i++) {
5028 "%#4x | %10d %5d %6d %12d",
5029 resp[i].resource_type,
5033 resp[i].total_unalloced);
/* Suppress the trailing newline on the last row. */
5034 if (i < num_entries - 1)
5035 sbuf_cat(buf, "\n");
5038 error = sbuf_finish(buf);
5040 device_printf(dev, "Error finishing sbuf: %d\n", error);
5047 ** Caller must init and delete sbuf; this function will clear and
5048 ** finish it for caller.
/*
 * Map a switch element ID (SEID) to a human-readable name per the i40e
 * datasheet SEID ranges (MACs 2-3, PFs 16-31, VFs 32-159, VSIs 512-895,
 * ranges as encoded in the chain of comparisons below). Returns the sbuf's
 * backing string; the sbuf is reused across calls by the caller.
 */
5051 ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
/* SEID 0 means "Network" when looking upstream, "Host" downstream. */
5055 if (seid == 0 && uplink)
5056 sbuf_cat(s, "Network");
5058 sbuf_cat(s, "Host");
5062 sbuf_printf(s, "MAC %d", seid - 2);
5063 else if (seid <= 15)
5064 sbuf_cat(s, "Reserved");
5065 else if (seid <= 31)
5066 sbuf_printf(s, "PF %d", seid - 16);
5067 else if (seid <= 159)
5068 sbuf_printf(s, "VF %d", seid - 32);
5069 else if (seid <= 287)
5070 sbuf_cat(s, "Reserved");
5071 else if (seid <= 511)
5072 sbuf_cat(s, "Other"); // for other structures
5073 else if (seid <= 895)
5074 sbuf_printf(s, "VSI %d", seid - 512);
5075 else if (seid <= 1023)
5076 sbuf_printf(s, "Reserved");
5078 sbuf_cat(s, "Invalid");
5081 return sbuf_data(s);
/*
 * Sysctl handler: fetch the firmware's view of the switch topology and
 * print one row per element (SEID, name, uplink, downlink, connection
 * type). (Fragment — error-path returns and sbuf_delete lines missing.)
 */
5085 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5087 struct ixl_pf *pf = (struct ixl_pf *)arg1;
5088 struct i40e_hw *hw = &pf->hw;
5089 device_t dev = pf->dev;
5093 u8 aq_buf[I40E_AQ_LARGE_BUF];
5096 struct i40e_aqc_get_switch_config_resp *sw_config;
5097 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5099 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5101 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
/* NOTE(review): only the first AQ response buffer is consumed; if `next`
 * is nonzero additional elements exist but are not fetched — confirm. */
5105 error = i40e_aq_get_switch_config(hw, sw_config,
5106 sizeof(aq_buf), &next, NULL);
5109 "%s: aq_get_switch_config() error %d, aq error %d\n",
5110 __func__, error, hw->aq.asq_last_status);
/* nmbuf is a scratch sbuf reused for each element's name string. */
5115 nmbuf = sbuf_new_auto();
5117 device_printf(dev, "Could not allocate sbuf for name output.\n");
5122 sbuf_cat(buf, "\n");
5123 // Assuming <= 255 elements in switch
5124 sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
5126 ** Revision -- all elements are revision 1 for now
5129 "SEID ( Name ) | Uplink | Downlink | Conn Type\n"
5130 " | | | (uplink)\n");
5131 for (int i = 0; i < sw_config->header.num_reported; i++) {
5132 // "%4d (%8s) | %8s %8s %#8x",
5133 sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5135 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5136 sw_config->element[i].seid, false));
5137 sbuf_cat(buf, " | ");
5138 sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5139 sw_config->element[i].uplink_seid, true));
5141 sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5142 sw_config->element[i].downlink_seid, false));
5144 sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
5145 if (i < sw_config->header.num_reported - 1)
5146 sbuf_cat(buf, "\n");
5150 error = sbuf_finish(buf);
5152 device_printf(dev, "Error finishing sbuf: %d\n", error);
5158 #endif /* IXL_DEBUG_SYSCTL */
/*
 * SR-IOV: allocate and configure a VSI in firmware for the given VF.
 * Builds an i40e_vsi_context (switch/security/VLAN/queue-map sections),
 * adds the VSI via the admin queue, records the assigned SEID/VSI number
 * in vf->vsi, and disables the VSI's BW limit. Returns 0 or an errno
 * translated from the AQ status. (Fragment — some lines missing.)
 */
5163 ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5166 struct ixl_vsi *vsi;
5167 struct i40e_vsi_context vsi_ctx;
5169 uint16_t first_queue;
5170 enum i40e_status_code code;
5175 vsi_ctx.pf_num = hw->pf_id;
5176 vsi_ctx.uplink_seid = pf->veb_seid;
5177 vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
5178 vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5179 vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
5181 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
5183 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5184 vsi_ctx.info.switch_id = htole16(0);
5186 vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
5187 vsi_ctx.info.sec_flags = 0;
/* Enable HW MAC anti-spoof only when the VF was created with that flag. */
5188 if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
5189 vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
5191 vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
5192 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5193 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5195 vsi_ctx.info.valid_sections |=
5196 htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5197 vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
/* VF queues live after the PF's own queues, IXLV_MAX_QUEUES per VF. */
5198 first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
5199 for (i = 0; i < IXLV_MAX_QUEUES; i++)
5200 vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
/* Mark remaining map slots unused. */
5201 for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
5202 vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
5204 vsi_ctx.info.tc_mapping[0] = htole16(
5205 (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5206 (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5208 code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
5209 if (code != I40E_SUCCESS)
5210 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5211 vf->vsi.seid = vsi_ctx.seid;
5212 vf->vsi.vsi_num = vsi_ctx.vsi_number;
5213 vf->vsi.first_queue = first_queue;
5214 vf->vsi.num_queues = IXLV_MAX_QUEUES;
/* Re-read VSI params to capture firmware-assigned fields (qs_handle etc). */
5216 code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
5217 if (code != I40E_SUCCESS)
5218 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5220 code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
5221 if (code != I40E_SUCCESS) {
5222 device_printf(pf->dev, "Failed to disable BW limit: %d\n",
5223 ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5224 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5227 memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
/*
 * Allocate the VF's VSI, reset its filter accounting, install the
 * broadcast filter and re-apply any existing filters.
 * (Fragment — early-return on alloc failure is a missing line.)
 */
5232 ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5239 error = ixl_vf_alloc_vsi(pf, vf);
5243 vf->vsi.hw_filters_add = 0;
5244 vf->vsi.hw_filters_del = 0;
5245 ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
5246 ixl_reconfigure_filters(&vf->vsi);
/*
 * Program one entry of the VSI's queue table (VSILAN_QTABLE).
 */
5252 ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
5259 * Two queues are mapped in a single register, so we have to do some
5260 * gymnastics to convert the queue number into a register index and
/* Even queue -> low half of the register, odd queue -> high half. */
5264 shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
/* Read-modify-write: clear only this queue's field, preserve its pair. */
5266 qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
5267 qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
5268 qtable |= val << shift;
5269 wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
/*
 * Map the VF's hardware queues: point the VSI at its queue table
 * (non-contiguous mode), enable the VF's TX/RX mapping, fill the per-VF
 * queue table, then map each VSI queue and mark the rest unused.
 */
5273 ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
5282 * Contiguous mappings aren't actually supported by the hardware,
5283 * so we have to use non-contiguous mappings.
5285 wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
5286 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
5288 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
5289 I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
/* Per-VF table: VF-relative queue index -> global queue number. */
5291 for (i = 0; i < vf->vsi.num_queues; i++) {
5292 qtable = (vf->vsi.first_queue + i) <<
5293 I40E_VPLAN_QTABLE_QINDEX_SHIFT;
5295 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
5298 /* Map queues allocated to VF to its VSI. */
5299 for (i = 0; i < vf->vsi.num_queues; i++)
5300 ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);
5302 /* Set rest of VSI queues as unused. */
5303 for (; i < IXL_MAX_VSI_QUEUES; i++)
5304 ixl_vf_map_vsi_queue(hw, vf, i,
5305 I40E_VSILAN_QTABLE_QINDEX_0_MASK);
/*
 * Delete the VF's VSI switch element from firmware.
 * NOTE(review): a guard for vsi->seid == 0 is presumably among the missing
 * lines — callers do check seid elsewhere; confirm.
 */
5311 ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
5320 i40e_aq_delete_element(hw, vsi->seid, NULL);
/*
 * Disable one VF queue interrupt by writing only CLEARPBA (no INTENA bit),
 * which leaves the interrupt disabled and clears any pending event.
 */
5324 ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
5327 wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
/*
 * Terminate a VF interrupt linked list by writing the "end of list"
 * sentinel (all-ones FIRSTQ index/type) into the VPINT_LNKLST register.
 */
5332 ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
5335 wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
5336 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
/*
 * Tear down all HW resources of a VF: delete its VSI, disable every queue
 * interrupt and unhook every interrupt linked list. Vector 0 uses dedicated
 * registers (VFINT_DYN_CTL0 / VPINT_LNKLST0); vectors 1..N-1 use the
 * indexed variants.
 */
5341 ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
5344 uint32_t vfint_reg, vpint_reg;
5349 ixl_vf_vsi_release(pf, &vf->vsi);
5351 /* Index 0 has a special register. */
5352 ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
5354 for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5355 vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i , vf->vf_num);
5356 ixl_vf_disable_queue_intr(hw, vfint_reg);
5359 /* Index 0 has a special register. */
5360 ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
5362 for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5363 vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
5364 ixl_vf_unregister_intr(hw, vpint_reg);
/* Mark the VSI queueless so later paths treat the VF as unconfigured. */
5367 vf->vsi.num_queues = 0;
/*
 * Poll (via the PF_PCI_CIAA/CIAD indirect access window) until the VF has
 * no pending PCIe transactions, up to IXL_VF_RESET_TIMEOUT iterations.
 * (Fragment — the delay inside the loop and the return value are missing
 * lines; presumably returns 0 on success, nonzero on timeout.)
 */
5371 ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
5375 uint16_t global_vf_num;
5379 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5381 wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
5382 (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
5383 for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5384 ciad = rd32(hw, I40E_PF_PCI_CIAD);
5385 if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
/*
 * Trigger a software reset of the VF by setting VFSWR in VPGEN_VFRTRIG,
 * then run the full reinit sequence (ixl_reinit_vf clears the bit again).
 */
5394 ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5401 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5402 vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5403 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5406 ixl_reinit_vf(pf, vf);
/*
 * Complete a VF reset: flush the VF's PCIe transactions, wait for the HW
 * reset-done bit (VFRD), signal I40E_VFR_COMPLETED, clear the reset
 * trigger, disable rings, then rebuild the VSI and queue mappings and
 * finally mark the VF active. Timeouts are reported but not fatal.
 */
5410 ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5413 uint32_t vfrstat, vfrtrig;
5418 error = ixl_flush_pcie(pf, vf);
5420 device_printf(pf->dev,
5421 "Timed out waiting for PCIe activity to stop on VF-%d\n",
5424 for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5427 vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
5428 if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
5432 if (i == IXL_VF_RESET_TIMEOUT)
5433 device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
5435 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
5437 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5438 vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5439 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
/* Only touch rings if a VSI was ever allocated for this VF. */
5441 if (vf->vsi.seid != 0)
5442 ixl_disable_rings(&vf->vsi);
5444 ixl_vf_release_resources(pf, vf);
5445 ixl_vf_setup_vsi(pf, vf);
5446 ixl_vf_map_queues(pf, vf);
5448 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
/*
 * Map a virtchnl opcode to its name for debug logging. (Fragment — the
 * switch header, a few return strings and the default case are missing
 * lines; presumably returns "UNKNOWN" or similar for unknown opcodes.)
 */
5453 ixl_vc_opcode_str(uint16_t op)
5457 case I40E_VIRTCHNL_OP_VERSION:
5459 case I40E_VIRTCHNL_OP_RESET_VF:
5460 return ("RESET_VF");
5461 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
5462 return ("GET_VF_RESOURCES");
5463 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
5464 return ("CONFIG_TX_QUEUE");
5465 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
5466 return ("CONFIG_RX_QUEUE");
5467 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
5468 return ("CONFIG_VSI_QUEUES");
5469 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
5470 return ("CONFIG_IRQ_MAP");
5471 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
5472 return ("ENABLE_QUEUES");
5473 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
5474 return ("DISABLE_QUEUES");
5475 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
5476 return ("ADD_ETHER_ADDRESS");
5477 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
5478 return ("DEL_ETHER_ADDRESS");
5479 case I40E_VIRTCHNL_OP_ADD_VLAN:
5480 return ("ADD_VLAN");
5481 case I40E_VIRTCHNL_OP_DEL_VLAN:
5482 return ("DEL_VLAN");
5483 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
5484 return ("CONFIG_PROMISCUOUS_MODE");
5485 case I40E_VIRTCHNL_OP_GET_STATS:
5486 return ("GET_STATS");
5487 case I40E_VIRTCHNL_OP_FCOE:
5489 case I40E_VIRTCHNL_OP_EVENT:
/*
 * Debug verbosity level for a virtchnl opcode: GET_STATS is chatty, so it
 * presumably gets a higher (less verbose) level than everything else —
 * the return values are among the missing lines; confirm.
 */
5497 ixl_vc_opcode_level(uint16_t opcode)
5501 case I40E_VIRTCHNL_OP_GET_STATS:
/*
 * Send a virtchnl message (opcode + status + optional payload) to a VF
 * through the admin queue, logging it at the opcode's debug level.
 */
5509 ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5510 enum i40e_status_code status, void *msg, uint16_t len)
/* AQ addressing uses the global VF id, not the per-PF index. */
5516 global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
5518 I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
5519 "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
5520 ixl_vc_opcode_str(op), op, status, vf->vf_num);
5522 i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
/* Convenience wrapper: ACK an opcode with I40E_SUCCESS and no payload. */
5526 ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
5529 ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
/*
 * NACK an opcode with an error status, logging the originating source
 * file/line (callers use the i40e_send_vf_nack macro to fill those in).
 */
5533 ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5534 enum i40e_status_code status, const char *file, int line)
5537 I40E_VC_DEBUG(pf, 1,
5538 "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
5539 ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
5540 ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
/*
 * Handle I40E_VIRTCHNL_OP_VERSION: validate the payload size, then reply
 * with the PF's supported virtchnl major/minor version.
 */
5544 ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5547 struct i40e_virtchnl_version_info reply;
5549 if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
5550 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
5555 reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
5556 reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
5557 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
/*
 * Handle I40E_VIRTCHNL_OP_RESET_VF: payload must be empty; perform the
 * reset. Per the virtchnl protocol, no response is sent.
 */
5562 ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5566 if (msg_size != 0) {
5567 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
5572 ixl_reset_vf(pf, vf);
5574 /* No response to a reset message. */
/*
 * Handle I40E_VIRTCHNL_OP_GET_VF_RESOURCES: report the VF's VSI, queue
 * count, vector count and assigned MAC. Only L2 offload is advertised.
 */
5578 ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5581 struct i40e_virtchnl_vf_resource reply;
5583 if (msg_size != 0) {
5584 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5589 bzero(&reply, sizeof(reply));
5591 reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
5594 reply.num_queue_pairs = vf->vsi.num_queues;
5595 reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
5596 reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
5597 reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
5598 reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
5599 memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
5601 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5602 I40E_SUCCESS, &reply, sizeof(reply));
/*
 * Program the HMC TX queue context for one VF queue from the virtchnl
 * txq_info, then bind the queue to the VF via QTX_CTL. Returns 0 or an
 * errno. (Fragment — EINVAL returns on context errors are missing lines.)
 */
5606 ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5607 struct i40e_virtchnl_txq_info *info)
5610 struct i40e_hmc_obj_txq txq;
5611 uint16_t global_queue_num, global_vf_num;
5612 enum i40e_status_code status;
/* VF-relative queue id -> global queue number. */
5616 global_queue_num = vf->vsi.first_queue + info->queue_id;
5617 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5618 bzero(&txq, sizeof(txq));
5620 status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
5621 if (status != I40E_SUCCESS)
/* DMA base is stored in HW units, not bytes. */
5624 txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
5626 txq.head_wb_ena = info->headwb_enabled;
5627 txq.head_wb_addr = info->dma_headwb_addr;
5628 txq.qlen = info->ring_len;
5629 txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
5630 txq.rdylist_act = 0;
5632 status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
5633 if (status != I40E_SUCCESS)
5636 qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
5637 (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
5638 (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
5639 wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
/*
 * Program the HMC RX queue context for one VF queue from the virtchnl
 * rxq_info. Validates buffer and frame sizes (untrusted VF input) and
 * optional header-split settings before writing the context. Returns 0
 * or an errno. (Fragment — EINVAL returns are missing lines.)
 */
5646 ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5647 struct i40e_virtchnl_rxq_info *info)
5650 struct i40e_hmc_obj_rxq rxq;
5651 uint16_t global_queue_num;
5652 enum i40e_status_code status;
5655 global_queue_num = vf->vsi.first_queue + info->queue_id;
5656 bzero(&rxq, sizeof(rxq));
/* Reject out-of-range sizes supplied by the (untrusted) VF. */
5658 if (info->databuffer_size > IXL_VF_MAX_BUFFER)
5661 if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
5662 info->max_pkt_size < ETHER_MIN_LEN)
5665 if (info->splithdr_enabled) {
5666 if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
/* Mask to the split positions the HW understands. */
5669 rxq.hsplit_0 = info->rx_split_pos &
5670 (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
5671 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
5672 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
5673 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
5674 rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
5679 status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
5680 if (status != I40E_SUCCESS)
5683 rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
5684 rxq.qlen = info->ring_len;
5686 rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
5692 rxq.rxmax = info->max_pkt_size;
/* Enable TPH hints for descriptors, writeback, data and header. */
5693 rxq.tphrdesc_ena = 1;
5694 rxq.tphwdesc_ena = 1;
5695 rxq.tphdata_ena = 1;
5696 rxq.tphhead_ena = 1;
5700 status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
5701 if (status != I40E_SUCCESS)
/*
 * Handle I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: validate the variable-length
 * message (header + num_queue_pairs pair entries, all sizes and ids
 * cross-checked against the VF's VSI), then program each TX/RX queue
 * context. NACKs with I40E_ERR_PARAM on any validation failure.
 */
5708 ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5711 struct i40e_virtchnl_vsi_queue_config_info *info;
5712 struct i40e_virtchnl_queue_pair_info *pair;
5715 if (msg_size < sizeof(*info)) {
5716 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5722 if (info->num_queue_pairs == 0) {
5723 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
/* Exact-size check prevents reading past the received buffer. */
5728 if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
5729 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5734 if (info->vsi_id != vf->vsi.vsi_num) {
5735 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5740 for (i = 0; i < info->num_queue_pairs; i++) {
5741 pair = &info->qpair[i];
5743 if (pair->txq.vsi_id != vf->vsi.vsi_num ||
5744 pair->rxq.vsi_id != vf->vsi.vsi_num ||
5745 pair->txq.queue_id != pair->rxq.queue_id ||
5746 pair->txq.queue_id >= vf->vsi.num_queues) {
5748 i40e_send_vf_nack(pf, vf,
5749 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5753 if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
5754 i40e_send_vf_nack(pf, vf,
5755 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5759 if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
5760 i40e_send_vf_nack(pf, vf,
5761 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5766 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
/*
 * Link one queue into the VF's interrupt chain: write QINT_RQCTL/TQCTL for
 * cur_queue pointing at the previously-linked queue (*last_type/*last_queue),
 * then advance the "last" cursor. The list is built back-to-front.
 */
5770 ixl_vf_set_qctl(struct ixl_pf *pf,
5771 const struct i40e_virtchnl_vector_map *vector,
5772 enum i40e_queue_type cur_type, uint16_t cur_queue,
5773 enum i40e_queue_type *last_type, uint16_t *last_queue)
5775 uint32_t offset, qctl;
5778 if (cur_type == I40E_QUEUE_TYPE_RX) {
5779 offset = I40E_QINT_RQCTL(cur_queue);
5780 itr_indx = vector->rxitr_idx;
5782 offset = I40E_QINT_TQCTL(cur_queue);
5783 itr_indx = vector->txitr_idx;
/* NOTE(review): htole32() on a value handed to wr32() looks redundant —
 * wr32 presumably handles byte order itself; on big-endian hosts this
 * would double-swap. Verify against the bus-space wrapper. */
5786 qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5787 (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5788 (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5789 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5790 (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
5792 wr32(&pf->hw, offset, qctl);
5794 *last_type = cur_type;
5795 *last_queue = cur_queue;
/*
 * Build the interrupt linked list for one MSI-X vector of a VF: interleave
 * the TX and RX queues named in the vector's bitmaps, then point the
 * vector's VPINT_LNKLST register at the head of the chain.
 */
5799 ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
5800 const struct i40e_virtchnl_vector_map *vector)
5804 enum i40e_queue_type type, last_type;
5805 uint32_t lnklst_reg;
5806 uint16_t rxq_map, txq_map, cur_queue, last_queue;
5810 rxq_map = vector->rxq_map;
5811 txq_map = vector->txq_map;
/* Start with the end-of-list sentinel; chain is linked back-to-front. */
5813 last_queue = IXL_END_OF_INTR_LNKLST;
5814 last_type = I40E_QUEUE_TYPE_RX;
5817 * The datasheet says to optimize performance, RX queues and TX queues
5818 * should be interleaved in the interrupt linked list, so we process
5819 * both at once here.
5821 while ((rxq_map != 0) || (txq_map != 0)) {
5823 qindex = ffs(txq_map) - 1;
5824 type = I40E_QUEUE_TYPE_TX;
5825 cur_queue = vf->vsi.first_queue + qindex;
5826 ixl_vf_set_qctl(pf, vector, type, cur_queue,
5827 &last_type, &last_queue);
5828 txq_map &= ~(1 << qindex);
5832 qindex = ffs(rxq_map) - 1;
5833 type = I40E_QUEUE_TYPE_RX;
5834 cur_queue = vf->vsi.first_queue + qindex;
5835 ixl_vf_set_qctl(pf, vector, type, cur_queue,
5836 &last_type, &last_queue);
5837 rxq_map &= ~(1 << qindex);
/* Vector 0 has a dedicated register; others use the indexed variant. */
5841 if (vector->vector_id == 0)
5842 lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
5844 lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
5846 wr32(hw, lnklst_reg,
5847 (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5848 (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
/*
 * Handle I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: validate the variable-length
 * vector map (vector id range, VSI id, queue bitmaps within the VF's
 * queue count, ITR index bounds), then program each vector's linked list.
 */
5854 ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5857 struct i40e_virtchnl_irq_map_info *map;
5858 struct i40e_virtchnl_vector_map *vector;
5860 int i, largest_txq, largest_rxq;
5864 if (msg_size < sizeof(*map)) {
5865 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5871 if (map->num_vectors == 0) {
5872 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5877 if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
5878 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5883 for (i = 0; i < map->num_vectors; i++) {
5884 vector = &map->vecmap[i];
5886 if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
5887 vector->vsi_id != vf->vsi.vsi_num) {
5888 i40e_send_vf_nack(pf, vf,
5889 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
/* Highest set bit in each bitmap must name a queue the VF owns. */
5893 if (vector->rxq_map != 0) {
5894 largest_rxq = fls(vector->rxq_map) - 1;
5895 if (largest_rxq >= vf->vsi.num_queues) {
5896 i40e_send_vf_nack(pf, vf,
5897 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5903 if (vector->txq_map != 0) {
5904 largest_txq = fls(vector->txq_map) - 1;
5905 if (largest_txq >= vf->vsi.num_queues) {
5906 i40e_send_vf_nack(pf, vf,
5907 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5913 if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
5914 vector->txitr_idx > IXL_MAX_ITR_IDX) {
5915 i40e_send_vf_nack(pf, vf,
5916 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5921 ixl_vf_config_vector(pf, vf, vector);
5924 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
/*
 * Handle I40E_VIRTCHNL_OP_ENABLE_QUEUES: validate the queue_select payload
 * and enable the VF VSI's rings. NOTE(review): the rx/tx queue bitmaps in
 * the request are only checked for non-zero — all rings are enabled
 * regardless of which bits are set.
 */
5928 ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5931 struct i40e_virtchnl_queue_select *select;
5934 if (msg_size != sizeof(*select)) {
5935 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
5941 if (select->vsi_id != vf->vsi.vsi_num ||
5942 select->rx_queues == 0 || select->tx_queues == 0) {
5943 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
5948 error = ixl_enable_rings(&vf->vsi);
5950 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
5955 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
/*
 * Handle I40E_VIRTCHNL_OP_DISABLE_QUEUES: mirror image of the enable
 * handler — validate the queue_select payload and disable all rings.
 */
5959 ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
5960 void *msg, uint16_t msg_size)
5962 struct i40e_virtchnl_queue_select *select;
5965 if (msg_size != sizeof(*select)) {
5966 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
5972 if (select->vsi_id != vf->vsi.vsi_num ||
5973 select->rx_queues == 0 || select->tx_queues == 0) {
5974 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
5979 error = ixl_disable_rings(&vf->vsi);
5981 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
5986 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
/* True if addr is the all-zeros Ethernet address. */
5990 ixl_zero_mac(const uint8_t *addr)
5992 uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
5994 return (cmp_etheraddr(addr, zero));
/* True if addr is the Ethernet broadcast address. */
5998 ixl_bcast_mac(const uint8_t *addr)
6001 return (cmp_etheraddr(addr, ixl_bcast_addr));
/*
 * Validate a MAC address a VF wants to filter on: reject zero/broadcast,
 * and unless the VF may set its own MAC, reject any unicast address other
 * than the one assigned to it. (Fragment — the error-return lines are
 * missing; presumably returns 0 on success, EPERM/EINVAL otherwise.)
 */
6005 ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
6008 if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
6012 * If the VF is not allowed to change its MAC address, don't let it
6013 * set a MAC filter for an address that is not a multicast address and
6014 * is not its assigned MAC.
6016 if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
6017 !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
/*
 * Handle I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: validate the variable-length
 * address list and each address, then install the filters. Validation is
 * done in a first pass so the operation is all-or-nothing.
 */
6024 ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6027 struct i40e_virtchnl_ether_addr_list *addr_list;
6028 struct i40e_virtchnl_ether_addr *addr;
6029 struct ixl_vsi *vsi;
6031 size_t expected_size;
6035 if (msg_size < sizeof(*addr_list)) {
6036 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6042 expected_size = sizeof(*addr_list) +
6043 addr_list->num_elements * sizeof(*addr);
6045 if (addr_list->num_elements == 0 ||
6046 addr_list->vsi_id != vsi->vsi_num ||
6047 msg_size != expected_size) {
6048 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6053 for (i = 0; i < addr_list->num_elements; i++) {
6054 if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
6055 i40e_send_vf_nack(pf, vf,
6056 I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6061 for (i = 0; i < addr_list->num_elements; i++) {
6062 addr = &addr_list->list[i];
6063 ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
6066 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
6070 ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6073 struct i40e_virtchnl_ether_addr_list *addr_list;
6074 struct i40e_virtchnl_ether_addr *addr;
6075 size_t expected_size;
6078 if (msg_size < sizeof(*addr_list)) {
6079 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6085 expected_size = sizeof(*addr_list) +
6086 addr_list->num_elements * sizeof(*addr);
6088 if (addr_list->num_elements == 0 ||
6089 addr_list->vsi_id != vf->vsi.vsi_num ||
6090 msg_size != expected_size) {
6091 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6096 for (i = 0; i < addr_list->num_elements; i++) {
6097 addr = &addr_list->list[i];
6098 if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
6099 i40e_send_vf_nack(pf, vf,
6100 I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6105 for (i = 0; i < addr_list->num_elements; i++) {
6106 addr = &addr_list->list[i];
6107 ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
6110 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
6113 static enum i40e_status_code
/*
 * Switch the VF's VSI to strip VLAN tags on receive by updating only the
 * VLAN section of the VSI context (EMOD_STR_BOTH).
 */
6114 ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
6116 struct i40e_vsi_context vsi_ctx;
6118 vsi_ctx.seid = vf->vsi.seid;
6120 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
6121 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
6122 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
6123 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6124 return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
/*
 * Handle I40E_VIRTCHNL_OP_ADD_VLAN: validate the variable-length VLAN id
 * list and the VF's VLAN capability, enable VLAN stripping, then add a
 * filter for (vf->mac, vlan) for each requested id.
 */
6128 ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6131 struct i40e_virtchnl_vlan_filter_list *filter_list;
6132 enum i40e_status_code code;
6133 size_t expected_size;
6136 if (msg_size < sizeof(*filter_list)) {
6137 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6143 expected_size = sizeof(*filter_list) +
6144 filter_list->num_elements * sizeof(uint16_t);
6145 if (filter_list->num_elements == 0 ||
6146 filter_list->vsi_id != vf->vsi.vsi_num ||
6147 msg_size != expected_size) {
6148 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6153 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6154 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
/* Every VLAN id must be a valid 12-bit id before anything is changed. */
6159 for (i = 0; i < filter_list->num_elements; i++) {
6160 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6161 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6167 code = ixl_vf_enable_vlan_strip(pf, vf);
6168 if (code != I40E_SUCCESS) {
6169 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6173 for (i = 0; i < filter_list->num_elements; i++)
6174 ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6176 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
6180 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6183 struct i40e_virtchnl_vlan_filter_list *filter_list;
6185 size_t expected_size;
6187 if (msg_size < sizeof(*filter_list)) {
6188 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6194 expected_size = sizeof(*filter_list) +
6195 filter_list->num_elements * sizeof(uint16_t);
6196 if (filter_list->num_elements == 0 ||
6197 filter_list->vsi_id != vf->vsi.vsi_num ||
6198 msg_size != expected_size) {
6199 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6204 for (i = 0; i < filter_list->num_elements; i++) {
6205 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6206 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6212 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6213 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6218 for (i = 0; i < filter_list->num_elements; i++)
6219 ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6221 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
6225 ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6226 void *msg, uint16_t msg_size)
6228 struct i40e_virtchnl_promisc_info *info;
6229 enum i40e_status_code code;
6231 if (msg_size != sizeof(*info)) {
6232 i40e_send_vf_nack(pf, vf,
6233 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6237 if (!vf->vf_flags & VF_FLAG_PROMISC_CAP) {
6238 i40e_send_vf_nack(pf, vf,
6239 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6244 if (info->vsi_id != vf->vsi.vsi_num) {
6245 i40e_send_vf_nack(pf, vf,
6246 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6250 code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
6251 info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
6252 if (code != I40E_SUCCESS) {
6253 i40e_send_vf_nack(pf, vf,
6254 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6258 code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
6259 info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
6260 if (code != I40E_SUCCESS) {
6261 i40e_send_vf_nack(pf, vf,
6262 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6266 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
/*
 * Handle I40E_VIRTCHNL_OP_GET_STATS: validate the queue_select payload,
 * refresh the VSI's ethernet stats from HW, and send them back.
 */
6270 ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6273 struct i40e_virtchnl_queue_select *queue;
6275 if (msg_size != sizeof(*queue)) {
6276 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6282 if (queue->vsi_id != vf->vsi.vsi_num) {
6283 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6288 ixl_update_eth_stats(&vf->vsi);
6290 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6291 I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
/*
 * Dispatch an incoming virtchnl admin-queue event to the per-opcode
 * handler. The VF index comes from the AQ descriptor's retval field
 * (global id minus vf_base_id) and is bounds-checked before use.
 */
6295 ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
6299 uint16_t vf_num, msg_size;
6302 vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
6303 opcode = le32toh(event->desc.cookie_high);
6305 if (vf_num >= pf->num_vfs) {
6306 device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
6310 vf = &pf->vfs[vf_num];
6311 msg = event->msg_buf;
6312 msg_size = event->msg_len;
6314 I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
6315 "Got msg %s(%d) from VF-%d of size %d\n",
6316 ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);
6319 case I40E_VIRTCHNL_OP_VERSION:
6320 ixl_vf_version_msg(pf, vf, msg, msg_size);
6322 case I40E_VIRTCHNL_OP_RESET_VF:
6323 ixl_vf_reset_msg(pf, vf, msg, msg_size);
6325 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
6326 ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
6328 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
6329 ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
6331 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
6332 ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
6334 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
6335 ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
6337 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
6338 ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
6340 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
6341 ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
6343 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
6344 ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
6346 case I40E_VIRTCHNL_OP_ADD_VLAN:
6347 ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
6349 case I40E_VIRTCHNL_OP_DEL_VLAN:
6350 ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
6352 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
6353 ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
6355 case I40E_VIRTCHNL_OP_GET_STATS:
6356 ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
6359 /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
6360 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
6361 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
6363 i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
6368 /* Handle any VFs that have reset themselves via a Function Level Reset(FLR). */
/*
 * Taskqueue handler: scan the GLGEN_VFLRSTAT registers for VFs whose FLR
 * bit is set, acknowledge each one and reinitialize that VF, then re-arm
 * the VFLR interrupt cause in PFINT_ICR0_ENA.
 * NOTE(review): lines are missing from this chunk (locking, the written
 * value at line "6389", loop-closing braces); comments cover only the
 * visible logic.
 */
6370 ixl_handle_vflr(void *arg, int pending)
6374 uint16_t global_vf_num;
6375 uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
6382 for (i = 0; i < pf->num_vfs; i++) {
/* VFLRSTAT registers are indexed by the absolute (global) VF id. */
6383 global_vf_num = hw->func_caps.vf_base_id + i;
6385 vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
6386 vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
6387 vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
6388 if (vflrstat & vflrstat_mask) {
/* Acknowledge the FLR (presumably write-1-to-clear) before reinit. */
6389 wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
6392 ixl_reinit_vf(pf, &pf->vfs[i]);
/* Re-enable the VFLR interrupt cause now that pending FLRs are handled. */
6396 icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
6397 icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
6398 wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
/*
 * Map an i40e admin-queue error code onto the closest errno(2) value.
 * NOTE(review): the return statements for most cases are missing from
 * this chunk; only the case labels and a single visible return remain,
 * so the exact errno chosen per case cannot be confirmed here.
 */
6405 ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
6409 case I40E_AQ_RC_EPERM:
6411 case I40E_AQ_RC_ENOENT:
6413 case I40E_AQ_RC_ESRCH:
6415 case I40E_AQ_RC_EINTR:
6417 case I40E_AQ_RC_EIO:
6419 case I40E_AQ_RC_ENXIO:
6421 case I40E_AQ_RC_E2BIG:
6423 case I40E_AQ_RC_EAGAIN:
6425 case I40E_AQ_RC_ENOMEM:
6427 case I40E_AQ_RC_EACCES:
6429 case I40E_AQ_RC_EFAULT:
6431 case I40E_AQ_RC_EBUSY:
6433 case I40E_AQ_RC_EEXIST:
6435 case I40E_AQ_RC_EINVAL:
6437 case I40E_AQ_RC_ENOTTY:
6439 case I40E_AQ_RC_ENOSPC:
6441 case I40E_AQ_RC_ENOSYS:
6443 case I40E_AQ_RC_ERANGE:
6445 case I40E_AQ_RC_EFLUSHED:
6446 return (EINVAL); /* No exact equivalent in errno.h */
6447 case I40E_AQ_RC_BAD_ADDR:
6449 case I40E_AQ_RC_EMODE:
6451 case I40E_AQ_RC_EFBIG:
/*
 * PCI SR-IOV init method: allocate the per-VF state array, initialize a
 * sysctl context for each VF, create a VEB under the PF VSI's uplink so
 * the VF VSIs can attach to it, and record num_vfs on success.
 * NOTE(review): lines are missing from this chunk (locking, pf_vsi
 * assignment, malloc flags after M_NOWAIT, error returns); comments
 * cover only the visible logic.
 */
6459 ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t *params)
6463 struct ixl_vsi *pf_vsi;
6464 enum i40e_status_code ret;
6467 pf = device_get_softc(dev);
/* M_NOWAIT: presumably this path may not sleep here — TODO confirm
 * lock state at this point. */
6472 pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
6475 if (pf->vfs == NULL) {
/* Each VF gets its own sysctl context for its per-VF subtree. */
6480 for (i = 0; i < num_vfs; i++)
6481 sysctl_ctx_init(&pf->vfs[i].ctx);
/* Create a VEB (virtual ethernet bridge) on the PF VSI's uplink. */
6483 ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
6484 1, FALSE, FALSE, &pf->veb_seid, NULL);
6485 if (ret != I40E_SUCCESS) {
/* Translate the firmware status into an errno for the iov framework. */
6486 error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
6487 device_printf(dev, "add_veb failed; code=%d error=%d", ret,
6492 ixl_configure_msix(pf);
6493 ixl_enable_adminq(hw);
6495 pf->num_vfs = num_vfs;
/* Error path: release the VF array allocated above. */
6500 free(pf->vfs, M_IXL);
/*
 * PCI SR-IOV uninit method: delete each VF's VSI and the VEB from the
 * firmware, quiesce interrupts if the interface is down, then tear down
 * the per-VF sysctl contexts after dropping the lock (sysctl_ctx_free
 * might sleep — see comment at "6542").
 * NOTE(review): lines are missing from this chunk (lock/unlock, the
 * capture of the 'vfs' pointer used at "6544", freeing of the array);
 * comments cover only the visible logic.
 */
6507 ixl_uninit_iov(device_t dev)
6511 struct ixl_vsi *vsi;
6516 pf = device_get_softc(dev);
/* A seid of 0 means that VF's VSI was never created in firmware. */
6522 for (i = 0; i < pf->num_vfs; i++) {
6523 if (pf->vfs[i].vsi.seid != 0)
6524 i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
6527 if (pf->veb_seid != 0) {
6528 i40e_aq_delete_element(hw, pf->veb_seid, NULL);
6532 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
6533 ixl_disable_intr(vsi);
/* Stash the count so sysctl teardown can run after unlocking. */
6536 num_vfs = pf->num_vfs;
6542 /* Do this after the unlock as sysctl_ctx_free might sleep. */
6543 for (i = 0; i < num_vfs; i++)
6544 sysctl_ctx_free(&vfs[i].ctx);
/*
 * PCI SR-IOV add_vf method: set up the VSI and capability flags for VF
 * number 'vfnum' from the nvlist parameters supplied by the iov
 * framework, reset the VF, and attach its sysctl subtree ("vf<N>").
 * NOTE(review): lines are missing from this chunk (locking, the else
 * branch around "6580", error handling after setup); comments cover
 * only the visible logic.
 */
6549 ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
6551 char sysctl_name[QUEUE_NAME_LEN];
6558 pf = device_get_softc(dev);
6559 vf = &pf->vfs[vfnum];
/* Start from a clean flag set; ftl is the VSI's MAC filter list. */
6565 vf->vf_flags = VF_FLAG_ENABLED;
6566 SLIST_INIT(&vf->vsi.ftl);
6568 error = ixl_vf_setup_vsi(pf, vf);
/* Administrator-assigned MAC: copy it; allow-set-mac additionally lets
 * the VF change it later. */
6572 if (nvlist_exists_binary(params, "mac-addr")) {
6573 mac = nvlist_get_binary(params, "mac-addr", &size);
6574 bcopy(mac, vf->mac, ETHER_ADDR_LEN);
6576 if (nvlist_get_bool(params, "allow-set-mac"))
6577 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6580 * If the administrator has not specified a MAC address then
6581 * we must allow the VF to choose one.
6583 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6585 if (nvlist_get_bool(params, "mac-anti-spoof"))
6586 vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
6588 if (nvlist_get_bool(params, "allow-promisc"))
6589 vf->vf_flags |= VF_FLAG_PROMISC_CAP;
/* VLAN capability is granted unconditionally in the visible code —
 * NOTE(review): confirm no nvlist knob gates this in the full file. */
6591 vf->vf_flags |= VF_FLAG_VLAN_CAP;
6593 ixl_reset_vf(pf, vf);
/* Per-VF sysctl node named "vf<N>" under the driver's tree. */
6597 snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
6598 ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
6603 #endif /* PCI_IOV */