/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#ifndef IXL_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixl.h"
#include "ixl_pf.h"

#ifdef RSS
#include <net/rss_config.h>
#endif
/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixl_driver_version[] = "1.3.6";
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixl_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/
static ixl_vendor_info_t ixl_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixl_strings[] = {
	"Intel(R) Ethernet Connection XL710 Driver"
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixl_probe(device_t);
static int	ixl_attach(device_t);
static int	ixl_detach(device_t);
static int	ixl_shutdown(device_t);
static int	ixl_get_hw_capabilities(struct ixl_pf *);
static void	ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
static int	ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixl_init(void *);
static void	ixl_init_locked(struct ixl_pf *);
static void	ixl_stop(struct ixl_pf *);
static void	ixl_media_status(struct ifnet *, struct ifmediareq *);
static int	ixl_media_change(struct ifnet *);
static void	ixl_update_link_status(struct ixl_pf *);
static int	ixl_allocate_pci_resources(struct ixl_pf *);
static u16	ixl_get_bus_info(struct i40e_hw *, device_t);
static int	ixl_setup_stations(struct ixl_pf *);
static int	ixl_switch_config(struct ixl_pf *);
static int	ixl_initialize_vsi(struct ixl_vsi *);
static int	ixl_assign_vsi_msix(struct ixl_pf *);
static int	ixl_assign_vsi_legacy(struct ixl_pf *);
static int	ixl_init_msix(struct ixl_pf *);
static void	ixl_configure_msix(struct ixl_pf *);
static void	ixl_configure_itr(struct ixl_pf *);
static void	ixl_configure_legacy(struct ixl_pf *);
static void	ixl_free_pci_resources(struct ixl_pf *);
static void	ixl_local_timer(void *);
static int	ixl_setup_interface(device_t, struct ixl_vsi *);
static bool	ixl_config_link(struct i40e_hw *);
static void	ixl_config_rss(struct ixl_vsi *);
static void	ixl_set_queue_rx_itr(struct ixl_queue *);
static void	ixl_set_queue_tx_itr(struct ixl_queue *);
static int	ixl_set_advertised_speeds(struct ixl_pf *, int);

static void	ixl_enable_rings(struct ixl_vsi *);
static void	ixl_disable_rings(struct ixl_vsi *);
static void	ixl_enable_intr(struct ixl_vsi *);
static void	ixl_disable_intr(struct ixl_vsi *);

static void	ixl_enable_adminq(struct i40e_hw *);
static void	ixl_disable_adminq(struct i40e_hw *);
static void	ixl_enable_queue(struct i40e_hw *, int);
static void	ixl_disable_queue(struct i40e_hw *, int);
static void	ixl_enable_legacy(struct i40e_hw *);
static void	ixl_disable_legacy(struct i40e_hw *);

static void	ixl_set_promisc(struct ixl_vsi *);
static void	ixl_add_multi(struct ixl_vsi *);
static void	ixl_del_multi(struct ixl_vsi *);
static void	ixl_register_vlan(void *, struct ifnet *, u16);
static void	ixl_unregister_vlan(void *, struct ifnet *, u16);
static void	ixl_setup_vlan_filters(struct ixl_vsi *);

static void	ixl_init_filters(struct ixl_vsi *);
static void	ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_add_hw_filters(struct ixl_vsi *, int, int);
static void	ixl_del_hw_filters(struct ixl_vsi *, int);
static struct ixl_mac_filter *
		ixl_find_filter(struct ixl_vsi *, u8 *, s16);
static void	ixl_add_mc_filter(struct ixl_vsi *, u8 *);

/* Sysctl debug interface */
static int	ixl_debug_info(SYSCTL_HANDLER_ARGS);
static void	ixl_print_debug_info(struct ixl_pf *);

/* The MSI/X Interrupt handlers */
static void	ixl_intr(void *);
static void	ixl_msix_que(void *);
static void	ixl_msix_adminq(void *);
static void	ixl_handle_mdd_event(struct ixl_pf *);

/* Deferred interrupt tasklets */
static void	ixl_do_adminq(void *, int);

/* Sysctl handlers */
static int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);

static void	ixl_add_hw_stats(struct ixl_pf *);
static void	ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
static void	ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *,
		    struct i40e_eth_stats *);
static void	ixl_update_stats_counters(struct ixl_pf *);
static void	ixl_update_eth_stats(struct ixl_vsi *);
static void	ixl_pf_reset_stats(struct ixl_pf *);
static void	ixl_vsi_reset_stats(struct ixl_vsi *);
static void	ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
		    u64 *, u64 *);
static void	ixl_stat_update32(struct i40e_hw *, u32, bool,
		    u64 *, u64 *);

#ifdef IXL_DEBUG_SYSCTL
static int	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS);
#endif
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixl_probe),
	DEVMETHOD(device_attach, ixl_attach),
	DEVMETHOD(device_detach, ixl_detach),
	DEVMETHOD(device_shutdown, ixl_shutdown),
	{0, 0}
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ixl, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
/*
** Global reset mutex
*/
static struct mtx ixl_reset_mtx;
/*
** TUNABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
    "IXL driver parameters");
/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixl_enable_msix = 1;
TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
    "Enable MSI-X interrupts");
/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixl_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixl_ringsz, 0, "Descriptor Ring Size");
/*
** This can be set manually; if left as 0, the
** number of queues will be calculated based on
** the CPUs and MSI-X vectors available.
*/
int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixl_max_queues, 0, "Number of Queues");
/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
**	- default values for static ITR
*/
int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");
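
/*
** Usage sketch: since the knobs above are registered with TUNABLE_INT
** and CTLFLAG_RDTUN, they can be set as boot-time tunables from
** loader.conf(5), e.g. (values purely illustrative, not recommendations):
**
**	hw.ixl.enable_msix=1
**	hw.ixl.ringsz=1024
**	hw.ixl.max_queues=4
**	hw.ixl.dynamic_rx_itr=1
*/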
#ifdef IXL_FDIR
static int ixl_enable_fdir = 1;
TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
/* Rate at which we sample */
int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif

#ifdef DEV_NETMAP
#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */

static char *ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};
/*********************************************************************
 *  Device identification routine
 *
 *  ixl_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixl_probe(device_t dev)
{
	ixl_vendor_info_t *ent;

	u16	pci_vendor_id, pci_device_id;
	u16	pci_subvendor_id, pci_subdevice_id;
	char	device_name[256];
	static bool lock_init = FALSE;

	INIT_DEBUGOUT("ixl_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixl_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&
		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&
		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(device_name, "%s, Version - %s",
				ixl_strings[ent->index],
				ixl_driver_version);
			device_set_desc_copy(dev, device_name);
			/* One shot mutex init */
			if (lock_init == FALSE) {
				lock_init = TRUE;
				mtx_init(&ixl_reset_mtx,
				    "ixl_reset",
				    "IXL RESET Lock", MTX_DEF);
			}
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_attach(device_t dev)
{
	struct ixl_pf	*pf;
	struct i40e_hw	*hw;
	struct ixl_vsi	*vsi;
	u16		bus;
	int		error = 0;

	INIT_DEBUGOUT("ixl_attach: begin");

	/* Allocate, clear, and link in our primary soft structure */
	pf = device_get_softc(dev);
	pf->dev = pf->osdep.dev = dev;
	hw = &pf->hw;

	/*
	** Note this assumes we have a single embedded VSI,
	** this could be enhanced later to allocate multiple
	*/
	vsi = &pf->vsi;
	vsi->dev = pf->dev;

	/* Core Lock Init */
	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
	/* Set up sysctls */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_advertise, "I", "Advertised Speed");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_current_speed, "A", "Current Port Speed");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "rx_itr", CTLFLAG_RW,
	    &ixl_rx_itr, IXL_ITR_8K, "RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
	    &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "tx_itr", CTLFLAG_RW,
	    &ixl_tx_itr, IXL_ITR_4K, "TX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
	    &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");

#ifdef IXL_DEBUG_SYSCTL
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dump_desc", CTLTYPE_INT | CTLFLAG_WR,
	    pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump");
#endif
	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Create for initial debugging use */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW, pf, 0,
	    ixl_debug_info, "I", "Debug Information");
	/* Establish a clean starting point */
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev, "PF reset failure %x\n", error);
		error = EIO;
		goto err_out;
	}

	/* Set admin queue parameters */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

	/* Initialize the shared code */
	error = i40e_init_shared_code(hw);
	if (error) {
		device_printf(dev, "Unable to initialize the shared code\n");
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n"
		    "You must install the most recent version of "
		    "the network driver.\n");
		goto err_out;
	}
	device_printf(dev, "%s\n", ixl_fw_version_str(hw));

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n"
		    "Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n"
		    "Please update the NVM image.\n");
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "HW capabilities failure!\n");
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = i40e_init_lan_hmc(hw, vsi->num_queues, vsi->num_queues, 0, 0);
	if (error) {
		device_printf(dev, "init_lan_hmc failed: %d\n", error);
		goto err_get_cap;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
		goto err_mac_hmc;
	}

	/* Disable LLDP from the firmware */
	i40e_aq_stop_lldp(hw, TRUE, NULL);

	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up VSI and queues */
	if (ixl_setup_stations(pf) != 0) {
		device_printf(dev, "setup stations failed!\n");
		error = ENOMEM;
		goto err_mac_hmc;
	}

	/* Initialize mac filter list for VSI */
	SLIST_INIT(&vsi->ftl);

	/* Set up interrupt routing here */
	if (pf->msix > 1)
		error = ixl_assign_vsi_msix(pf);
	else
		error = ixl_assign_vsi_legacy(pf);
	if (error)
		goto err_late;

	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error)
			device_printf(dev, "link restart failed, aq_err=%d\n",
			    pf->hw.aq.asq_last_status);
	}
	/* Determine link state */
	vsi->link_up = ixl_config_link(hw);

	/* Report if Unqualified modules are found */
	if ((vsi->link_up == FALSE) &&
	    (pf->hw.phy.link_info.link_info &
	    I40E_AQ_MEDIA_AVAILABLE) &&
	    (!(pf->hw.phy.link_info.an_info &
	    I40E_AQ_QUALIFIED_MODULE)))
		device_printf(dev, "Link failed because "
		    "an unqualified module was detected\n");

	/* Setup OS specific network interface */
	if (ixl_setup_interface(dev, vsi) != 0) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err_late;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial switch config failed: %d\n", error);
		goto err_mac_hmc;
	}

	/* Limit phy interrupts to link and module failure */
	error = i40e_aq_set_phy_int_mask(hw,
	    I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
	if (error)
		device_printf(dev, "set phy mask failed: %d\n", error);

	/* Get the bus configuration and set the shared code */
	bus = ixl_get_bus_info(hw, dev);
	i40e_set_pci_config_data(hw, bus);

	/* Initialize statistics */
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

#ifdef DEV_NETMAP
	ixl_netmap_attach(vsi);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixl_attach: end");
	return (0);

err_late:
	if (vsi->ifp != NULL)
		if_free(vsi->ifp);
err_mac_hmc:
	i40e_shutdown_lan_hmc(hw);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
	IXL_PF_LOCK_DESTROY(pf);
	return (error);
}
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_detach(device_t dev)
{
	struct ixl_pf		*pf = device_get_softc(dev);
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	i40e_status		status;

	INIT_DEBUGOUT("ixl_detach: begin");

	/* Make sure VLANS are not using driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	ether_ifdetach(vsi->ifp);
	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		IXL_PF_LOCK(pf);
		ixl_stop(pf);
		IXL_PF_UNLOCK(pf);
	}

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_drain(que->tq, &que->task);
			taskqueue_drain(que->tq, &que->tx_task);
			taskqueue_free(que->tq);
		}
	}

	/* Shutdown LAN HMC */
	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(dev,
		    "Shutdown LAN HMC failed with code %d\n", status);

	/* Shutdown admin queue */
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", status);

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	callout_drain(&pf->timer);
#ifdef DEV_NETMAP
	netmap_detach(vsi->ifp);
#endif /* DEV_NETMAP */

	ixl_free_pci_resources(pf);
	bus_generic_detach(dev);
	if_free(vsi->ifp);
	IXL_PF_LOCK_DESTROY(pf);
	return (0);
}
/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixl_shutdown(device_t dev)
{
	struct ixl_pf *pf = device_get_softc(dev);

	IXL_PF_LOCK(pf);
	ixl_stop(pf);
	IXL_PF_UNLOCK(pf);
	return (0);
}

/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

static int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	int		error, len;
	u16		needed;
	bool		again = TRUE;

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	error = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_DEVBUF);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
		device_printf(dev, "capability discovery failed: %d\n",
		    pf->hw.aq.asq_last_status);
		return (ENODEV);
	}

	/* Capture this PF's starting queue pair */
	pf->qbase = hw->func_caps.base_queue;

#ifdef IXL_DEBUG
	device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
	    hw->pf_id, hw->func_caps.num_vfs,
	    hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf,
	    hw->func_caps.fd_filters_guaranteed,
	    hw->func_caps.fd_filters_best_effort,
	    hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp,
	    hw->func_caps.base_queue);
#endif
	return (error);
}
static void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	device_t dev = vsi->dev;

	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			device_printf(dev,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			device_printf(dev,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			device_printf(dev,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			device_printf(dev,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixl_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
static int
ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int		error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixl_init(pf);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			error = EINVAL;
		} else {
			IXL_PF_LOCK(pf);
			ifp->if_mtu = ifr->ifr_mtu;
			vsi->max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			    + ETHER_VLAN_ENCAP_LEN;
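			/*
			** Worked example: assuming the usual IXL_MAX_FRAME
			** definition of 0x2600 (9728) bytes, the largest
			** MTU accepted here is 9728 - 14 (Ethernet header)
			** - 4 (CRC) - 4 (VLAN tag) = 9706 bytes.
			*/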
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXL_PF_LOCK(pf);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ pf->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixl_set_promisc(vsi);
				}
			} else
				ixl_init_locked(pf);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixl_stop(pf);
		pf->if_flags = ifp->if_flags;
		IXL_PF_UNLOCK(pf);
		break;
	case SIOCADDMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_add_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_del_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

		ixl_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		VLAN_CAPABILITIES(ifp);

		break;
	}

	default:
		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
static void
ixl_init_locked(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;
	device_t	dev = pf->dev;
	struct i40e_filter_control_settings	filter;
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	mtx_assert(&pf->pf_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixl_init: begin");
	ixl_stop(pf);

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
	    I40E_ETH_LENGTH_OF_ADDRESS);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    i40e_validate_mac_addr(tmpaddr)) {
		bcopy(tmpaddr, hw->mac.addr,
		    I40E_ETH_LENGTH_OF_ADDRESS);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address "
			    "change failed!\n");
			return;
		}
	}

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
#ifdef IXL_FDIR
	filter.enable_fdir = TRUE;
#endif
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "set_filter_control() failed\n");

	/* Set up RSS */
	ixl_config_rss(vsi);

	/*
	** Prepare the VSI: rings, hmc contexts, etc...
	*/
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Setup vlan's if needed */
	ixl_setup_vlan_filters(vsi);

	/* Start the local timer */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);

	/* Set up MSI/X routing and the ITR settings */
	if (ixl_enable_msix) {
		ixl_configure_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	/* Set MTU in hardware */
	int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
	    TRUE, 0, NULL);
	if (aq_error)
		device_printf(vsi->dev,
		    "aq_set_mac_config in init error, code %d\n",
		    aq_error);

	/* And now turn on interrupts */
	ixl_enable_intr(vsi);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}

static void
ixl_init(void *arg)
{
	struct ixl_pf *pf = arg;

	IXL_PF_LOCK(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);
	return;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/
static void
ixl_handle_que(void *context, int pending)
{
	struct ixl_queue *que = context;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	struct ifnet	*ifp = vsi->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);
		IXL_TX_LOCK(txr);
		ixl_txeof(que);
		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		IXL_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->task);
			return;
		}
	}

	/* Re-enable this interrupt */
	ixl_enable_queue(hw, que->me);
	return;
}
/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/
static void
ixl_intr(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	struct tx_ring		*txr = &que->txr;
	u32			reg, icr0, mask;
	bool			more_tx, more_rx;

	++que->irqs;

	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	icr0 = rd32(hw, I40E_PFINT_ICR0);

	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		taskqueue_enqueue(pf->tq, &pf->adminq);
		return;
	}

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	/* re-enable other interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, mask);

	/* And now the queues */
	reg = rd32(hw, I40E_QINT_RQCTL(0));
	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = rd32(hw, I40E_QINT_TQCTL(0));
	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
	wr32(hw, I40E_QINT_TQCTL(0), reg);

	ixl_enable_legacy(hw);

	return;
}
/*********************************************************************
 *
 *  MSIX VSI Interrupt Service routine
 *
 **********************************************************************/
static void
ixl_msix_que(void *arg)
{
	struct ixl_queue	*que = arg;
	struct ixl_vsi		*vsi = que->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct tx_ring		*txr = &que->txr;
	bool			more_tx, more_rx;

	/* Protect against spurious interrupts */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	++que->irqs;

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	ixl_set_queue_rx_itr(que);
	ixl_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
	else
		ixl_enable_queue(hw, que->me);

	return;
}
/*********************************************************************
 *
 *  MSIX Admin Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	u32		reg, mask;

	++pf->admin_irq;

	reg = rd32(hw, I40E_PFINT_ICR0);
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ixl_handle_mdd_event(pf);
		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	}

	if (reg & I40E_PFINT_ICR0_VFLR_MASK)
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;

	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	taskqueue_enqueue(pf->tq, &pf->adminq);
	return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");

	IXL_PF_LOCK(pf);
	ixl_update_link_status(pf);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!vsi->link_up) {
		IXL_PF_UNLOCK(pf);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_CR1:
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
		/* Using this until a real KR media type */
		case I40E_PHY_TYPE_10GBASE_KR:
		case I40E_PHY_TYPE_10GBASE_KX4:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
		/*
		** Set these to CR4 because OS does not
		** have types available yet.
		*/
		case I40E_PHY_TYPE_40GBASE_KR4:
		case I40E_PHY_TYPE_XLAUI:
		case I40E_PHY_TYPE_XLPPI:
		case I40E_PHY_TYPE_40GBASE_AOC:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;

	IXL_PF_UNLOCK(pf);

	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixl_media_change(struct ifnet * ifp)
{
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ifmedia *ifm = &vsi->media;

	INIT_DEBUGOUT("ixl_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(ifp, "Media change is currently not supported.\n");

	return (ENODEV);
}
#ifdef IXL_FDIR
/*
** ATR: Application Targeted Receive - creates a filter
** based on TX flow info that will keep the receive
** portion of the flow on the same queue. Based on the
** implementation this is only available for TCP connections
*/
void
ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
{
	struct ixl_vsi			*vsi = que->vsi;
	struct tx_ring			*txr = &que->txr;
	struct i40e_filter_program_desc	*FDIR;
	u32				ptype, dtype;
	int				idx;

	/* check if ATR is enabled and sample rate */
	if ((!ixl_enable_fdir) || (!txr->atr_rate))
		return;
	/*
	** We sample all TCP SYN/FIN packets,
	** or at the selected sample rate
	*/
	txr->atr_count++;
	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
	    (txr->atr_count < txr->atr_rate))
		return;
	txr->atr_count = 0;

	/* Get a descriptor to use */
	idx = txr->next_avail;
	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
	if (++idx == que->num_desc)
		idx = 0;
	txr->avail--;
	txr->next_avail = idx;
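	/*
	** Note: the filter program descriptor claims a regular slot in
	** the TX ring (hence the avail/next_avail accounting above); the
	** hardware consumes it inline with the transmit stream rather
	** than transmitting it on the wire.
	*/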
	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	    I40E_TXD_FLTR_QW0_QINDEX_MASK;

	ptype |= (etype == ETHERTYPE_IP) ?
	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;

	/*
	** We use the TCP TH_FIN as a trigger to remove
	** the filter, otherwise it's an update.
	*/
	dtype |= (th->th_flags & TH_FIN) ?
	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
	    I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
	FDIR->dtype_cmd_cntindex = htole32(dtype);
	return;
}
#endif /* IXL_FDIR */
static void
ixl_set_promisc(struct ixl_vsi *vsi)
{
	struct ifnet	*ifp = vsi->ifp;
	struct i40e_hw	*hw = vsi->hw;
	int		err, mcnt = 0;
	bool		uni = FALSE, multi = FALSE;

	if (ifp->if_flags & IFF_ALLMULTI)
		multi = TRUE;
	else { /* Need to count the multicast addresses */
		struct ifmultiaddr *ifma;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_MULTICAST_ADDR)
				break;
			mcnt++;
		}
		if_maddr_runlock(ifp);
	}

	if (mcnt >= MAX_MULTICAST_ADDR)
		multi = TRUE;
	if (ifp->if_flags & IFF_PROMISC)
		uni = TRUE;

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return;
}
/*********************************************************************
 *  Filter Routines
 *
 *  Routines for multicast and vlan filter management.
 *
 *********************************************************************/
static void
ixl_add_multi(struct ixl_vsi *vsi)
{
	struct ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct i40e_hw		*hw = vsi->hw;
	int			mcnt = 0, flags;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	if_maddr_rlock(ifp);
	/*
	** First just get a count, to decide if
	** we simply use multicast promiscuous.
	*/
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete existing MC filters */
		ixl_del_hw_filters(vsi, mcnt);
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}

	mcnt = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		ixl_add_mc_filter(vsi,
		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
		mcnt++;
	}
	if_maddr_runlock(ifp);
	if (mcnt > 0) {
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	IOCTL_DEBUGOUT("ixl_add_multi: end");
	return;
}

static void
ixl_del_multi(struct ixl_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct ifmultiaddr	*ifma;
	struct ixl_mac_filter	*f;
	int			mcnt = 0;
	bool			match = FALSE;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
			match = FALSE;
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			if (match == FALSE) {
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
			}
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		ixl_del_hw_filters(vsi, mcnt);
}
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

static void
ixl_local_timer(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = pf->dev;
	int			hung = 0;
	u32			mask;

	mtx_assert(&pf->pf_mtx, MA_OWNED);

	/* Fire off the adminq task */
	taskqueue_enqueue(pf->tq, &pf->adminq);

	/* Update stats */
	ixl_update_stats_counters(pf);

	/*
	** Check status of the queues
	*/
	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
	    I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		/* Any queues with outstanding work get a sw irq */
		if (que->busy)
			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors, it increments busy. If
		** we get to 5 we declare it hung.
		*/
		if (que->busy == IXL_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			vsi->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
				vsi->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXL_MAX_TX_BUSY) {
#ifdef IXL_DEBUG
			device_printf(dev, "Warning queue %d "
			    "appears to be hung!\n", i);
#endif
			que->busy = IXL_QUEUE_HUNG;
			++hung;
		}
	}
	/* Only reinit if all queues show hung */
	if (hung == vsi->num_queues)
		goto hung;

	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
	return;

hung:
	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
	ixl_init_locked(pf);
}
/*
** Note: this routine updates the OS on the link state;
**	the real check of the hardware only happens with
**	a link interrupt.
*/
static void
ixl_update_link_status(struct ixl_pf *pf)
{
	struct ixl_vsi	*vsi = &pf->vsi;
	struct i40e_hw	*hw = &pf->hw;
	struct ifnet	*ifp = vsi->ifp;
	device_t	dev = pf->dev;

	if (vsi->link_up) {
		if (vsi->link_active == FALSE) {
			i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
			pf->fc = hw->fc.current_mode;
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s,"
				    " Flow Control: %s\n",
				    ((vsi->link_speed == I40E_LINK_SPEED_40GB)? 40:10),
				    "Full Duplex", ixl_fc_string[pf->fc]);
			vsi->link_active = TRUE;
			/*
			** Warn user if link speed on NPAR enabled
			** partition is not at least 10GB
			*/
			if (hw->func_caps.npar_enable &&
			    (hw->phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
			    hw->phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
				device_printf(dev, "The partition detected link "
				    "speed that is less than 10Gbps\n");
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			vsi->link_active = FALSE;
		}
	}

	return;
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
ixl_stop(struct ixl_pf *pf)
{
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;

	mtx_assert(&pf->pf_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixl_stop: begin\n");
	ixl_disable_intr(vsi);
	ixl_disable_rings(vsi);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Stop the local timer */
	callout_stop(&pf->timer);

	return;
}
/*********************************************************************
 *
 *  Setup the Legacy or MSI Interrupt handler for the VSI
 *
 **********************************************************************/
static int
ixl_assign_vsi_legacy(struct ixl_pf *pf)
{
	device_t		dev = pf->dev;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	int			error, rid = 0;

	if (pf->msix == 1)
		rid = 1;
	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (pf->res == NULL) {
		device_printf(dev, "Unable to allocate"
		    " bus resource: vsi legacy/msi interrupt\n");
		return (ENXIO);
	}

	/* Set the handler function */
	error = bus_setup_intr(dev, pf->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixl_intr, pf, &pf->tag);
	if (error) {
		pf->res = NULL;
		device_printf(dev, "Failed to register legacy/msi handler");
		return (error);
	}
	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
	TASK_INIT(&que->task, 0, ixl_handle_que, que);
	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
	    taskqueue_thread_enqueue, &que->tq);
	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
	    device_get_nameunit(dev));
	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
	    taskqueue_thread_enqueue, &pf->tq);
	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
	    device_get_nameunit(dev));

	return (0);
}
/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers for the VSI
 *
 **********************************************************************/
static int
ixl_assign_vsi_msix(struct ixl_pf *pf)
{
	device_t		dev = pf->dev;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct tx_ring		*txr;
	int			error, rid, vector = 0;
#ifdef RSS
	cpuset_t		cpu_mask;
#endif

	/* Admin Queue is vector 0 */
	rid = vector + 1;
	pf->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (!pf->res) {
		device_printf(dev, "Unable to allocate"
		    " bus resource: Adminq interrupt [%d]\n", rid);
		return (ENXIO);
	}
	/* Set the adminq vector and handler */
	error = bus_setup_intr(dev, pf->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixl_msix_adminq, pf, &pf->tag);
	if (error) {
		pf->res = NULL;
		device_printf(dev, "Failed to register Admin que handler");
		return (error);
	}
	bus_describe_intr(dev, pf->res, pf->tag, "aq");
	pf->admvec = vector;
	/* Tasklet for Admin Queue */
	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
	    taskqueue_thread_enqueue, &pf->tq);
	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
	    device_get_nameunit(pf->dev));
	++vector;

	/* Now set up the stations */
	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
		int cpu_id = i;
		rid = vector + 1;
		txr = &que->txr;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev, "Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixl_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register que handler");
			return (error);
		}
		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
		/* Bind the vector to a CPU */
#ifdef RSS
		cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
		bus_bind_intr(dev, que->res, cpu_id);
		que->msix = vector;
		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
		TASK_INIT(&que->task, 0, ixl_handle_que, que);
		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
		CPU_ZERO(&cpu_mask);
		CPU_SET(cpu_id, &cpu_mask);
		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
		    &cpu_mask, "%s (bucket %d)",
		    device_get_nameunit(dev), cpu_id);
#else
		taskqueue_start_threads(&que->tq, 1, PI_NET,
		    "%s que", device_get_nameunit(dev));
#endif
	}

	return (0);
}
/*
 * Allocate MSI/X vectors
 */
static int
ixl_init_msix(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int rid, want, vectors, queues, available;

	/* Override by tunable */
	if (ixl_enable_msix == 0)
		goto msi;

	/*
	** When used in a virtualized environment
	** PCI BUSMASTER capability may not be set
	** so explicitly set it here and rewrite
	** the ENABLE in the MSIX control register
	** at this point to cause the host to
	** successfully initialize us.
	*/
	{
		u16 pci_cmd_word;
		int msix_ctrl;
		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	/* First try MSI/X */
	rid = PCIR_BAR(IXL_BAR);
	pf->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!pf->msix_mem) {
		/* May not be enabled */
		device_printf(pf->dev,
		    "Unable to map MSIX table\n");
		goto msi;
	}

	available = pci_msix_count(dev);
	if (available == 0) { /* system has msix disabled */
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, pf->msix_mem);
		pf->msix_mem = NULL;
		goto msi;
	}

	/* Figure out a reasonable auto config value */
	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;

	/* Override with hardcoded value if sane */
	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
		queues = ixl_max_queues;
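
	/*
	** Worked example: on an 8-CPU system with 16 MSI-X vectors
	** available, queues comes out as 8, and want below becomes 9
	** (one RX/TX pair vector per queue plus the admin queue vector).
	*/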
#ifdef RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for the admin queue.
	*/
	want = queues + 1;
	if (want <= available)	/* Have enough */
		vectors = want;
	else {
		device_printf(pf->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors available but %d wanted!\n",
		    available, want);
		return (0); /* Will go to Legacy setup */
	}
	if (pci_alloc_msix(dev, &vectors) == 0) {
		device_printf(pf->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		pf->msix = vectors;
		pf->vsi.num_queues = queues;
#ifdef RSS
		/*
		 * If we're doing RSS, the number of queues needs to
		 * match the number of RSS buckets that are configured.
		 *
		 * + If there's more queues than RSS buckets, we'll end
		 *   up with queues that get no traffic.
		 *
		 * + If there's more RSS buckets than queues, we'll end
		 *   up having multiple RSS buckets map to the same queue,
		 *   so there'll be some contention.
		 */
		if (queues != rss_getnumbuckets()) {
			device_printf(dev,
			    "%s: queues (%d) != RSS buckets (%d)"
			    "; performance will be impacted.\n",
			    __func__, queues, rss_getnumbuckets());
		}
#endif
		return (vectors);
	}
msi:
	vectors = pci_msi_count(dev);
	pf->vsi.num_queues = 1;
	pf->msix = 1;
	ixl_max_queues = 1;
	ixl_enable_msix = 0;
	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
		device_printf(pf->dev, "Using an MSI interrupt\n");
	else {
		vectors = 0;
		device_printf(pf->dev, "Using a Legacy interrupt\n");
	}
	return (vectors);
}
/*
 * Plumb MSI/X vectors
 */
static void
ixl_configure_msix(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	u32		reg;
	u16		vector = 1;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
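
	/*
	** The per-queue causes below form a linked list on each vector:
	** LNKLSTN points at the queue's RX cause, RQCTL chains RX on to
	** the TX cause (NEXTQ_TYPE of TX), and TQCTL chains on to the
	** next queue's RX cause, with IXL_QUEUE_EOL closing the list.
	*/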
	/* Next configure the queues */
	for (int i = 0; i < vsi->num_queues; i++, vector++) {
		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
		wr32(hw, I40E_PFINT_LNKLSTN(i), i);

		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
		    (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
		    (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		    (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_RQCTL(i), reg);

		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		    (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		    (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		    ((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
		if (i == (vsi->num_queues - 1))
			reg |= (IXL_QUEUE_EOL
			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
		wr32(hw, I40E_QINT_TQCTL(i), reg);
	}
}
2144 * Configure for MSI single vector operation
2147 ixl_configure_legacy(struct ixl_pf *pf)
2149 struct i40e_hw *hw = &pf->hw;
2153 wr32(hw, I40E_PFINT_ITR0(0), 0);
2154 wr32(hw, I40E_PFINT_ITR0(1), 0);
2157 /* Setup "other" causes */
2158 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2159 | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2160 | I40E_PFINT_ICR0_ENA_GRST_MASK
2161 | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2162 | I40E_PFINT_ICR0_ENA_GPIO_MASK
2163 | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2164 | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2165 | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2166 | I40E_PFINT_ICR0_ENA_VFLR_MASK
2167 | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2169 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2171 /* SW_ITR_IDX = 0, but don't change INTENA */
2172 wr32(hw, I40E_PFINT_DYN_CTL0,
2173 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2174 I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2175 /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2176 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2178 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2179 wr32(hw, I40E_PFINT_LNKLST0, 0);
2181 /* Associate the queue pair to the vector and enable the q int */
2182 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2183 | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2184 | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2185 wr32(hw, I40E_QINT_RQCTL(0), reg);
2187 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2188 | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2189 | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2190 wr32(hw, I40E_QINT_TQCTL(0), reg);
2192 /* Next enable the queue pair */
2193 reg = rd32(hw, I40E_QTX_ENA(0));
2194 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2195 wr32(hw, I40E_QTX_ENA(0), reg);
2197 reg = rd32(hw, I40E_QRX_ENA(0));
2198 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2199 wr32(hw, I40E_QRX_ENA(0), reg);
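	/*
	** QENA_REQ only requests the state change; the hardware
	** acknowledges it by setting QENA_STAT, which is what the
	** polling loops in ixl_enable_rings() wait for.
	*/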
2204 * Set the Initial ITR state
2207 ixl_configure_itr(struct ixl_pf *pf)
2209 struct i40e_hw *hw = &pf->hw;
2210 struct ixl_vsi *vsi = &pf->vsi;
2211 struct ixl_queue *que = vsi->queues;
2213 vsi->rx_itr_setting = ixl_rx_itr;
2214 if (ixl_dynamic_rx_itr)
2215 vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2216 vsi->tx_itr_setting = ixl_tx_itr;
2217 if (ixl_dynamic_tx_itr)
2218 vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2220 for (int i = 0; i < vsi->num_queues; i++, que++) {
2221 struct tx_ring *txr = &que->txr;
2222 struct rx_ring *rxr = &que->rxr;
2224 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2225 vsi->rx_itr_setting);
2226 rxr->itr = vsi->rx_itr_setting;
2227 rxr->latency = IXL_AVE_LATENCY;
2228 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2229 vsi->tx_itr_setting);
2230 txr->itr = vsi->tx_itr_setting;
2231 txr->latency = IXL_AVE_LATENCY;
2237 ixl_allocate_pci_resources(struct ixl_pf *pf)
2240 device_t dev = pf->dev;
2243 pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2246 if (!(pf->pci_mem)) {
2247 device_printf(dev,"Unable to allocate bus resource: memory\n");
2251 pf->osdep.mem_bus_space_tag =
2252 rman_get_bustag(pf->pci_mem);
2253 pf->osdep.mem_bus_space_handle =
2254 rman_get_bushandle(pf->pci_mem);
2255 pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2256 pf->osdep.flush_reg = I40E_GLGEN_STAT;
2257 pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2259 pf->hw.back = &pf->osdep;
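	/*
	** Sketch of the intent here (the osdep register wrappers live
	** elsewhere): rd32()/wr32() are expected to go through the
	** saved tag/handle, roughly
	**
	**   bus_space_read_4(osdep->mem_bus_space_tag,
	**       osdep->mem_bus_space_handle, reg);
	**
	** so hw_addr serves as a cookie for those wrappers rather
	** than a directly dereferenced register pointer.
	*/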
2262 	** Now set up MSI or MSI/X; this should
2263 	** return the number of supported
2264 	** vectors. (Will be 1 for MSI)
2266 pf->msix = ixl_init_msix(pf);
2271 ixl_free_pci_resources(struct ixl_pf * pf)
2273 struct ixl_vsi *vsi = &pf->vsi;
2274 struct ixl_queue *que = vsi->queues;
2275 device_t dev = pf->dev;
2278 memrid = PCIR_BAR(IXL_BAR);
2280 	/* We may get here before stations are set up */
2281 if ((!ixl_enable_msix) || (que == NULL))
2285 	** Release all MSIX VSI resources:
2287 for (int i = 0; i < vsi->num_queues; i++, que++) {
2288 rid = que->msix + 1;
2289 if (que->tag != NULL) {
2290 bus_teardown_intr(dev, que->res, que->tag);
2293 if (que->res != NULL)
2294 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2298 /* Clean the AdminQ interrupt last */
2299 if (pf->admvec) /* we are doing MSIX */
2300 rid = pf->admvec + 1;
2302 		rid = (pf->msix != 0) ? 1 : 0;
2304 if (pf->tag != NULL) {
2305 bus_teardown_intr(dev, pf->res, pf->tag);
2308 if (pf->res != NULL)
2309 bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2312 pci_release_msi(dev);
2314 if (pf->msix_mem != NULL)
2315 bus_release_resource(dev, SYS_RES_MEMORY,
2316 memrid, pf->msix_mem);
2318 if (pf->pci_mem != NULL)
2319 bus_release_resource(dev, SYS_RES_MEMORY,
2320 PCIR_BAR(0), pf->pci_mem);
2326 ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2328 	/* Add supported media types */
2329 if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2330 ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2332 if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2333 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2335 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2336 phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4) ||
2337 phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR) ||
2338 phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
2339 phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
2340 phy_type & (1 << I40E_PHY_TYPE_XFI) ||
2341 phy_type & (1 << I40E_PHY_TYPE_SFI) ||
2342 phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2343 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2345 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2346 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2347 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2348 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2349 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2350 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2352 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
2353 phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2354 phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
2355 phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
2356 phy_type & (1 << I40E_PHY_TYPE_XLPPI) ||
2357 /* KR4 uses CR4 until the OS has the real media type */
2358 phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2359 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2361 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2362 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2363 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2364 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2367 /*********************************************************************
2369 * Setup networking device structure and register an interface.
2371 **********************************************************************/
2373 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2376 struct i40e_hw *hw = vsi->hw;
2377 struct ixl_queue *que = vsi->queues;
2378 struct i40e_aq_get_phy_abilities_resp abilities;
2379 enum i40e_status_code aq_error = 0;
2381 INIT_DEBUGOUT("ixl_setup_interface: begin");
2383 ifp = vsi->ifp = if_alloc(IFT_ETHER);
2385 		device_printf(dev, "cannot allocate ifnet structure\n");
2388 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2389 ifp->if_mtu = ETHERMTU;
2390 	ifp->if_baudrate = 4000000000;	/* XXX placeholder, not the real link speed */
2391 ifp->if_init = ixl_init;
2392 ifp->if_softc = vsi;
2393 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2394 ifp->if_ioctl = ixl_ioctl;
2396 #if __FreeBSD_version >= 1100036
2397 if_setgetcounterfn(ifp, ixl_get_counter);
2400 ifp->if_transmit = ixl_mq_start;
2402 ifp->if_qflush = ixl_qflush;
2404 ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2406 vsi->max_frame_size =
2407 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2408 + ETHER_VLAN_ENCAP_LEN;
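	/*
	** For the default ETHERMTU of 1500 this works out to
	** 1500 + 14 (header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes.
	*/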
2411 * Tell the upper layer(s) we support long frames.
2413 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2415 ifp->if_capabilities |= IFCAP_HWCSUM;
2416 ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2417 ifp->if_capabilities |= IFCAP_TSO;
2418 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2419 ifp->if_capabilities |= IFCAP_LRO;
2421 	/* VLAN capabilities */
2422 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2425 | IFCAP_VLAN_HWCSUM;
2426 ifp->if_capenable = ifp->if_capabilities;
2429 	** Don't turn this on by default: if vlans are
2430 	** created on another pseudo device (e.g. lagg),
2431 	** vlan events are not passed through, breaking
2432 	** operation; with HW FILTER off it works. If
2433 	** vlans are used directly on the ixl driver, this
2434 	** can be enabled to get full hardware tag filtering.
2436 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2439 * Specify the media types supported by this adapter and register
2440 * callbacks to update media and link information
2442 ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2445 aq_error = i40e_aq_get_phy_capabilities(hw,
2446 FALSE, TRUE, &abilities, NULL);
2447 	/* May need a delay to detect fiber correctly */
2448 if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2449 i40e_msec_delay(200);
2450 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2451 TRUE, &abilities, NULL);
2454 if (aq_error == I40E_ERR_UNKNOWN_PHY)
2455 device_printf(dev, "Unknown PHY type detected!\n");
2458 "Error getting supported media types, err %d,"
2459 " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2463 ixl_add_ifmedia(vsi, abilities.phy_type);
2465 /* Use autoselect media by default */
2466 ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2467 ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2469 ether_ifattach(ifp, hw->mac.addr);
2475 ixl_config_link(struct i40e_hw *hw)
2479 i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
2480 check = i40e_get_link_status(hw);
2482 printf("Link is %s\n", check ? "up":"down");
2487 /*********************************************************************
2489 * Get Firmware Switch configuration
2490 * - this will need to be more robust when more complex
2491 * switch configurations are enabled.
2493 **********************************************************************/
2495 ixl_switch_config(struct ixl_pf *pf)
2497 struct i40e_hw *hw = &pf->hw;
2498 struct ixl_vsi *vsi = &pf->vsi;
2499 device_t dev = vsi->dev;
2500 struct i40e_aqc_get_switch_config_resp *sw_config;
2501 u8 aq_buf[I40E_AQ_LARGE_BUF];
2502 int ret = I40E_SUCCESS;
2505 memset(&aq_buf, 0, sizeof(aq_buf));
2506 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2507 ret = i40e_aq_get_switch_config(hw, sw_config,
2508 sizeof(aq_buf), &next, NULL);
2510 		device_printf(dev, "aq_get_switch_config failed!\n");
2514 printf("Switch config: header reported: %d in structure, %d total\n",
2515 sw_config->header.num_reported, sw_config->header.num_total);
2516 printf("type=%d seid=%d uplink=%d downlink=%d\n",
2517 sw_config->element[0].element_type,
2518 sw_config->element[0].seid,
2519 sw_config->element[0].uplink_seid,
2520 sw_config->element[0].downlink_seid);
2522 /* Simplified due to a single VSI at the moment */
2523 vsi->seid = sw_config->element[0].seid;
2527 /*********************************************************************
2529 * Initialize the VSI: this handles contexts, which means things
2530 * like the number of descriptors, buffer size,
2531  *  and we init the rings through this function.
2533 **********************************************************************/
2535 ixl_initialize_vsi(struct ixl_vsi *vsi)
2537 struct ixl_queue *que = vsi->queues;
2538 device_t dev = vsi->dev;
2539 struct i40e_hw *hw = vsi->hw;
2540 struct i40e_vsi_context ctxt;
2543 memset(&ctxt, 0, sizeof(ctxt));
2544 ctxt.seid = vsi->seid;
2545 ctxt.pf_num = hw->pf_id;
2546 err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2548 		device_printf(dev, "get vsi params failed %x!\n", err);
2552 printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2553 "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2554 "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2555 ctxt.uplink_seid, ctxt.vsi_number,
2556 ctxt.vsis_allocated, ctxt.vsis_unallocated,
2557 ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2558 ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2561 ** Set the queue and traffic class bits
2562 ** - when multiple traffic classes are supported
2563 ** this will need to be more robust.
2565 ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2566 ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2567 ctxt.info.queue_mapping[0] = 0;
2568 ctxt.info.tc_mapping[0] = 0x0800;
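	/*
	** Decoding that constant (assuming the AQ queue-map layout of
	** queue offset in bits 0-8 and log2 queue count in bits 9-13):
	** 0x0800 = 4 << 9, i.e. TC0 starts at queue 0 and spans
	** 2^4 = 16 queues.
	*/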
2570 /* Set VLAN receive stripping mode */
2571 ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2572 ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2573 if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2574 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2576 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2578 /* Keep copy of VSI info in VSI for statistic counters */
2579 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2581 /* Reset VSI statistics */
2582 ixl_vsi_reset_stats(vsi);
2583 vsi->hw_filters_add = 0;
2584 vsi->hw_filters_del = 0;
2586 err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2588 		device_printf(dev, "update vsi params failed %x!\n",
2589 hw->aq.asq_last_status);
2593 for (int i = 0; i < vsi->num_queues; i++, que++) {
2594 struct tx_ring *txr = &que->txr;
2595 struct rx_ring *rxr = &que->rxr;
2596 struct i40e_hmc_obj_txq tctx;
2597 struct i40e_hmc_obj_rxq rctx;
2602 /* Setup the HMC TX Context */
2603 size = que->num_desc * sizeof(struct i40e_tx_desc);
2604 memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2605 tctx.new_context = 1;
2606 tctx.base = (txr->dma.pa/128);
2607 tctx.qlen = que->num_desc;
2609 tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2610 /* Enable HEAD writeback */
2611 tctx.head_wb_ena = 1;
2612 tctx.head_wb_addr = txr->dma.pa +
2613 (que->num_desc * sizeof(struct i40e_tx_desc));
2614 tctx.rdylist_act = 0;
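		/*
		** With head writeback the hardware DMAs the ring's head
		** index into host memory just past the last descriptor;
		** that is what the extra sizeof(u32) added to the TX ring
		** allocation in ixl_setup_stations() is for, letting the
		** driver poll memory instead of reading a head register.
		*/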
2615 err = i40e_clear_lan_tx_queue_context(hw, i);
2617 device_printf(dev, "Unable to clear TX context\n");
2620 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2622 device_printf(dev, "Unable to set TX context\n");
2625 /* Associate the ring with this PF */
2626 txctl = I40E_QTX_CTL_PF_QUEUE;
2627 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2628 I40E_QTX_CTL_PF_INDX_MASK);
2629 wr32(hw, I40E_QTX_CTL(i), txctl);
2632 /* Do ring (re)init */
2633 ixl_init_tx_ring(que);
2635 /* Next setup the HMC RX Context */
2636 if (vsi->max_frame_size <= 2048)
2637 rxr->mbuf_sz = MCLBYTES;
2639 rxr->mbuf_sz = MJUMPAGESIZE;
2641 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2643 /* Set up an RX context for the HMC */
2644 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2645 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2646 /* ignore header split for now */
2647 		rctx.hbuff = 0;
2648 rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2649 vsi->max_frame_size : max_rxmax;
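		/*
		** Worked example (chain length assumed): with 2 KB
		** clusters and rx_buf_chain_len = 5, max_rxmax is 10240,
		** so a standard 1522-byte max_frame_size passes through
		** unchanged. Assuming the 128-byte DBUFF granularity, the
		** dbuff value above is e.g. 2048 >> 7 = 16.
		*/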
2651 		rctx.dsize = 1;	/* use 32-byte descriptors */
2652 rctx.hsplit_0 = 0; /* no HDR split initially */
2653 rctx.base = (rxr->dma.pa/128);
2654 rctx.qlen = que->num_desc;
2655 rctx.tphrdesc_ena = 1;
2656 rctx.tphwdesc_ena = 1;
2657 rctx.tphdata_ena = 0;
2658 rctx.tphhead_ena = 0;
2659 rctx.lrxqthresh = 2;
2666 err = i40e_clear_lan_rx_queue_context(hw, i);
2669 "Unable to clear RX context %d\n", i);
2672 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2674 device_printf(dev, "Unable to set RX context %d\n", i);
2677 err = ixl_init_rx_ring(que);
2679 device_printf(dev, "Fail in init_rx_ring %d\n", i);
2682 wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2684 /* preserve queue */
2685 if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2686 struct netmap_adapter *na = NA(vsi->ifp);
2687 struct netmap_kring *kring = &na->rx_rings[i];
2688 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2689 wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2691 #endif /* DEV_NETMAP */
2692 wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2698 /*********************************************************************
2700 * Free all VSI structs.
2702 **********************************************************************/
2704 ixl_free_vsi(struct ixl_vsi *vsi)
2706 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2707 struct ixl_queue *que = vsi->queues;
2708 struct ixl_mac_filter *f;
2710 /* Free station queues */
2711 for (int i = 0; i < vsi->num_queues; i++, que++) {
2712 struct tx_ring *txr = &que->txr;
2713 struct rx_ring *rxr = &que->rxr;
2715 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2718 ixl_free_que_tx(que);
2720 i40e_free_dma_mem(&pf->hw, &txr->dma);
2722 IXL_TX_LOCK_DESTROY(txr);
2724 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2727 ixl_free_que_rx(que);
2729 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2731 IXL_RX_LOCK_DESTROY(rxr);
2734 free(vsi->queues, M_DEVBUF);
2736 /* Free VSI filter list */
2737 while (!SLIST_EMPTY(&vsi->ftl)) {
2738 f = SLIST_FIRST(&vsi->ftl);
2739 SLIST_REMOVE_HEAD(&vsi->ftl, next);
2745 /*********************************************************************
2747  *  Allocate memory for the VSI (virtual station interface) and its
2748  *  associated queues, rings and the descriptors associated with each;
2749  *  called only once at attach.
2751 **********************************************************************/
2753 ixl_setup_stations(struct ixl_pf *pf)
2755 device_t dev = pf->dev;
2756 struct ixl_vsi *vsi;
2757 struct ixl_queue *que;
2758 struct tx_ring *txr;
2759 struct rx_ring *rxr;
2761 int error = I40E_SUCCESS;
2764 vsi->back = (void *)pf;
2769 /* Get memory for the station queues */
2771 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2772 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2773 device_printf(dev, "Unable to allocate queue memory\n");
2778 for (int i = 0; i < vsi->num_queues; i++) {
2779 que = &vsi->queues[i];
2780 que->num_desc = ixl_ringsz;
2783 /* mark the queue as active */
2784 vsi->active_queues |= (u64)1 << que->me;
2787 txr->tail = I40E_QTX_TAIL(que->me);
2789 /* Initialize the TX lock */
2790 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2791 device_get_nameunit(dev), que->me);
2792 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2793 /* Create the TX descriptor ring */
2794 tsize = roundup2((que->num_desc *
2795 sizeof(struct i40e_tx_desc)) +
2796 sizeof(u32), DBA_ALIGN);
2797 if (i40e_allocate_dma_mem(&pf->hw,
2798 &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2800 "Unable to allocate TX Descriptor memory\n");
2804 txr->base = (struct i40e_tx_desc *)txr->dma.va;
2805 bzero((void *)txr->base, tsize);
2806 /* Now allocate transmit soft structs for the ring */
2807 if (ixl_allocate_tx_data(que)) {
2809 "Critical Failure setting up TX structures\n");
2813 /* Allocate a buf ring */
2814 txr->br = buf_ring_alloc(4096, M_DEVBUF,
2815 M_WAITOK, &txr->mtx);
2816 if (txr->br == NULL) {
2818 "Critical Failure setting up TX buf ring\n");
2824 * Next the RX queues...
2826 rsize = roundup2(que->num_desc *
2827 sizeof(union i40e_rx_desc), DBA_ALIGN);
2830 rxr->tail = I40E_QRX_TAIL(que->me);
2832 /* Initialize the RX side lock */
2833 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2834 device_get_nameunit(dev), que->me);
2835 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2837 if (i40e_allocate_dma_mem(&pf->hw,
2838 &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2840 "Unable to allocate RX Descriptor memory\n");
2844 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2845 bzero((void *)rxr->base, rsize);
2847         	/* Allocate receive soft structs for the ring */
2848 if (ixl_allocate_rx_data(que)) {
2850 "Critical Failure setting up receive structs\n");
2859 for (int i = 0; i < vsi->num_queues; i++) {
2860 que = &vsi->queues[i];
2864 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2866 i40e_free_dma_mem(&pf->hw, &txr->dma);
2874 ** Provide an update to the queue RX
2875 ** interrupt moderation value.
2878 ixl_set_queue_rx_itr(struct ixl_queue *que)
2880 struct ixl_vsi *vsi = que->vsi;
2881 struct i40e_hw *hw = vsi->hw;
2882 struct rx_ring *rxr = &que->rxr;
2888 /* Idle, do nothing */
2889 if (rxr->bytes == 0)
2892 if (ixl_dynamic_rx_itr) {
2893 rx_bytes = rxr->bytes/rxr->itr;
2896 /* Adjust latency range */
2897 switch (rxr->latency) {
2898 case IXL_LOW_LATENCY:
2899 if (rx_bytes > 10) {
2900 rx_latency = IXL_AVE_LATENCY;
2901 rx_itr = IXL_ITR_20K;
2904 case IXL_AVE_LATENCY:
2905 if (rx_bytes > 20) {
2906 rx_latency = IXL_BULK_LATENCY;
2907 rx_itr = IXL_ITR_8K;
2908 } else if (rx_bytes <= 10) {
2909 rx_latency = IXL_LOW_LATENCY;
2910 rx_itr = IXL_ITR_100K;
2913 case IXL_BULK_LATENCY:
2914 if (rx_bytes <= 20) {
2915 rx_latency = IXL_AVE_LATENCY;
2916 rx_itr = IXL_ITR_20K;
2921 rxr->latency = rx_latency;
2923 if (rx_itr != rxr->itr) {
2924 /* do an exponential smoothing */
2925 rx_itr = (10 * rx_itr * rxr->itr) /
2926 ((9 * rx_itr) + rxr->itr);
2927 rxr->itr = rx_itr & IXL_MAX_ITR;
2928 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2929 que->me), rxr->itr);
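			/*
			** Example of the smoothing step (ITR values here
			** are assumed, typical of ixl.h): moving from the
			** 8K setting (0x3E = 62) toward the 100K setting
			** (0x05 = 5) gives
			**
			**   (10 * 5 * 62) / ((9 * 5) + 62) = 3100 / 107 = 28
			**
			** so the register steps toward the target over a
			** few updates rather than jumping.
			*/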
2931 	} else { /* We may have toggled to non-dynamic */
2932 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2933 vsi->rx_itr_setting = ixl_rx_itr;
2934 /* Update the hardware if needed */
2935 if (rxr->itr != vsi->rx_itr_setting) {
2936 rxr->itr = vsi->rx_itr_setting;
2937 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2938 que->me), rxr->itr);
2948 ** Provide an update to the queue TX
2949 ** interrupt moderation value.
2952 ixl_set_queue_tx_itr(struct ixl_queue *que)
2954 struct ixl_vsi *vsi = que->vsi;
2955 struct i40e_hw *hw = vsi->hw;
2956 struct tx_ring *txr = &que->txr;
2962 /* Idle, do nothing */
2963 if (txr->bytes == 0)
2966 if (ixl_dynamic_tx_itr) {
2967 tx_bytes = txr->bytes/txr->itr;
2970 switch (txr->latency) {
2971 case IXL_LOW_LATENCY:
2972 if (tx_bytes > 10) {
2973 tx_latency = IXL_AVE_LATENCY;
2974 tx_itr = IXL_ITR_20K;
2977 case IXL_AVE_LATENCY:
2978 if (tx_bytes > 20) {
2979 tx_latency = IXL_BULK_LATENCY;
2980 tx_itr = IXL_ITR_8K;
2981 } else if (tx_bytes <= 10) {
2982 tx_latency = IXL_LOW_LATENCY;
2983 tx_itr = IXL_ITR_100K;
2986 case IXL_BULK_LATENCY:
2987 if (tx_bytes <= 20) {
2988 tx_latency = IXL_AVE_LATENCY;
2989 tx_itr = IXL_ITR_20K;
2994 txr->latency = tx_latency;
2996 if (tx_itr != txr->itr) {
2997 /* do an exponential smoothing */
2998 tx_itr = (10 * tx_itr * txr->itr) /
2999 ((9 * tx_itr) + txr->itr);
3000 txr->itr = tx_itr & IXL_MAX_ITR;
3001 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3002 que->me), txr->itr);
3005 	} else { /* We may have toggled to non-dynamic */
3006 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
3007 vsi->tx_itr_setting = ixl_tx_itr;
3008 /* Update the hardware if needed */
3009 if (txr->itr != vsi->tx_itr_setting) {
3010 txr->itr = vsi->tx_itr_setting;
3011 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3012 que->me), txr->itr);
3022 ixl_add_hw_stats(struct ixl_pf *pf)
3024 device_t dev = pf->dev;
3025 struct ixl_vsi *vsi = &pf->vsi;
3026 struct ixl_queue *queues = vsi->queues;
3027 struct i40e_eth_stats *vsi_stats = &vsi->eth_stats;
3028 struct i40e_hw_port_stats *pf_stats = &pf->stats;
3030 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3031 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3032 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3034 struct sysctl_oid *vsi_node, *queue_node;
3035 struct sysctl_oid_list *vsi_list, *queue_list;
3037 struct tx_ring *txr;
3038 struct rx_ring *rxr;
3040 /* Driver statistics */
3041 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3042 CTLFLAG_RD, &pf->watchdog_events,
3043 "Watchdog timeouts");
3044 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3045 CTLFLAG_RD, &pf->admin_irq,
3046 "Admin Queue IRQ Handled");
3048 /* VSI statistics */
3049 #define QUEUE_NAME_LEN 32
3050 char queue_namebuf[QUEUE_NAME_LEN];
3052 // ERJ: Only one vsi now, re-do when >1 VSI enabled
3053 // snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", vsi->info.stat_counter_idx);
3054 vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
3055 CTLFLAG_RD, NULL, "VSI-specific stats");
3056 vsi_list = SYSCTL_CHILDREN(vsi_node);
3058 ixl_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats);
3060 /* Queue statistics */
3061 for (int q = 0; q < vsi->num_queues; q++) {
3062 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3063 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
3064 CTLFLAG_RD, NULL, "Queue #");
3065 queue_list = SYSCTL_CHILDREN(queue_node);
3067 txr = &(queues[q].txr);
3068 rxr = &(queues[q].rxr);
3070 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3071 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3072 "m_defrag() failed");
3073 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3074 CTLFLAG_RD, &(queues[q].dropped_pkts),
3075 "Driver dropped packets");
3076 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3077 CTLFLAG_RD, &(queues[q].irqs),
3078 "irqs on this queue");
3079 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3080 CTLFLAG_RD, &(queues[q].tso),
3082 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3083 CTLFLAG_RD, &(queues[q].tx_dma_setup),
3084 "Driver tx dma failure in xmit");
3085 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3086 CTLFLAG_RD, &(txr->no_desc),
3087 "Queue No Descriptor Available");
3088 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3089 CTLFLAG_RD, &(txr->total_packets),
3090 "Queue Packets Transmitted");
3091 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3092 CTLFLAG_RD, &(txr->tx_bytes),
3093 "Queue Bytes Transmitted");
3094 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3095 CTLFLAG_RD, &(rxr->rx_packets),
3096 "Queue Packets Received");
3097 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3098 CTLFLAG_RD, &(rxr->rx_bytes),
3099 "Queue Bytes Received");
3103 ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
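	/*
	** The nodes built above hang off the device's sysctl tree, so
	** a query from userland looks like (unit 0 assumed):
	**
	**   sysctl dev.ixl.0.vsi.que0.rx_bytes
	**   sysctl dev.ixl.0.mac.crc_errors
	*/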
3107 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3108 struct sysctl_oid_list *child,
3109 struct i40e_eth_stats *eth_stats)
3111 struct ixl_sysctl_info ctls[] =
3113 {ð_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3114 {ð_stats->rx_unicast, "ucast_pkts_rcvd",
3115 "Unicast Packets Received"},
3116 {ð_stats->rx_multicast, "mcast_pkts_rcvd",
3117 "Multicast Packets Received"},
3118 {ð_stats->rx_broadcast, "bcast_pkts_rcvd",
3119 "Broadcast Packets Received"},
3120 {ð_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3121 {ð_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3122 {ð_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3123 {ð_stats->tx_multicast, "mcast_pkts_txd",
3124 "Multicast Packets Transmitted"},
3125 {ð_stats->tx_broadcast, "bcast_pkts_txd",
3126 "Broadcast Packets Transmitted"},
3131 struct ixl_sysctl_info *entry = ctls;
3132 while (entry->stat != 0)
3134 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3135 CTLFLAG_RD, entry->stat,
3136 entry->description);
3142 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3143 struct sysctl_oid_list *child,
3144 struct i40e_hw_port_stats *stats)
3146 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3147 CTLFLAG_RD, NULL, "Mac Statistics");
3148 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3150 struct i40e_eth_stats *eth_stats = &stats->eth;
3151 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3153 struct ixl_sysctl_info ctls[] =
3155 {&stats->crc_errors, "crc_errors", "CRC Errors"},
3156 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3157 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3158 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3159 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3160 /* Packet Reception Stats */
3161 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3162 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3163 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3164 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3165 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3166 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3167 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3168 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3169 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3170 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3171 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3172 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3173 /* Packet Transmission Stats */
3174 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3175 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3176 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3177 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3178 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3179 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3180 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3182 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3183 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3184 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3185 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3190 struct ixl_sysctl_info *entry = ctls;
3191 while (entry->stat != 0)
3193 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3194 CTLFLAG_RD, entry->stat,
3195 entry->description);
3201 ** ixl_config_rss - setup RSS
3202 ** - note this is done for the single vsi
3204 static void ixl_config_rss(struct ixl_vsi *vsi)
3206 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3207 struct i40e_hw *hw = vsi->hw;
3209 u64 set_hena = 0, hena;
3212 u32 rss_hash_config;
3213 u32 rss_seed[IXL_KEYSZ];
3215 u32 rss_seed[IXL_KEYSZ] = {0x41b01687,
3216 0x183cfd8c, 0xce880440, 0x580cbc3c,
3217 0x35897377, 0x328b25e1, 0x4fa98922,
3218 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
3222 /* Fetch the configured RSS key */
3223 rss_getkey((uint8_t *) &rss_seed);
3226 /* Fill out hash function seed */
3227 for (i = 0; i < IXL_KEYSZ; i++)
3228 wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
3230 /* Enable PCTYPES for RSS: */
3232 rss_hash_config = rss_gethashconfig();
3233 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3234 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3235 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3236 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
3237 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3238 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
3239 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3240 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
3241 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3242 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
3243 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3244 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
3245 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3246 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
3249 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3250 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3251 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3252 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3253 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3254 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3255 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3256 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3257 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3258 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3259 ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3261 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3262 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3264 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3265 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
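	/*
	** HENA is a 64-bit enable mask split across two 32-bit
	** registers; each bit position enables hashing for one packet
	** classifier type (PCTYPE), so only the selected flow types
	** feed the RSS hash.
	*/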
3267 	/* Populate the LUT with the max no. of queues in round-robin fashion */
3268 for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3269 if (j == vsi->num_queues)
3273 * Fetch the RSS bucket id for the given indirection entry.
3274 * Cap it at the number of configured buckets (which is
3277 que_id = rss_get_indirection_to_bucket(i);
3278 que_id = que_id % vsi->num_queues;
3282 /* lut = 4-byte sliding window of 4 lut entries */
3283 lut = (lut << 8) | (que_id &
3284 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3285 /* On i = 3, we have 4 entries in lut; write to the register */
3287 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
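		/*
		** Illustration: with num_queues = 4 the queue ids cycle
		** 0,1,2,3, so after i = 3 the window holds 0x00010203
		** (earliest entry in the high byte) and one wr32() commits
		** four LUT entries; the pattern repeats across the table.
		*/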
3294 ** This routine is run via a vlan config EVENT;
3295 ** it enables us to use the HW filter table since
3296 ** we can get the vlan id. It just creates the
3297 ** entry in the soft version of the VFTA; init will
3298 ** repopulate the real table.
3301 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3303 struct ixl_vsi *vsi = ifp->if_softc;
3304 struct i40e_hw *hw = vsi->hw;
3305 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3307 if (ifp->if_softc != arg) /* Not our event */
3310 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3315 ixl_add_filter(vsi, hw->mac.addr, vtag);
3320 ** This routine is run via a vlan
3321 ** unconfig EVENT; it removes our entry
3322 ** from the soft VFTA.
3325 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3327 struct ixl_vsi *vsi = ifp->if_softc;
3328 struct i40e_hw *hw = vsi->hw;
3329 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3331 if (ifp->if_softc != arg)
3334 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3339 ixl_del_filter(vsi, hw->mac.addr, vtag);
3344 ** This routine updates vlan filters; called by init,
3345 ** it scans the filter table and then updates the hw
3346 ** after a soft reset.
3349 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3351 struct ixl_mac_filter *f;
3354 if (vsi->num_vlans == 0)
3357 ** Scan the filter list for vlan entries,
3358 ** mark them for addition and then call
3359 ** for the AQ update.
3361 SLIST_FOREACH(f, &vsi->ftl, next) {
3362 if (f->flags & IXL_FILTER_VLAN) {
3370 printf("setup vlan: no filters found!\n");
3373 flags = IXL_FILTER_VLAN;
3374 flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3375 ixl_add_hw_filters(vsi, flags, cnt);
3380 ** Initialize filter list and add filters that the hardware
3381 ** needs to know about.
3384 ixl_init_filters(struct ixl_vsi *vsi)
3386 /* Add broadcast address */
3387 u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
3388 ixl_add_filter(vsi, bc, IXL_VLAN_ANY);
3392 ** This routine adds multicast filters
3395 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3397 struct ixl_mac_filter *f;
3399 	/* Does one already exist? */
3400 f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3404 f = ixl_get_filter(vsi);
3406 printf("WARNING: no filter available!!\n");
3409 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3410 f->vlan = IXL_VLAN_ANY;
3411 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3418 ** This routine adds macvlan filters
3421 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3423 struct ixl_mac_filter *f, *tmp;
3424 device_t dev = vsi->dev;
3426 DEBUGOUT("ixl_add_filter: begin");
3428 	/* Does one already exist? */
3429 f = ixl_find_filter(vsi, macaddr, vlan);
3433 	** Is this the first vlan being registered? If so, we
3434 	** need to remove the ANY filter that indicates we are
3435 	** not in a vlan, and replace it with a 0 filter.
3437 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3438 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3440 ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3441 ixl_add_filter(vsi, macaddr, 0);
3445 f = ixl_get_filter(vsi);
3447 		device_printf(dev, "WARNING: no filter available!\n");
3450 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3452 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3453 if (f->vlan != IXL_VLAN_ANY)
3454 f->flags |= IXL_FILTER_VLAN;
3456 ixl_add_hw_filters(vsi, f->flags, 1);
3461 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3463 struct ixl_mac_filter *f;
3465 f = ixl_find_filter(vsi, macaddr, vlan);
3469 f->flags |= IXL_FILTER_DEL;
3470 ixl_del_hw_filters(vsi, 1);
3472 /* Check if this is the last vlan removal */
3473 if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3474 /* Switch back to a non-vlan filter */
3475 ixl_del_filter(vsi, macaddr, 0);
3476 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3482 ** Find the filter with both matching mac addr and vlan id
3484 static struct ixl_mac_filter *
3485 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3487 struct ixl_mac_filter *f;
3490 SLIST_FOREACH(f, &vsi->ftl, next) {
3491 if (!cmp_etheraddr(f->macaddr, macaddr))
3493 if (f->vlan == vlan) {
3505 ** This routine takes additions to the vsi filter
3506 ** table and creates an Admin Queue call to create
3507 ** the filters in the hardware.
3510 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3512 struct i40e_aqc_add_macvlan_element_data *a, *b;
3513 struct ixl_mac_filter *f;
3514 struct i40e_hw *hw = vsi->hw;
3515 device_t dev = vsi->dev;
3518 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3519 M_DEVBUF, M_NOWAIT | M_ZERO);
3521 device_printf(dev, "add_hw_filters failed to get memory\n");
3526 	** Scan the filter list; each time we find one
3527 	** we add it to the admin queue array and turn off
3530 SLIST_FOREACH(f, &vsi->ftl, next) {
3531 if (f->flags == flags) {
3532 b = &a[j]; // a pox on fvl long names :)
3533 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3535 (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3536 b->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3537 f->flags &= ~IXL_FILTER_ADD;
3544 err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3546 device_printf(dev, "aq_add_macvlan err %d, "
3547 "aq_error %d\n", err, hw->aq.asq_last_status);
3549 vsi->hw_filters_add += j;
3556 ** This routine takes removals in the vsi filter
3557 ** table and creates an Admin Queue call to delete
3558 ** the filters in the hardware.
3561 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3563 struct i40e_aqc_remove_macvlan_element_data *d, *e;
3564 struct i40e_hw *hw = vsi->hw;
3565 device_t dev = vsi->dev;
3566 struct ixl_mac_filter *f, *f_temp;
3569 DEBUGOUT("ixl_del_hw_filters: begin\n");
3571 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3572 M_DEVBUF, M_NOWAIT | M_ZERO);
3574 printf("del hw filter failed to get memory\n");
3578 SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3579 if (f->flags & IXL_FILTER_DEL) {
3580 e = &d[j]; // a pox on fvl long names :)
3581 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3582 e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3583 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3584 /* delete entry from vsi list */
3585 SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3593 err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3594 /* NOTE: returns ENOENT every time but seems to work fine,
3595 so we'll ignore that specific error. */
3596 	// TODO: Does this still occur on current firmware?
3597 if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3599 for (int i = 0; i < j; i++)
3600 sc += (!d[i].error_code);
3601 vsi->hw_filters_del += sc;
3603 "Failed to remove %d/%d filters, aq error %d\n",
3604 j - sc, j, hw->aq.asq_last_status);
3606 vsi->hw_filters_del += j;
3610 DEBUGOUT("ixl_del_hw_filters: end\n");
3616 ixl_enable_rings(struct ixl_vsi *vsi)
3618 struct i40e_hw *hw = vsi->hw;
3621 for (int i = 0; i < vsi->num_queues; i++) {
3622 i40e_pre_tx_queue_cfg(hw, i, TRUE);
3624 reg = rd32(hw, I40E_QTX_ENA(i));
3625 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3626 I40E_QTX_ENA_QENA_STAT_MASK;
3627 wr32(hw, I40E_QTX_ENA(i), reg);
3628 /* Verify the enable took */
3629 for (int j = 0; j < 10; j++) {
3630 reg = rd32(hw, I40E_QTX_ENA(i));
3631 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3633 i40e_msec_delay(10);
3635 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
3636 printf("TX queue %d disabled!\n", i);
3638 reg = rd32(hw, I40E_QRX_ENA(i));
3639 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3640 I40E_QRX_ENA_QENA_STAT_MASK;
3641 wr32(hw, I40E_QRX_ENA(i), reg);
3642 /* Verify the enable took */
3643 for (int j = 0; j < 10; j++) {
3644 reg = rd32(hw, I40E_QRX_ENA(i));
3645 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3647 i40e_msec_delay(10);
3649 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3650 printf("RX queue %d disabled!\n", i);
3655 ixl_disable_rings(struct ixl_vsi *vsi)
3657 struct i40e_hw *hw = vsi->hw;
3660 for (int i = 0; i < vsi->num_queues; i++) {
3661 i40e_pre_tx_queue_cfg(hw, i, FALSE);
3662 i40e_usec_delay(500);
3664 reg = rd32(hw, I40E_QTX_ENA(i));
3665 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3666 wr32(hw, I40E_QTX_ENA(i), reg);
3667 /* Verify the disable took */
3668 for (int j = 0; j < 10; j++) {
3669 reg = rd32(hw, I40E_QTX_ENA(i));
3670 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3672 i40e_msec_delay(10);
3674 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3675 printf("TX queue %d still enabled!\n", i);
3677 reg = rd32(hw, I40E_QRX_ENA(i));
3678 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3679 wr32(hw, I40E_QRX_ENA(i), reg);
3680 /* Verify the disable took */
3681 for (int j = 0; j < 10; j++) {
3682 reg = rd32(hw, I40E_QRX_ENA(i));
3683 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3685 i40e_msec_delay(10);
3687 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3688 printf("RX queue %d still enabled!\n", i);
3693 * ixl_handle_mdd_event
3695  * Called from the interrupt handler to identify possibly malicious VFs
3696  * (it also detects events from the PF)
3698 static void ixl_handle_mdd_event(struct ixl_pf *pf)
3700 struct i40e_hw *hw = &pf->hw;
3701 device_t dev = pf->dev;
3702 bool mdd_detected = false;
3703 bool pf_mdd_detected = false;
3706 /* find what triggered the MDD event */
3707 reg = rd32(hw, I40E_GL_MDET_TX);
3708 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3709 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3710 I40E_GL_MDET_TX_PF_NUM_SHIFT;
3711 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3712 I40E_GL_MDET_TX_EVENT_SHIFT;
3713 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3714 I40E_GL_MDET_TX_QUEUE_SHIFT;
3716 "Malicious Driver Detection event 0x%02x"
3717 " on TX queue %d pf number 0x%02x\n",
3718 event, queue, pf_num);
3719 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3720 mdd_detected = true;
3722 reg = rd32(hw, I40E_GL_MDET_RX);
3723 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3724 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3725 I40E_GL_MDET_RX_FUNCTION_SHIFT;
3726 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3727 I40E_GL_MDET_RX_EVENT_SHIFT;
3728 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3729 I40E_GL_MDET_RX_QUEUE_SHIFT;
3731 "Malicious Driver Detection event 0x%02x"
3732 " on RX queue %d of function 0x%02x\n",
3733 event, queue, func);
3734 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3735 mdd_detected = true;
3739 reg = rd32(hw, I40E_PF_MDET_TX);
3740 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3741 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3743 "MDD TX event is for this function 0x%08x",
3745 pf_mdd_detected = true;
3747 reg = rd32(hw, I40E_PF_MDET_RX);
3748 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3749 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3751 "MDD RX event is for this function 0x%08x",
3753 pf_mdd_detected = true;
3757 /* re-enable mdd interrupt cause */
3758 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3759 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3760 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3765 ixl_enable_intr(struct ixl_vsi *vsi)
3767 struct i40e_hw *hw = vsi->hw;
3768 struct ixl_queue *que = vsi->queues;
3770 if (ixl_enable_msix) {
3771 ixl_enable_adminq(hw);
3772 for (int i = 0; i < vsi->num_queues; i++, que++)
3773 ixl_enable_queue(hw, que->me);
3775 ixl_enable_legacy(hw);
3779 ixl_disable_intr(struct ixl_vsi *vsi)
3781 struct i40e_hw *hw = vsi->hw;
3782 struct ixl_queue *que = vsi->queues;
3784 if (ixl_enable_msix) {
3785 ixl_disable_adminq(hw);
3786 for (int i = 0; i < vsi->num_queues; i++, que++)
3787 ixl_disable_queue(hw, que->me);
3789 ixl_disable_legacy(hw);
3793 ixl_enable_adminq(struct i40e_hw *hw)
3797 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3798 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3799 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3800 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3806 ixl_disable_adminq(struct i40e_hw *hw)
3810 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3811 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3817 ixl_enable_queue(struct i40e_hw *hw, int id)
3821 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3822 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3823 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3824 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
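	/*
	** Reading the bits: INTENA arms the vector, CLEARPBA clears
	** any already-pending bit so a stale event doesn't fire on
	** enable, and ITR_INDX = IXL_ITR_NONE selects the no-ITR
	** index so the throttle timers are left alone.
	*/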
3828 ixl_disable_queue(struct i40e_hw *hw, int id)
3832 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3833 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3839 ixl_enable_legacy(struct i40e_hw *hw)
3842 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3843 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3844 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3845 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3849 ixl_disable_legacy(struct i40e_hw *hw)
3853 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3854 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3860 ixl_update_stats_counters(struct ixl_pf *pf)
3862 struct i40e_hw *hw = &pf->hw;
3863 struct ixl_vsi *vsi = &pf->vsi;
3865 struct i40e_hw_port_stats *nsd = &pf->stats;
3866 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3868 /* Update hw stats */
3869 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3870 pf->stat_offsets_loaded,
3871 &osd->crc_errors, &nsd->crc_errors);
3872 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3873 pf->stat_offsets_loaded,
3874 &osd->illegal_bytes, &nsd->illegal_bytes);
3875 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3876 I40E_GLPRT_GORCL(hw->port),
3877 pf->stat_offsets_loaded,
3878 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3879 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3880 I40E_GLPRT_GOTCL(hw->port),
3881 pf->stat_offsets_loaded,
3882 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3883 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3884 pf->stat_offsets_loaded,
3885 &osd->eth.rx_discards,
3886 &nsd->eth.rx_discards);
3887 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3888 I40E_GLPRT_UPRCL(hw->port),
3889 pf->stat_offsets_loaded,
3890 &osd->eth.rx_unicast,
3891 &nsd->eth.rx_unicast);
3892 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3893 I40E_GLPRT_UPTCL(hw->port),
3894 pf->stat_offsets_loaded,
3895 &osd->eth.tx_unicast,
3896 &nsd->eth.tx_unicast);
3897 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3898 I40E_GLPRT_MPRCL(hw->port),
3899 pf->stat_offsets_loaded,
3900 &osd->eth.rx_multicast,
3901 &nsd->eth.rx_multicast);
3902 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3903 I40E_GLPRT_MPTCL(hw->port),
3904 pf->stat_offsets_loaded,
3905 &osd->eth.tx_multicast,
3906 &nsd->eth.tx_multicast);
3907 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3908 I40E_GLPRT_BPRCL(hw->port),
3909 pf->stat_offsets_loaded,
3910 &osd->eth.rx_broadcast,
3911 &nsd->eth.rx_broadcast);
3912 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3913 I40E_GLPRT_BPTCL(hw->port),
3914 pf->stat_offsets_loaded,
3915 &osd->eth.tx_broadcast,
3916 &nsd->eth.tx_broadcast);
3918 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3919 pf->stat_offsets_loaded,
3920 &osd->tx_dropped_link_down,
3921 &nsd->tx_dropped_link_down);
3922 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3923 pf->stat_offsets_loaded,
3924 &osd->mac_local_faults,
3925 &nsd->mac_local_faults);
3926 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3927 pf->stat_offsets_loaded,
3928 &osd->mac_remote_faults,
3929 &nsd->mac_remote_faults);
3930 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3931 pf->stat_offsets_loaded,
3932 &osd->rx_length_errors,
3933 &nsd->rx_length_errors);
3935 /* Flow control (LFC) stats */
3936 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3937 pf->stat_offsets_loaded,
3938 &osd->link_xon_rx, &nsd->link_xon_rx);
3939 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3940 pf->stat_offsets_loaded,
3941 &osd->link_xon_tx, &nsd->link_xon_tx);
3942 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3943 pf->stat_offsets_loaded,
3944 &osd->link_xoff_rx, &nsd->link_xoff_rx);
3945 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3946 pf->stat_offsets_loaded,
3947 &osd->link_xoff_tx, &nsd->link_xoff_tx);
3949 /* Packet size stats rx */
3950 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3951 I40E_GLPRT_PRC64L(hw->port),
3952 pf->stat_offsets_loaded,
3953 &osd->rx_size_64, &nsd->rx_size_64);
3954 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3955 I40E_GLPRT_PRC127L(hw->port),
3956 pf->stat_offsets_loaded,
3957 &osd->rx_size_127, &nsd->rx_size_127);
3958 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3959 I40E_GLPRT_PRC255L(hw->port),
3960 pf->stat_offsets_loaded,
3961 &osd->rx_size_255, &nsd->rx_size_255);
3962 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3963 I40E_GLPRT_PRC511L(hw->port),
3964 pf->stat_offsets_loaded,
3965 &osd->rx_size_511, &nsd->rx_size_511);
3966 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3967 I40E_GLPRT_PRC1023L(hw->port),
3968 pf->stat_offsets_loaded,
3969 &osd->rx_size_1023, &nsd->rx_size_1023);
3970 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3971 I40E_GLPRT_PRC1522L(hw->port),
3972 pf->stat_offsets_loaded,
3973 &osd->rx_size_1522, &nsd->rx_size_1522);
3974 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3975 I40E_GLPRT_PRC9522L(hw->port),
3976 pf->stat_offsets_loaded,
3977 &osd->rx_size_big, &nsd->rx_size_big);
3979 /* Packet size stats tx */
3980 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3981 I40E_GLPRT_PTC64L(hw->port),
3982 pf->stat_offsets_loaded,
3983 &osd->tx_size_64, &nsd->tx_size_64);
3984 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3985 I40E_GLPRT_PTC127L(hw->port),
3986 pf->stat_offsets_loaded,
3987 &osd->tx_size_127, &nsd->tx_size_127);
3988 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3989 I40E_GLPRT_PTC255L(hw->port),
3990 pf->stat_offsets_loaded,
3991 &osd->tx_size_255, &nsd->tx_size_255);
3992 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3993 I40E_GLPRT_PTC511L(hw->port),
3994 pf->stat_offsets_loaded,
3995 &osd->tx_size_511, &nsd->tx_size_511);
3996 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3997 I40E_GLPRT_PTC1023L(hw->port),
3998 pf->stat_offsets_loaded,
3999 &osd->tx_size_1023, &nsd->tx_size_1023);
4000 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
4001 I40E_GLPRT_PTC1522L(hw->port),
4002 pf->stat_offsets_loaded,
4003 &osd->tx_size_1522, &nsd->tx_size_1522);
4004 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
4005 I40E_GLPRT_PTC9522L(hw->port),
4006 pf->stat_offsets_loaded,
4007 &osd->tx_size_big, &nsd->tx_size_big);
4009 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
4010 pf->stat_offsets_loaded,
4011 &osd->rx_undersize, &nsd->rx_undersize);
4012 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
4013 pf->stat_offsets_loaded,
4014 &osd->rx_fragments, &nsd->rx_fragments);
4015 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
4016 pf->stat_offsets_loaded,
4017 &osd->rx_oversize, &nsd->rx_oversize);
4018 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
4019 pf->stat_offsets_loaded,
4020 &osd->rx_jabber, &nsd->rx_jabber);
4021 pf->stat_offsets_loaded = true;
4024 /* Update vsi stats */
4025 ixl_update_eth_stats(vsi);
4028 // ERJ - these are per-port, update all vsis?
4029 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes);
4033 ** Tasklet handler for MSIX Adminq interrupts
4034 ** - done outside the interrupt context since it might sleep
4037 ixl_do_adminq(void *context, int pending)
4039 struct ixl_pf *pf = context;
4040 struct i40e_hw *hw = &pf->hw;
4041 struct ixl_vsi *vsi = &pf->vsi;
4042 struct i40e_arq_event_info event;
4047 event.buf_len = IXL_AQ_BUF_SZ;
4048 event.msg_buf = malloc(event.buf_len,
4049 M_DEVBUF, M_NOWAIT | M_ZERO);
4050 if (!event.msg_buf) {
4051 printf("Unable to allocate adminq memory\n");
4055 /* clean and process any events */
4057 ret = i40e_clean_arq_element(hw, &event, &result);
4060 opcode = LE16_TO_CPU(event.desc.opcode);
4062 case i40e_aqc_opc_get_link_status:
4063 vsi->link_up = ixl_config_link(hw);
4064 ixl_update_link_status(pf);
4066 case i40e_aqc_opc_send_msg_to_pf:
4067 /* process pf/vf communication here */
4069 case i40e_aqc_opc_event_lan_overflow:
4073 printf("AdminQ unknown event %x\n", opcode);
4078 } while (result && (loop++ < IXL_ADM_LIMIT));
4080 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4081 reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4082 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4083 free(event.msg_buf, M_DEVBUF);
4086 ixl_enable_adminq(&pf->hw);
4088 ixl_enable_intr(vsi);
4092 ixl_debug_info(SYSCTL_HANDLER_ARGS)
4095 int error, input = 0;
4097 error = sysctl_handle_int(oidp, &input, 0, req);
4099 if (error || !req->newptr)
4103 pf = (struct ixl_pf *)arg1;
4104 ixl_print_debug_info(pf);
4111 ixl_print_debug_info(struct ixl_pf *pf)
4113 struct i40e_hw *hw = &pf->hw;
4114 struct ixl_vsi *vsi = &pf->vsi;
4115 struct ixl_queue *que = vsi->queues;
4116 struct rx_ring *rxr = &que->rxr;
4117 struct tx_ring *txr = &que->txr;
4121 printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4122 printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4123 printf("RX next check = %x\n", rxr->next_check);
4124 printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4125 printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4126 printf("TX desc avail = %x\n", txr->avail);
4128 reg = rd32(hw, I40E_GLV_GORCL(0xc));
4129 printf("RX Bytes = %x\n", reg);
4130 reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4131 printf("Port RX Bytes = %x\n", reg);
4132 reg = rd32(hw, I40E_GLV_RDPC(0xc));
4133 printf("RX discard = %x\n", reg);
4134 reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4135 printf("Port RX discard = %x\n", reg);
4137 reg = rd32(hw, I40E_GLV_TEPC(0xc));
4138 printf("TX errors = %x\n", reg);
4139 reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4140 printf("TX Bytes = %x\n", reg);
4142 reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4143 printf("RX undersize = %x\n", reg);
4144 reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4145 printf("RX fragments = %x\n", reg);
4146 reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4147 printf("RX oversize = %x\n", reg);
4148 reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4149 printf("RX length error = %x\n", reg);
4150 reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4151 printf("mac remote fault = %x\n", reg);
4152 reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4153 printf("mac local fault = %x\n", reg);
4157 * Update VSI-specific ethernet statistics counters.
4159 void ixl_update_eth_stats(struct ixl_vsi *vsi)
4161 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4162 struct i40e_hw *hw = &pf->hw;
4163 struct i40e_eth_stats *es;
4164 struct i40e_eth_stats *oes;
4166 uint64_t tx_discards;
4167 struct i40e_hw_port_stats *nsd;
4168 u16 stat_idx = vsi->info.stat_counter_idx;
4170 es = &vsi->eth_stats;
4171 oes = &vsi->eth_stats_offsets;
4174 /* Gather up the stats that the hw collects */
4175 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4176 vsi->stat_offsets_loaded,
4177 &oes->tx_errors, &es->tx_errors);
4178 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4179 vsi->stat_offsets_loaded,
4180 &oes->rx_discards, &es->rx_discards);
4182 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4183 I40E_GLV_GORCL(stat_idx),
4184 vsi->stat_offsets_loaded,
4185 &oes->rx_bytes, &es->rx_bytes);
4186 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4187 I40E_GLV_UPRCL(stat_idx),
4188 vsi->stat_offsets_loaded,
4189 &oes->rx_unicast, &es->rx_unicast);
4190 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4191 I40E_GLV_MPRCL(stat_idx),
4192 vsi->stat_offsets_loaded,
4193 &oes->rx_multicast, &es->rx_multicast);
4194 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4195 I40E_GLV_BPRCL(stat_idx),
4196 vsi->stat_offsets_loaded,
4197 &oes->rx_broadcast, &es->rx_broadcast);
4199 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4200 I40E_GLV_GOTCL(stat_idx),
4201 vsi->stat_offsets_loaded,
4202 &oes->tx_bytes, &es->tx_bytes);
4203 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4204 I40E_GLV_UPTCL(stat_idx),
4205 vsi->stat_offsets_loaded,
4206 &oes->tx_unicast, &es->tx_unicast);
4207 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4208 I40E_GLV_MPTCL(stat_idx),
4209 vsi->stat_offsets_loaded,
4210 &oes->tx_multicast, &es->tx_multicast);
4211 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4212 I40E_GLV_BPTCL(stat_idx),
4213 vsi->stat_offsets_loaded,
4214 &oes->tx_broadcast, &es->tx_broadcast);
4215 vsi->stat_offsets_loaded = true;
4217 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4218 for (i = 0; i < vsi->num_queues; i++)
4219 tx_discards += vsi->queues[i].txr.br->br_drops;
4221 /* Update ifnet stats */
4222 IXL_SET_IPACKETS(vsi, es->rx_unicast +
4225 IXL_SET_OPACKETS(vsi, es->tx_unicast +
4228 IXL_SET_IBYTES(vsi, es->rx_bytes);
4229 IXL_SET_OBYTES(vsi, es->tx_bytes);
4230 IXL_SET_IMCASTS(vsi, es->rx_multicast);
4231 IXL_SET_OMCASTS(vsi, es->tx_multicast);
4233 IXL_SET_OERRORS(vsi, es->tx_errors);
4234 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4235 IXL_SET_OQDROPS(vsi, tx_discards);
4236 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4237 IXL_SET_COLLISIONS(vsi, 0);
/**
 * Reset all of the stats for the given pf
 **/
void ixl_pf_reset_stats(struct ixl_pf *pf)
{
	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
	pf->stat_offsets_loaded = false;
}
/**
 * Resets all stats of the given vsi
 **/
void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
{
	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
	vsi->stat_offsets_loaded = false;
}
/**
 * Read and update a 48 bit stat from the hw
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.
 **/
static void
ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
	bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
	new_data = rd64(hw, loreg);
#else
	/*
	 * Use two rd32's instead of one rd64; FreeBSD versions before
	 * 10 don't support 8-byte bus reads/writes.
	 */
	new_data = rd32(hw, loreg);
	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
#endif

	if (!offset_loaded)
		*offset = new_data;
	if (new_data >= *offset)
		*stat = new_data - *offset;
	else
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
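/*
** Worked example of the 48-bit wrap handling above (illustrative values
** only): with a saved offset of 0xFFFFFFFFFFF0 and a post-wrap reading of
** 0x10, new_data < *offset, so the reported delta is
**   (0x10 + ((u64)1 << 48) - 0xFFFFFFFFFFF0) & 0xFFFFFFFFFFFF = 0x20,
** i.e. 32 units counted across the rollover instead of a huge bogus value.
*/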
/**
 * Read and update a 32 bit stat from the hw
 **/
static void
ixl_stat_update32(struct i40e_hw *hw, u32 reg,
	bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (new_data >= *offset)
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}
/*
** Set flow control using sysctl:
**	0 - off
**	1 - rx pause
**	2 - tx pause
**	3 - full
*/
static int
ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
	/*
	 * TODO: ensure flow control is disabled if
	 * priority flow control is enabled
	 *
	 * TODO: ensure tx CRC by hardware should be enabled
	 * if tx flow control is enabled.
	 */
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int error = 0;
	enum i40e_status_code aq_error = 0;
	u8 fc_aq_err = 0;

	/* Get requested mode */
	error = sysctl_handle_int(oidp, &pf->fc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->fc < 0 || pf->fc > 3) {
		device_printf(dev,
		    "Invalid fc mode; valid modes are 0 through 3\n");
		return (EINVAL);
	}

	/*
	** Changing flow control mode currently does not work on
	** 40GBASE-CR4 PHYs
	*/
	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
	    || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
		device_printf(dev, "Changing flow control mode unsupported"
		    " on 40GBase-CR4 media.\n");
		return (ENODEV);
	}

	/* Set fc ability for port */
	hw->fc.requested_mode = pf->fc;
	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error setting new fc mode %d; fc_err %#x\n",
		    __func__, aq_error, fc_aq_err);
		return (EAGAIN);
	}

	return (0);
}
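/*
** Example usage from userland, assuming this handler is attached as the
** "fc" node of unit 0 (unit number is hypothetical):
**   # sysctl dev.ixl.0.fc=3		(request full rx/tx pause)
** The new mode takes effect once i40e_set_fc() programs the port.
*/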
static int
ixl_current_speed(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	int error = 0, index = 0;

	char *speeds[] = {
		"Unknown",
		"100M",
		"1G",
		"10G",
		"40G",
		"20G"
	};

	ixl_update_link_status(pf);

	switch (hw->phy.link_info.link_speed) {
	case I40E_LINK_SPEED_100MB:
		index = 1;
		break;
	case I40E_LINK_SPEED_1GB:
		index = 2;
		break;
	case I40E_LINK_SPEED_10GB:
		index = 3;
		break;
	case I40E_LINK_SPEED_40GB:
		index = 4;
		break;
	case I40E_LINK_SPEED_20GB:
		index = 5;
		break;
	case I40E_LINK_SPEED_UNKNOWN:
	default:
		index = 0;
		break;
	}

	error = sysctl_handle_string(oidp, speeds[index],
	    strlen(speeds[index]), req);
	return (error);
}
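/*
** Read-only companion example (node name and unit are assumed, and the
** reported value is hypothetical):
**   # sysctl dev.ixl.0.current_speed
**   dev.ixl.0.current_speed: 10G
** The handler refreshes link status from the adapter before reporting.
*/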
int
ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_aq_set_phy_config config;
	enum i40e_status_code aq_error = 0;

	/* Get current capability information */
	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, FALSE, &abilities, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error getting phy capabilities %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return (EAGAIN);
	}

	/* Prepare new config */
	bzero(&config, sizeof(config));
	config.phy_type = abilities.phy_type;
	config.abilities = abilities.abilities
	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	config.eee_capability = abilities.eee_capability;
	config.eeer = abilities.eeer_val;
	config.low_power_ctrl = abilities.d3_lpan;
	/* Translate requested flags into aq cmd link_speed */
	if (speeds & 0x4)
		config.link_speed |= I40E_LINK_SPEED_10GB;
	if (speeds & 0x2)
		config.link_speed |= I40E_LINK_SPEED_1GB;
	if (speeds & 0x1)
		config.link_speed |= I40E_LINK_SPEED_100MB;

	/* Do aq command & restart link */
	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error setting new phy config %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return (EAGAIN);
	}

	/*
	** This seems a bit heavy handed, but we
	** need to get a reinit on some devices
	*/
	IXL_PF_LOCK(pf);
	ixl_stop(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);

	return (0);
}
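/*
** Example of the flag translation above: a caller passing speeds = 0x6
** (0x2 | 0x4) leaves 100 Mb unset and ORs both I40E_LINK_SPEED_1GB and
** I40E_LINK_SPEED_10GB into config.link_speed before the AQ command is
** issued.
*/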
/*
** Control link advertise speed:
**	Flags:
**	0x1 - advertise 100 Mb
**	0x2 - advertise 1G
**	0x4 - advertise 10G
**
** Does not work on 40G devices.
*/
static int
ixl_set_advertise(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int requested_ls = 0;
	int error = 0;

	/*
	** FW doesn't support changing advertised speed
	** for 40G devices; speed is always 40G.
	*/
	if (i40e_is_40G_device(hw->device_id))
		return (ENODEV);

	/* Read in new mode */
	requested_ls = pf->advertised_speed;
	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (requested_ls < 1 || requested_ls > 7) {
		device_printf(dev,
		    "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
		return (EINVAL);
	}

	/* Exit if no change */
	if (pf->advertised_speed == requested_ls)
		return (0);

	error = ixl_set_advertised_speeds(pf, requested_ls);
	if (error)
		return (error);

	pf->advertised_speed = requested_ls;
	ixl_update_link_status(pf);
	return (0);
}
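/*
** Example usage (the sysctl node name "advertise_speed" and the unit
** number are assumptions for illustration):
**   # sysctl dev.ixl.0.advertise_speed=0x4	(advertise 10G only)
** Masks outside 0x1-0x7 are rejected with EINVAL above.
*/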
/*
** Get the width and transaction speed of
** the bus this adapter is plugged into.
*/
static void
ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
{
	u16 link;
	u32 offset;

	/* Get the PCI Express Capabilities offset */
	pci_find_cap(dev, PCIY_EXPRESS, &offset);

	/* ...and read the Link Status Register */
	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);

	/* Decode negotiated link width */
	switch (link & I40E_PCI_LINK_WIDTH) {
	case I40E_PCI_LINK_WIDTH_1:
		hw->bus.width = i40e_bus_width_pcie_x1;
		break;
	case I40E_PCI_LINK_WIDTH_2:
		hw->bus.width = i40e_bus_width_pcie_x2;
		break;
	case I40E_PCI_LINK_WIDTH_4:
		hw->bus.width = i40e_bus_width_pcie_x4;
		break;
	case I40E_PCI_LINK_WIDTH_8:
		hw->bus.width = i40e_bus_width_pcie_x8;
		break;
	default:
		hw->bus.width = i40e_bus_width_unknown;
		break;
	}

	/* Decode negotiated link speed */
	switch (link & I40E_PCI_LINK_SPEED) {
	case I40E_PCI_LINK_SPEED_2500:
		hw->bus.speed = i40e_bus_speed_2500;
		break;
	case I40E_PCI_LINK_SPEED_5000:
		hw->bus.speed = i40e_bus_speed_5000;
		break;
	case I40E_PCI_LINK_SPEED_8000:
		hw->bus.speed = i40e_bus_speed_8000;
		break;
	default:
		hw->bus.speed = i40e_bus_speed_unknown;
		break;
	}

	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s" :
	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s" :
	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s" : "Unknown"),
	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
	    "Unknown");

	if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
	    (hw->bus.speed < i40e_bus_speed_8000)) {
		device_printf(dev, "PCI-Express bandwidth available"
		    " for this device is not sufficient for"
		    " normal operation.\n");
		device_printf(dev, "For expected performance a x8 "
		    "PCIE Gen3 slot is required.\n");
	}
}
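/*
** Back-of-the-envelope for the warning above: a PCIe Gen2 x8 link runs
** 8 lanes x 5 GT/s with 8b/10b encoding, roughly 32 Gb/s of usable
** bandwidth per direction, which is below the 40 Gb/s a single XL710
** port can carry; a Gen3 x8 slot (~63 Gb/s with 128b/130b encoding)
** avoids the bottleneck.
*/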
static int
ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	char buf[32];

	snprintf(buf, sizeof(buf),
	    "f%d.%d a%d.%d n%02x.%02x e%08x",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
	    IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
	    IXL_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack);
	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
}
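/*
** The string above packs firmware (f), AQ API (a), NVM image (n) and
** EETrack (e) versions into one token; a purely hypothetical example of
** the output shape: "f4.40 a1.4 n04.53 e80001dbc".
*/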
#ifdef IXL_DEBUG_SYSCTL
static int
ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status link_status;
	char buf[512];
	enum i40e_status_code aq_error = 0;

	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
	if (aq_error) {
		printf("i40e_aq_get_link_info() error %d\n", aq_error);
		return (EPERM);
	}

	sprintf(buf, "\n"
	    "PHY Type : %#04x\n"
	    "Speed    : %#04x\n"
	    "Link info: %#04x\n"
	    "AN info  : %#04x\n"
	    "Ext info : %#04x",
	    link_status.phy_type, link_status.link_speed,
	    link_status.link_info, link_status.an_info,
	    link_status.ext_info);

	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
}
static int
ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_aq_get_phy_abilities_resp abilities_resp;
	char buf[512];
	enum i40e_status_code aq_error = 0;

	// TODO: Print out list of qualified modules as well?
	aq_error = i40e_aq_get_phy_capabilities(hw, TRUE, FALSE,
	    &abilities_resp, NULL);
	if (aq_error) {
		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
		return (EPERM);
	}

	sprintf(buf, "\n"
	    "PHY Type : %#010x\n"
	    "Speed    : %#04x\n"
	    "Abilities: %#04x\n"
	    "EEE cap  : %#06x\n"
	    "EEER reg : %#010x\n"
	    "D3 Lpan  : %#04x",
	    abilities_resp.phy_type, abilities_resp.link_speed,
	    abilities_resp.abilities, abilities_resp.eee_capability,
	    abilities_resp.eeer_val, abilities_resp.d3_lpan);

	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
}
static int
ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_mac_filter *f;
	char *buf, *buf_i;

	int error = 0;
	int ftl_len = 0;
	int ftl_counter = 0;
	int buf_len = 0;
	int entry_len = 42;	/* formatted length of one filter line below */

	SLIST_FOREACH(f, &vsi->ftl, next)
		ftl_len++;

	if (ftl_len < 1) {
		sysctl_handle_string(oidp, "(none)", 6, req);
		return (0);
	}

	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
	if (buf == NULL) {
		/* M_NOWAIT allocations can fail; don't dereference NULL */
		device_printf(pf->dev,
		    "Could not allocate filter list buffer\n");
		return (ENOMEM);
	}

	sprintf(buf_i++, "\n");
	SLIST_FOREACH(f, &vsi->ftl, next) {
		sprintf(buf_i,
		    MAC_FORMAT ", vlan %4d, flags %#06x",
		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
		buf_i += entry_len;
		/* don't print '\n' for last entry */
		if (++ftl_counter != ftl_len) {
			sprintf(buf_i, "\n");
			buf_i++;
		}
	}

	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
	if (error)
		printf("sysctl error: %d\n", error);
	free(buf, M_DEVBUF);
	return (error);
}
#define IXL_SW_RES_SIZE 0x14
static int
ixl_res_alloc_cmp(const void *a, const void *b)
{
	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;

	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;

	return ((int)one->resource_type - (int)two->resource_type);
}
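/*
** The comparator sorts ascending by resource_type for display. Returning
** the plain difference is safe here because resource_type is a narrow
** unsigned field, so the subtraction cannot overflow once both operands
** are widened to int.
*/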
static int
ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;

	u8 num_entries;
	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];

	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	bzero(resp, sizeof(resp));
	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
	    resp, IXL_SW_RES_SIZE, NULL);
	if (error) {
		device_printf(dev,
		    "%s: get_switch_resource_alloc() error %d, aq error %d\n",
		    __func__, error, hw->aq.asq_last_status);
		sbuf_delete(buf);
		return (error);
	}

	/* Sort entries by type for display */
	qsort(resp, num_entries,
	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
	    &ixl_res_alloc_cmp);

	sbuf_cat(buf, "\n");
	sbuf_printf(buf, "# of entries: %d\n", num_entries);
	sbuf_printf(buf,
	    "Type | Guaranteed | Total | Used   | Un-allocated\n"
	    "     | (this)     | (all) | (this) | (all)       \n");
	for (int i = 0; i < num_entries; i++) {
		sbuf_printf(buf,
		    "%#4x | %10d   %5d   %6d   %12d",
		    resp[i].resource_type,
		    resp[i].guaranteed,
		    resp[i].total,
		    resp[i].used,
		    resp[i].total_unalloced);
		if (i < num_entries - 1)
			sbuf_cat(buf, "\n");
	}

	error = sbuf_finish(buf);
	if (error) {
		device_printf(dev, "Error finishing sbuf: %d\n", error);
		sbuf_delete(buf);
		return (error);
	}

	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
	if (error)
		device_printf(dev, "sysctl error: %d\n", error);
	sbuf_delete(buf);
	return (error);
}
/*
** Caller must init and delete sbuf; this function will clear and
** finish it for caller.
*/
static char *
ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
{
	sbuf_clear(s);

	if (seid == 0 && uplink)
		sbuf_cat(s, "Network");
	else if (seid == 0)
		sbuf_cat(s, "Host");
	else if (seid == 1)
		sbuf_cat(s, "EMP");
	else if (seid <= 5)
		sbuf_printf(s, "MAC %d", seid - 2);
	else if (seid <= 15)
		sbuf_cat(s, "Reserved");
	else if (seid <= 31)
		sbuf_printf(s, "PF %d", seid - 16);
	else if (seid <= 159)
		sbuf_printf(s, "VF %d", seid - 32);
	else if (seid <= 287)
		sbuf_cat(s, "Reserved");
	else if (seid <= 511)
		sbuf_cat(s, "Other"); // for other structures
	else if (seid <= 895)
		sbuf_printf(s, "VSI %d", seid - 512);
	else if (seid <= 1023)
		sbuf_cat(s, "Reserved");
	else
		sbuf_cat(s, "Invalid");

	sbuf_finish(s);
	return sbuf_data(s);
}
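/*
** Examples of the SEID decoding above: seid 16 maps to "PF 0", seid 32
** to "VF 0", and seid 512 to "VSI 0"; seid 0 reads "Network" when it
** names an uplink and "Host" otherwise.
*/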
static int
ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	struct sbuf *nmbuf;
	int error = 0;
	u16 next = 0;
	u8 aq_buf[I40E_AQ_LARGE_BUF];

	struct i40e_aqc_get_switch_config_resp *sw_config;
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;

	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
		return (ENOMEM);
	}

	error = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (error) {
		device_printf(dev,
		    "%s: aq_get_switch_config() error %d, aq error %d\n",
		    __func__, error, hw->aq.asq_last_status);
		sbuf_delete(buf);
		return (error);
	}

	nmbuf = sbuf_new_auto();
	if (!nmbuf) {
		device_printf(dev, "Could not allocate sbuf for name output.\n");
		sbuf_delete(buf);
		return (ENOMEM);
	}

	sbuf_cat(buf, "\n");
	// Assuming <= 255 elements in switch
	sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
	/*
	** Revision -- all elements are revision 1 for now
	*/
	sbuf_printf(buf,
	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
	    "                |          |          | (uplink)\n");
	for (int i = 0; i < sw_config->header.num_reported; i++) {
		// "%4d (%8s) | %8s   %8s   %#8x"
		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
		sbuf_cat(buf, " ");
		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
		    sw_config->element[i].seid, false));
		sbuf_cat(buf, " | ");
		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
		    sw_config->element[i].uplink_seid, true));
		sbuf_cat(buf, "   ");
		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
		    sw_config->element[i].downlink_seid, false));
		sbuf_cat(buf, "   ");
		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
		if (i < sw_config->header.num_reported - 1)
			sbuf_cat(buf, "\n");
	}
	sbuf_delete(nmbuf);

	error = sbuf_finish(buf);
	if (error) {
		device_printf(dev, "Error finishing sbuf: %d\n", error);
		sbuf_delete(buf);
		return (error);
	}

	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
	if (error)
		device_printf(dev, "sysctl error: %d\n", error);
	sbuf_delete(buf);
	return (error);
}
/*
** Dump TX descriptor given index.
** Doesn't work; don't use.
** TODO: Also needs a queue index input!
**/
static int
ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;
	int desc_idx = 0;

	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	/* Read in index */
	error = sysctl_handle_int(oidp, &desc_idx, 0, req);
	if (error) {
		sbuf_delete(buf);
		return (error);
	}
	if (req->newptr == NULL) {
		sbuf_delete(buf);
		return (EIO); // fix
	}
	if (desc_idx > 1024) { // fix
		device_printf(dev,
		    "Invalid descriptor index, needs to be < 1024\n"); // fix
		sbuf_delete(buf);
		return (EINVAL);
	}

	// Don't use this sysctl yet
	if (TRUE) {
		sbuf_delete(buf);
		return (ENODEV);
	}

	sbuf_cat(buf, "\n");

	// set to queue 1?
	struct ixl_queue *que = pf->vsi.queues;
	struct tx_ring *txr = &(que[1].txr);
	struct i40e_tx_desc *txd = &txr->base[desc_idx];

	sbuf_printf(buf, "Que: %d, Desc: %d\n", que->me, desc_idx);
	sbuf_printf(buf, "Addr: %#18lx\n", txd->buffer_addr);
	sbuf_printf(buf, "Opts: %#18lx\n", txd->cmd_type_offset_bsz);

	error = sbuf_finish(buf);
	if (error) {
		device_printf(dev, "Error finishing sbuf: %d\n", error);
		sbuf_delete(buf);
		return (error);
	}

	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
	if (error)
		device_printf(dev, "sysctl error: %d\n", error);
	sbuf_delete(buf);
	return (error);
}
#endif /* IXL_DEBUG_SYSCTL */