1 /******************************************************************************
3 Copyright (c) 2013-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifndef IXL_STANDALONE_BUILD
37 #include "opt_inet6.h"
44 #include <net/rss_config.h>
47 /*********************************************************************
49 *********************************************************************/
50 char ixl_driver_version[] = "1.3.6";
52 /*********************************************************************
55 * Used by probe to select devices to load on
56 * Last field stores an index into ixl_strings
57 * Last entry must be all 0s
59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
60 *********************************************************************/
62 static ixl_vendor_info_t ixl_vendor_info_array[] =
64 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
65 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
66 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
67 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
68 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
69 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
70 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
71 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
72 /* required last entry */
76 /*********************************************************************
77 * Table of branding strings
78 *********************************************************************/
80 static char *ixl_strings[] = {
81 "Intel(R) Ethernet Connection XL710 Driver"
85 /*********************************************************************
87 *********************************************************************/
88 static int ixl_probe(device_t);
89 static int ixl_attach(device_t);
90 static int ixl_detach(device_t);
91 static int ixl_shutdown(device_t);
92 static int ixl_get_hw_capabilities(struct ixl_pf *);
93 static void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
94 static int ixl_ioctl(struct ifnet *, u_long, caddr_t);
95 static void ixl_init(void *);
96 static void ixl_init_locked(struct ixl_pf *);
97 static void ixl_stop(struct ixl_pf *);
98 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
99 static int ixl_media_change(struct ifnet *);
100 static void ixl_update_link_status(struct ixl_pf *);
101 static int ixl_allocate_pci_resources(struct ixl_pf *);
102 static u16 ixl_get_bus_info(struct i40e_hw *, device_t);
103 static int ixl_setup_stations(struct ixl_pf *);
104 static int ixl_switch_config(struct ixl_pf *);
105 static int ixl_initialize_vsi(struct ixl_vsi *);
106 static int ixl_assign_vsi_msix(struct ixl_pf *);
107 static int ixl_assign_vsi_legacy(struct ixl_pf *);
108 static int ixl_init_msix(struct ixl_pf *);
109 static void ixl_configure_msix(struct ixl_pf *);
110 static void ixl_configure_itr(struct ixl_pf *);
111 static void ixl_configure_legacy(struct ixl_pf *);
112 static void ixl_free_pci_resources(struct ixl_pf *);
113 static void ixl_local_timer(void *);
114 static int ixl_setup_interface(device_t, struct ixl_vsi *);
115 static bool ixl_config_link(struct i40e_hw *);
116 static void ixl_config_rss(struct ixl_vsi *);
117 static void ixl_set_queue_rx_itr(struct ixl_queue *);
118 static void ixl_set_queue_tx_itr(struct ixl_queue *);
119 static int ixl_set_advertised_speeds(struct ixl_pf *, int);
121 static void ixl_enable_rings(struct ixl_vsi *);
122 static void ixl_disable_rings(struct ixl_vsi *);
123 static void ixl_enable_intr(struct ixl_vsi *);
124 static void ixl_disable_intr(struct ixl_vsi *);
126 static void ixl_enable_adminq(struct i40e_hw *);
127 static void ixl_disable_adminq(struct i40e_hw *);
128 static void ixl_enable_queue(struct i40e_hw *, int);
129 static void ixl_disable_queue(struct i40e_hw *, int);
130 static void ixl_enable_legacy(struct i40e_hw *);
131 static void ixl_disable_legacy(struct i40e_hw *);
133 static void ixl_set_promisc(struct ixl_vsi *);
134 static void ixl_add_multi(struct ixl_vsi *);
135 static void ixl_del_multi(struct ixl_vsi *);
136 static void ixl_register_vlan(void *, struct ifnet *, u16);
137 static void ixl_unregister_vlan(void *, struct ifnet *, u16);
138 static void ixl_setup_vlan_filters(struct ixl_vsi *);
140 static void ixl_init_filters(struct ixl_vsi *);
141 static void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
142 static void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
143 static void ixl_add_hw_filters(struct ixl_vsi *, int, int);
144 static void ixl_del_hw_filters(struct ixl_vsi *, int);
145 static struct ixl_mac_filter *
146 ixl_find_filter(struct ixl_vsi *, u8 *, s16);
147 static void ixl_add_mc_filter(struct ixl_vsi *, u8 *);
149 /* Sysctl debug interface */
150 static int ixl_debug_info(SYSCTL_HANDLER_ARGS);
151 static void ixl_print_debug_info(struct ixl_pf *);
153 /* The MSI/X Interrupt handlers */
154 static void ixl_intr(void *);
155 static void ixl_msix_que(void *);
156 static void ixl_msix_adminq(void *);
157 static void ixl_handle_mdd_event(struct ixl_pf *);
159 /* Deferred interrupt tasklets */
160 static void ixl_do_adminq(void *, int);
162 /* Sysctl handlers */
163 static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
164 static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
165 static int ixl_current_speed(SYSCTL_HANDLER_ARGS);
166 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
169 static void ixl_add_hw_stats(struct ixl_pf *);
170 static void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
171 struct sysctl_oid_list *, struct i40e_hw_port_stats *);
172 static void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
173 struct sysctl_oid_list *,
174 struct i40e_eth_stats *);
175 static void ixl_update_stats_counters(struct ixl_pf *);
176 static void ixl_update_eth_stats(struct ixl_vsi *);
177 static void ixl_pf_reset_stats(struct ixl_pf *);
178 static void ixl_vsi_reset_stats(struct ixl_vsi *);
179 static void ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
181 static void ixl_stat_update32(struct i40e_hw *, u32, bool,
184 #ifdef IXL_DEBUG_SYSCTL
185 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
186 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
187 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
188 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
189 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
190 static int ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS);
193 /*********************************************************************
194 * FreeBSD Device Interface Entry Points
195 *********************************************************************/
197 static device_method_t ixl_methods[] = {
198 /* Device interface */
199 DEVMETHOD(device_probe, ixl_probe),
200 DEVMETHOD(device_attach, ixl_attach),
201 DEVMETHOD(device_detach, ixl_detach),
202 DEVMETHOD(device_shutdown, ixl_shutdown),
206 static driver_t ixl_driver = {
207 "ixl", ixl_methods, sizeof(struct ixl_pf),
210 devclass_t ixl_devclass;
211 DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
213 MODULE_DEPEND(ixl, pci, 1, 1, 1);
214 MODULE_DEPEND(ixl, ether, 1, 1, 1);
216 MODULE_DEPEND(ixl, netmap, 1, 1, 1);
217 #endif /* DEV_NETMAP */
220 ** Global reset mutex
222 static struct mtx ixl_reset_mtx;
225 ** TUNEABLE PARAMETERS:
228 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
229 "IXL driver parameters");
232 * MSIX should be the default for best performance,
233 * but this allows it to be forced off for testing.
235 static int ixl_enable_msix = 1;
236 TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
237 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
238 "Enable MSI-X interrupts");
241 ** Number of descriptors per ring:
242 ** - TX and RX are the same size
244 static int ixl_ringsz = DEFAULT_RING;
245 TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
246 SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
247 &ixl_ringsz, 0, "Descriptor Ring Size");
250 ** This can be set manually; if left as 0, the
251 ** number of queues will be calculated based
252 ** on the CPUs and MSI-X vectors available.
254 int ixl_max_queues = 0;
255 TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
256 SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
257 &ixl_max_queues, 0, "Number of Queues");
260 ** Controls for Interrupt Throttling
261 ** - true/false for dynamic adjustment
262 ** - default values for static ITR
264 int ixl_dynamic_rx_itr = 0;
265 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
266 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
267 &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
269 int ixl_dynamic_tx_itr = 0;
270 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
271 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
272 &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
274 int ixl_rx_itr = IXL_ITR_8K;
275 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
276 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
277 &ixl_rx_itr, 0, "RX Interrupt Rate");
279 int ixl_tx_itr = IXL_ITR_4K;
280 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
281 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
282 &ixl_tx_itr, 0, "TX Interrupt Rate");
285 static int ixl_enable_fdir = 1;
286 TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
287 /* Rate at which we sample */
288 int ixl_atr_rate = 20;
289 TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
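/*
** Added note (not part of the upstream sources): the TUNABLE_INT()
** declarations above are read from the kernel environment at module
** load time, so they can be set from /boot/loader.conf.  The values
** below are examples only, not recommendations:
**
**	hw.ixl.enable_msix="1"
**	hw.ixl.ringsz="2048"
**	hw.ixl.max_queues="4"
**	hw.ixl.dynamic_rx_itr="1"
**
** The SYSCTL_INT() counterparts with CTLFLAG_RDTUN expose the same
** variables read-only under the hw.ixl tree once the driver is loaded.
*/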
293 #define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
294 #include <dev/netmap/if_ixl_netmap.h>
295 #endif /* DEV_NETMAP */
297 static char *ixl_fc_string[6] = {
307 /*********************************************************************
308 * Device identification routine
310 * ixl_probe determines if the driver should be loaded on
311 * the hardware based on PCI vendor/device id of the device.
313 * return BUS_PROBE_DEFAULT on success, positive on failure
314 *********************************************************************/
317 ixl_probe(device_t dev)
319 ixl_vendor_info_t *ent;
321 u16 pci_vendor_id, pci_device_id;
322 u16 pci_subvendor_id, pci_subdevice_id;
323 char device_name[256];
324 static bool lock_init = FALSE;
326 INIT_DEBUGOUT("ixl_probe: begin");
328 pci_vendor_id = pci_get_vendor(dev);
329 if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
332 pci_device_id = pci_get_device(dev);
333 pci_subvendor_id = pci_get_subvendor(dev);
334 pci_subdevice_id = pci_get_subdevice(dev);
336 ent = ixl_vendor_info_array;
337 while (ent->vendor_id != 0) {
338 if ((pci_vendor_id == ent->vendor_id) &&
339 (pci_device_id == ent->device_id) &&
341 ((pci_subvendor_id == ent->subvendor_id) ||
342 (ent->subvendor_id == 0)) &&
344 ((pci_subdevice_id == ent->subdevice_id) ||
345 (ent->subdevice_id == 0))) {
346 sprintf(device_name, "%s, Version - %s",
347 ixl_strings[ent->index],
349 device_set_desc_copy(dev, device_name);
350 /* One shot mutex init */
351 if (lock_init == FALSE) {
353 mtx_init(&ixl_reset_mtx,
355 "IXL RESET Lock", MTX_DEF);
357 return (BUS_PROBE_DEFAULT);
364 /*********************************************************************
365 * Device initialization routine
367 * The attach entry point is called when the driver is being loaded.
368 * This routine identifies the type of hardware, allocates all resources
369 * and initializes the hardware.
371 * return 0 on success, positive on failure
372 *********************************************************************/
375 ixl_attach(device_t dev)
383 INIT_DEBUGOUT("ixl_attach: begin");
385 /* Allocate, clear, and link in our primary soft structure */
386 pf = device_get_softc(dev);
387 pf->dev = pf->osdep.dev = dev;
391 ** Note this assumes we have a single embedded VSI;
392 ** this could be enhanced later to allocate multiple
398 IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
400 /* Set up the timer callout */
401 callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
404 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
405 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
406 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
407 pf, 0, ixl_set_flowcntl, "I", "Flow Control");
409 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
410 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
411 OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
412 pf, 0, ixl_set_advertise, "I", "Advertised Speed");
414 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
415 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
416 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
417 pf, 0, ixl_current_speed, "A", "Current Port Speed");
419 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
420 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
421 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
422 pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
424 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
425 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
426 OID_AUTO, "rx_itr", CTLFLAG_RW,
427 &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
429 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
430 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
431 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
432 &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
434 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
435 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
436 OID_AUTO, "tx_itr", CTLFLAG_RW,
437 &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
439 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
440 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
441 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
442 &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
444 #ifdef IXL_DEBUG_SYSCTL
445 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
446 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
447 OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
448 pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");
450 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
451 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
452 OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
453 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
455 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
456 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
457 OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
458 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
460 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
461 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
462 OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
463 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
465 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
466 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
467 OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
468 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
470 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
471 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
472 OID_AUTO, "dump_desc", CTLTYPE_INT | CTLFLAG_WR,
473 pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump");
476 /* Save off the PCI information */
477 hw->vendor_id = pci_get_vendor(dev);
478 hw->device_id = pci_get_device(dev);
479 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
480 hw->subsystem_vendor_id =
481 pci_read_config(dev, PCIR_SUBVEND_0, 2);
482 hw->subsystem_device_id =
483 pci_read_config(dev, PCIR_SUBDEV_0, 2);
485 hw->bus.device = pci_get_slot(dev);
486 hw->bus.func = pci_get_function(dev);
488 /* Do PCI setup - map BAR0, etc */
489 if (ixl_allocate_pci_resources(pf)) {
490 device_printf(dev, "Allocation of PCI resources failed\n");
495 /* Create for initial debugging use */
496 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
497 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
498 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
499 ixl_debug_info, "I", "Debug Information");
502 /* Establish a clean starting point */
504 error = i40e_pf_reset(hw);
506 device_printf(dev,"PF reset failure %x\n", error);
511 /* Set admin queue parameters */
512 hw->aq.num_arq_entries = IXL_AQ_LEN;
513 hw->aq.num_asq_entries = IXL_AQ_LEN;
514 hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
515 hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
517 /* Initialize the shared code */
518 error = i40e_init_shared_code(hw);
520 device_printf(dev,"Unable to initialize the shared code\n");
525 /* Set up the admin queue */
526 error = i40e_init_adminq(hw);
528 device_printf(dev, "The driver for the device stopped "
529 "because the NVM image is newer than expected.\n"
530 "You must install the most recent version of "
531 " the network driver.\n");
534 device_printf(dev, "%s\n", ixl_fw_version_str(hw));
536 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
537 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
538 device_printf(dev, "The driver for the device detected "
539 "a newer version of the NVM image than expected.\n"
540 "Please install the most recent version of the network driver.\n");
541 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
542 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
543 device_printf(dev, "The driver for the device detected "
544 "an older version of the NVM image than expected.\n"
545 "Please update the NVM image.\n");
548 i40e_clear_pxe_mode(hw);
550 /* Get capabilities from the device */
551 error = ixl_get_hw_capabilities(pf);
553 device_printf(dev, "HW capabilities failure!\n");
557 /* Set up host memory cache */
558 error = i40e_init_lan_hmc(hw, vsi->num_queues, vsi->num_queues, 0, 0);
560 device_printf(dev, "init_lan_hmc failed: %d\n", error);
564 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
566 device_printf(dev, "configure_lan_hmc failed: %d\n", error);
570 /* Disable LLDP from the firmware */
571 i40e_aq_stop_lldp(hw, TRUE, NULL);
573 i40e_get_mac_addr(hw, hw->mac.addr);
574 error = i40e_validate_mac_addr(hw->mac.addr);
576 device_printf(dev, "validate_mac_addr failed: %d\n", error);
579 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
580 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
582 /* Set up VSI and queues */
583 if (ixl_setup_stations(pf) != 0) {
584 device_printf(dev, "setup stations failed!\n");
589 /* Initialize mac filter list for VSI */
590 SLIST_INIT(&vsi->ftl);
592 /* Set up interrupt routing here */
594 error = ixl_assign_vsi_msix(pf);
596 error = ixl_assign_vsi_legacy(pf);
600 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
601 (hw->aq.fw_maj_ver < 4)) {
603 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
605 device_printf(dev, "link restart failed, aq_err=%d\n",
606 pf->hw.aq.asq_last_status);
609 /* Determine link state */
610 vsi->link_up = ixl_config_link(hw);
612 /* Report if Unqualified modules are found */
613 if ((vsi->link_up == FALSE) &&
614 (pf->hw.phy.link_info.link_info &
615 I40E_AQ_MEDIA_AVAILABLE) &&
616 (!(pf->hw.phy.link_info.an_info &
617 I40E_AQ_QUALIFIED_MODULE)))
618 device_printf(dev, "Link failed because "
619 "an unqualified module was detected\n");
621 /* Setup OS specific network interface */
622 if (ixl_setup_interface(dev, vsi) != 0) {
623 device_printf(dev, "interface setup failed!\n");
628 error = ixl_switch_config(pf);
630 device_printf(dev, "Initial switch config failed: %d\n", error);
634 /* Limit phy interrupts to link and modules failure */
635 error = i40e_aq_set_phy_int_mask(hw,
636 I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
638 device_printf(dev, "set phy mask failed: %d\n", error);
640 /* Get the bus configuration and set the shared code */
641 bus = ixl_get_bus_info(hw, dev);
642 i40e_set_pci_config_data(hw, bus);
644 /* Initialize statistics */
645 ixl_pf_reset_stats(pf);
646 ixl_update_stats_counters(pf);
647 ixl_add_hw_stats(pf);
649 /* Register for VLAN events */
650 vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
651 ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
652 vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
653 ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
657 ixl_netmap_attach(vsi);
658 #endif /* DEV_NETMAP */
659 INIT_DEBUGOUT("ixl_attach: end");
663 if (vsi->ifp != NULL)
666 i40e_shutdown_lan_hmc(hw);
668 i40e_shutdown_adminq(hw);
670 ixl_free_pci_resources(pf);
672 IXL_PF_LOCK_DESTROY(pf);
676 /*********************************************************************
677 * Device removal routine
679 * The detach entry point is called when the driver is being removed.
680 * This routine stops the adapter and deallocates all the resources
681 * that were allocated for driver operation.
683 * return 0 on success, positive on failure
684 *********************************************************************/
687 ixl_detach(device_t dev)
689 struct ixl_pf *pf = device_get_softc(dev);
690 struct i40e_hw *hw = &pf->hw;
691 struct ixl_vsi *vsi = &pf->vsi;
692 struct ixl_queue *que = vsi->queues;
695 INIT_DEBUGOUT("ixl_detach: begin");
697 /* Make sure VLANs are not using the driver */
698 if (vsi->ifp->if_vlantrunk != NULL) {
699 device_printf(dev, "VLAN in use, detach first\n");
703 ether_ifdetach(vsi->ifp);
704 if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
710 for (int i = 0; i < vsi->num_queues; i++, que++) {
712 taskqueue_drain(que->tq, &que->task);
713 taskqueue_drain(que->tq, &que->tx_task);
714 taskqueue_free(que->tq);
718 /* Shutdown LAN HMC */
719 status = i40e_shutdown_lan_hmc(hw);
722 "Shutdown LAN HMC failed with code %d\n", status);
724 /* Shutdown admin queue */
725 status = i40e_shutdown_adminq(hw);
728 "Shutdown Admin queue failed with code %d\n", status);
730 /* Unregister VLAN events */
731 if (vsi->vlan_attach != NULL)
732 EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
733 if (vsi->vlan_detach != NULL)
734 EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
736 callout_drain(&pf->timer);
738 netmap_detach(vsi->ifp);
739 #endif /* DEV_NETMAP */
742 ixl_free_pci_resources(pf);
743 bus_generic_detach(dev);
746 IXL_PF_LOCK_DESTROY(pf);
750 /*********************************************************************
752 * Shutdown entry point
754 **********************************************************************/
757 ixl_shutdown(device_t dev)
759 struct ixl_pf *pf = device_get_softc(dev);
767 /*********************************************************************
769 * Get the hardware capabilities
771 **********************************************************************/
774 ixl_get_hw_capabilities(struct ixl_pf *pf)
776 struct i40e_aqc_list_capabilities_element_resp *buf;
777 struct i40e_hw *hw = &pf->hw;
778 device_t dev = pf->dev;
783 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
785 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
786 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
787 device_printf(dev, "Unable to allocate cap memory\n");
791 /* This populates the hw struct */
792 error = i40e_aq_discover_capabilities(hw, buf, len,
793 &needed, i40e_aqc_opc_list_func_capabilities, NULL);
795 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
797 /* retry once with a larger buffer */
801 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
802 device_printf(dev, "capability discovery failed: %d\n",
803 pf->hw.aq.asq_last_status);
807 /* Capture this PF's starting queue pair */
808 pf->qbase = hw->func_caps.base_queue;
811 device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
812 "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
813 hw->pf_id, hw->func_caps.num_vfs,
814 hw->func_caps.num_msix_vectors,
815 hw->func_caps.num_msix_vectors_vf,
816 hw->func_caps.fd_filters_guaranteed,
817 hw->func_caps.fd_filters_best_effort,
818 hw->func_caps.num_tx_qp,
819 hw->func_caps.num_rx_qp,
820 hw->func_caps.base_queue);
826 ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
828 device_t dev = vsi->dev;
830 /* Enable/disable TXCSUM/TSO4 */
831 if (!(ifp->if_capenable & IFCAP_TXCSUM)
832 && !(ifp->if_capenable & IFCAP_TSO4)) {
833 if (mask & IFCAP_TXCSUM) {
834 ifp->if_capenable |= IFCAP_TXCSUM;
835 /* enable TXCSUM, restore TSO if previously enabled */
836 if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
837 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
838 ifp->if_capenable |= IFCAP_TSO4;
841 else if (mask & IFCAP_TSO4) {
842 ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
843 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
845 "TSO4 requires txcsum, enabling both...\n");
847 } else if ((ifp->if_capenable & IFCAP_TXCSUM)
848 && !(ifp->if_capenable & IFCAP_TSO4)) {
849 if (mask & IFCAP_TXCSUM)
850 ifp->if_capenable &= ~IFCAP_TXCSUM;
851 else if (mask & IFCAP_TSO4)
852 ifp->if_capenable |= IFCAP_TSO4;
853 } else if ((ifp->if_capenable & IFCAP_TXCSUM)
854 && (ifp->if_capenable & IFCAP_TSO4)) {
855 if (mask & IFCAP_TXCSUM) {
856 vsi->flags |= IXL_FLAGS_KEEP_TSO4;
857 ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
859 "TSO4 requires txcsum, disabling both...\n");
860 } else if (mask & IFCAP_TSO4)
861 ifp->if_capenable &= ~IFCAP_TSO4;
864 /* Enable/disable TXCSUM_IPV6/TSO6 */
865 if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
866 && !(ifp->if_capenable & IFCAP_TSO6)) {
867 if (mask & IFCAP_TXCSUM_IPV6) {
868 ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
869 if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
870 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
871 ifp->if_capenable |= IFCAP_TSO6;
873 } else if (mask & IFCAP_TSO6) {
874 ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
875 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
877 "TSO6 requires txcsum6, enabling both...\n");
879 } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
880 && !(ifp->if_capenable & IFCAP_TSO6)) {
881 if (mask & IFCAP_TXCSUM_IPV6)
882 ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
883 else if (mask & IFCAP_TSO6)
884 ifp->if_capenable |= IFCAP_TSO6;
885 } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
886 && (ifp->if_capenable & IFCAP_TSO6)) {
887 if (mask & IFCAP_TXCSUM_IPV6) {
888 vsi->flags |= IXL_FLAGS_KEEP_TSO6;
889 ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
891 "TSO6 requires txcsum6, disabling both...\n");
892 } else if (mask & IFCAP_TSO6)
893 ifp->if_capenable &= ~IFCAP_TSO6;
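/*
** Summary of the dependency rule enforced above (added note, not from
** the upstream sources): TSO requires the matching transmit checksum
** offload.  Turning TSO4/TSO6 on implicitly turns TXCSUM/TXCSUM_IPV6
** on; turning TXCSUM off also turns TSO off, and the IXL_FLAGS_KEEP_TSO*
** flags remember that so TSO is restored when checksumming is
** re-enabled.  From userland this corresponds to, for example:
**
**	# ifconfig ixl0 -txcsum		(also clears tso4)
**	# ifconfig ixl0 txcsum		(restores tso4 as well)
*/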
897 /*********************************************************************
900 * ixl_ioctl is called when the user wants to configure the
903 * return 0 on success, positive on failure
904 **********************************************************************/
907 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
909 struct ixl_vsi *vsi = ifp->if_softc;
910 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
911 struct ifreq *ifr = (struct ifreq *) data;
912 #if defined(INET) || defined(INET6)
913 struct ifaddr *ifa = (struct ifaddr *)data;
914 bool avoid_reset = FALSE;
922 if (ifa->ifa_addr->sa_family == AF_INET)
926 if (ifa->ifa_addr->sa_family == AF_INET6)
929 #if defined(INET) || defined(INET6)
931 ** Calling init results in link renegotiation,
932 ** so we avoid doing it when possible.
935 ifp->if_flags |= IFF_UP;
936 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
939 if (!(ifp->if_flags & IFF_NOARP))
940 arp_ifinit(ifp, ifa);
943 error = ether_ioctl(ifp, command, data);
947 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
948 if (ifr->ifr_mtu > IXL_MAX_FRAME -
949 ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
953 ifp->if_mtu = ifr->ifr_mtu;
954 vsi->max_frame_size =
955 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
956 + ETHER_VLAN_ENCAP_LEN;
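/*
** Worked example (added note): with ETHER_HDR_LEN = 14,
** ETHER_CRC_LEN = 4 and ETHER_VLAN_ENCAP_LEN = 4, the default
** MTU of 1500 yields a max_frame_size of 1522 bytes, and a
** 9000-byte jumbo MTU yields 9022 bytes.
*/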
962 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
964 if (ifp->if_flags & IFF_UP) {
965 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
966 if ((ifp->if_flags ^ pf->if_flags) &
967 (IFF_PROMISC | IFF_ALLMULTI)) {
968 ixl_set_promisc(vsi);
973 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
975 pf->if_flags = ifp->if_flags;
979 IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
980 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
982 ixl_disable_intr(vsi);
984 ixl_enable_intr(vsi);
989 IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
990 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
992 ixl_disable_intr(vsi);
994 ixl_enable_intr(vsi);
1000 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
1001 error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
1005 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1006 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
1008 ixl_cap_txcsum_tso(vsi, ifp, mask);
1010 if (mask & IFCAP_RXCSUM)
1011 ifp->if_capenable ^= IFCAP_RXCSUM;
1012 if (mask & IFCAP_RXCSUM_IPV6)
1013 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1014 if (mask & IFCAP_LRO)
1015 ifp->if_capenable ^= IFCAP_LRO;
1016 if (mask & IFCAP_VLAN_HWTAGGING)
1017 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1018 if (mask & IFCAP_VLAN_HWFILTER)
1019 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1020 if (mask & IFCAP_VLAN_HWTSO)
1021 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1022 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1024 ixl_init_locked(pf);
1027 VLAN_CAPABILITIES(ifp);
1033 IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
1034 error = ether_ioctl(ifp, command, data);
1042 /*********************************************************************
1045 * This routine is used in two ways. It is used by the stack as
1046 * the init entry point in the network interface structure. It is also used
1047 * by the driver as a hw/sw initialization routine to get to a
1050 * return 0 on success, positive on failure
1051 **********************************************************************/
1054 ixl_init_locked(struct ixl_pf *pf)
1056 struct i40e_hw *hw = &pf->hw;
1057 struct ixl_vsi *vsi = &pf->vsi;
1058 struct ifnet *ifp = vsi->ifp;
1059 device_t dev = pf->dev;
1060 struct i40e_filter_control_settings filter;
1061 u8 tmpaddr[ETHER_ADDR_LEN];
1064 mtx_assert(&pf->pf_mtx, MA_OWNED);
1065 INIT_DEBUGOUT("ixl_init: begin");
1068 /* Get the latest mac address... User might use a LAA */
1069 bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
1070 I40E_ETH_LENGTH_OF_ADDRESS);
1071 if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
1072 i40e_validate_mac_addr(tmpaddr)) {
1073 bcopy(tmpaddr, hw->mac.addr,
1074 I40E_ETH_LENGTH_OF_ADDRESS);
1075 ret = i40e_aq_mac_address_write(hw,
1076 I40E_AQC_WRITE_TYPE_LAA_ONLY,
1077 hw->mac.addr, NULL);
1079 device_printf(dev, "LLA address "
1080 "change failed!!\n");
1085 /* Set the various hardware offload abilities */
1086 ifp->if_hwassist = 0;
1087 if (ifp->if_capenable & IFCAP_TSO)
1088 ifp->if_hwassist |= CSUM_TSO;
1089 if (ifp->if_capenable & IFCAP_TXCSUM)
1090 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1091 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1092 ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
1094 /* Set up the device filtering */
1095 bzero(&filter, sizeof(filter));
1096 filter.enable_ethtype = TRUE;
1097 filter.enable_macvlan = TRUE;
1099 filter.enable_fdir = TRUE;
1101 if (i40e_set_filter_control(hw, &filter))
1102 device_printf(dev, "set_filter_control() failed\n");
1105 ixl_config_rss(vsi);
1108 ** Prepare the VSI: rings, hmc contexts, etc...
1110 if (ixl_initialize_vsi(vsi)) {
1111 device_printf(dev, "initialize vsi failed!!\n");
1115 /* Add protocol filters to list */
1116 ixl_init_filters(vsi);
1118 /* Setup vlan's if needed */
1119 ixl_setup_vlan_filters(vsi);
1121 /* Start the local timer */
1122 callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1124 /* Set up MSI/X routing and the ITR settings */
1125 if (ixl_enable_msix) {
1126 ixl_configure_msix(pf);
1127 ixl_configure_itr(pf);
1129 ixl_configure_legacy(pf);
1131 ixl_enable_rings(vsi);
1133 i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
1135 /* Set MTU in hardware */
1136 int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
1139 device_printf(vsi->dev,
1140 "aq_set_mac_config in init error, code %d\n",
1143 /* And now turn on interrupts */
1144 ixl_enable_intr(vsi);
1146 /* Now inform the stack we're ready */
1147 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1148 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1156 struct ixl_pf *pf = arg;
1159 ixl_init_locked(pf);
1166 ** MSIX Interrupt Handlers and Tasklets
1170 ixl_handle_que(void *context, int pending)
1172 struct ixl_queue *que = context;
1173 struct ixl_vsi *vsi = que->vsi;
1174 struct i40e_hw *hw = vsi->hw;
1175 struct tx_ring *txr = &que->txr;
1176 struct ifnet *ifp = vsi->ifp;
1179 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1180 more = ixl_rxeof(que, IXL_RX_LIMIT);
1183 if (!drbr_empty(ifp, txr->br))
1184 ixl_mq_start_locked(ifp, txr);
1187 taskqueue_enqueue(que->tq, &que->task);
1192 /* Reenable this interrupt - hmmm */
1193 ixl_enable_queue(hw, que->me);
1198 /*********************************************************************
1200 * Legacy Interrupt Service routine
1202 **********************************************************************/
1206 struct ixl_pf *pf = arg;
1207 struct i40e_hw *hw = &pf->hw;
1208 struct ixl_vsi *vsi = &pf->vsi;
1209 struct ixl_queue *que = vsi->queues;
1210 struct ifnet *ifp = vsi->ifp;
1211 struct tx_ring *txr = &que->txr;
1212 u32 reg, icr0, mask;
1213 bool more_tx, more_rx;
1217 /* Protect against spurious interrupts */
1218 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1221 icr0 = rd32(hw, I40E_PFINT_ICR0);
1223 reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1224 reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1225 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1227 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1229 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
1230 taskqueue_enqueue(pf->tq, &pf->adminq);
1234 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1237 more_tx = ixl_txeof(que);
1238 if (!drbr_empty(vsi->ifp, txr->br))
1242 /* re-enable other interrupt causes */
1243 wr32(hw, I40E_PFINT_ICR0_ENA, mask);
1245 /* And now the queues */
1246 reg = rd32(hw, I40E_QINT_RQCTL(0));
1247 reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1248 wr32(hw, I40E_QINT_RQCTL(0), reg);
1250 reg = rd32(hw, I40E_QINT_TQCTL(0));
1251 reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1252 reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
1253 wr32(hw, I40E_QINT_TQCTL(0), reg);
1255 ixl_enable_legacy(hw);
1261 /*********************************************************************
1263 * MSIX VSI Interrupt Service routine
1265 **********************************************************************/
1267 ixl_msix_que(void *arg)
1269 struct ixl_queue *que = arg;
1270 struct ixl_vsi *vsi = que->vsi;
1271 struct i40e_hw *hw = vsi->hw;
1272 struct tx_ring *txr = &que->txr;
1273 bool more_tx, more_rx;
1275 /* Protect against spurious interrupts */
1276 if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
1281 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1284 more_tx = ixl_txeof(que);
1286 ** Make certain that if the stack
1287 ** has anything queued the task gets
1288 ** scheduled to handle it.
1290 if (!drbr_empty(vsi->ifp, txr->br))
1294 ixl_set_queue_rx_itr(que);
1295 ixl_set_queue_tx_itr(que);
1297 if (more_tx || more_rx)
1298 taskqueue_enqueue(que->tq, &que->task);
1300 ixl_enable_queue(hw, que->me);
1306 /*********************************************************************
1308 * MSIX Admin Queue Interrupt Service routine
1310 **********************************************************************/
1312 ixl_msix_adminq(void *arg)
1314 struct ixl_pf *pf = arg;
1315 struct i40e_hw *hw = &pf->hw;
1320 reg = rd32(hw, I40E_PFINT_ICR0);
1321 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1323 /* Check on the cause */
1324 if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
1325 mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1327 if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
1328 ixl_handle_mdd_event(pf);
1329 mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1332 if (reg & I40E_PFINT_ICR0_VFLR_MASK)
1333 mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
1335 reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1336 reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1337 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1339 taskqueue_enqueue(pf->tq, &pf->adminq);
1343 /*********************************************************************
1345 * Media Ioctl callback
1347 * This routine is called whenever the user queries the status of
1348 * the interface using ifconfig.
1350 **********************************************************************/
1352 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1354 struct ixl_vsi *vsi = ifp->if_softc;
1355 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1356 struct i40e_hw *hw = &pf->hw;
1358 INIT_DEBUGOUT("ixl_media_status: begin");
1361 ixl_update_link_status(pf);
1363 ifmr->ifm_status = IFM_AVALID;
1364 ifmr->ifm_active = IFM_ETHER;
1366 if (!vsi->link_up) {
1371 ifmr->ifm_status |= IFM_ACTIVE;
1372 /* Hardware is always full-duplex */
1373 ifmr->ifm_active |= IFM_FDX;
1375 switch (hw->phy.link_info.phy_type) {
1377 case I40E_PHY_TYPE_100BASE_TX:
1378 ifmr->ifm_active |= IFM_100_TX;
1381 case I40E_PHY_TYPE_1000BASE_T:
1382 ifmr->ifm_active |= IFM_1000_T;
1384 case I40E_PHY_TYPE_1000BASE_SX:
1385 ifmr->ifm_active |= IFM_1000_SX;
1387 case I40E_PHY_TYPE_1000BASE_LX:
1388 ifmr->ifm_active |= IFM_1000_LX;
1391 case I40E_PHY_TYPE_10GBASE_CR1:
1392 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1393 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1394 /* Using this until a real KR media type */
1395 case I40E_PHY_TYPE_10GBASE_KR:
1396 case I40E_PHY_TYPE_10GBASE_KX4:
1397 ifmr->ifm_active |= IFM_10G_TWINAX;
1399 case I40E_PHY_TYPE_10GBASE_SR:
1400 ifmr->ifm_active |= IFM_10G_SR;
1402 case I40E_PHY_TYPE_10GBASE_LR:
1403 ifmr->ifm_active |= IFM_10G_LR;
1405 case I40E_PHY_TYPE_10GBASE_T:
1406 ifmr->ifm_active |= IFM_10G_T;
1409 case I40E_PHY_TYPE_40GBASE_CR4:
1410 case I40E_PHY_TYPE_40GBASE_CR4_CU:
1411 ifmr->ifm_active |= IFM_40G_CR4;
1413 case I40E_PHY_TYPE_40GBASE_SR4:
1414 ifmr->ifm_active |= IFM_40G_SR4;
1416 case I40E_PHY_TYPE_40GBASE_LR4:
1417 ifmr->ifm_active |= IFM_40G_LR4;
1420 ** Set these to CR4 because OS does not
1421 ** have types available yet.
1423 case I40E_PHY_TYPE_40GBASE_KR4:
1424 case I40E_PHY_TYPE_XLAUI:
1425 case I40E_PHY_TYPE_XLPPI:
1426 case I40E_PHY_TYPE_40GBASE_AOC:
1427 ifmr->ifm_active |= IFM_40G_CR4;
1430 ifmr->ifm_active |= IFM_UNKNOWN;
1433 /* Report flow control status as well */
1434 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1435 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1436 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1437 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1444 /*********************************************************************
1446 * Media Ioctl callback
1448 * This routine is called when the user changes speed/duplex using
1449 * media/mediaopt option with ifconfig.
1451 **********************************************************************/
1453 ixl_media_change(struct ifnet * ifp)
1455 struct ixl_vsi *vsi = ifp->if_softc;
1456 struct ifmedia *ifm = &vsi->media;
1458 INIT_DEBUGOUT("ixl_media_change: begin");
1460 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1463 if_printf(ifp, "Media change is currently not supported.\n");
1471 ** ATR: Application Targeted Receive - creates a filter
1472 ** based on TX flow info that will keep the receive
1473 ** portion of the flow on the same queue. Based on the
1474 ** implementation this is only available for TCP connections
1477 ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
1479 struct ixl_vsi *vsi = que->vsi;
1480 struct tx_ring *txr = &que->txr;
1481 struct i40e_filter_program_desc *FDIR;
1485 /* check if ATR is enabled and sample rate */
1486 if ((!ixl_enable_fdir) || (!txr->atr_rate))
1489 ** We sample all TCP SYN/FIN packets,
1490 ** or at the selected sample rate
1493 if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
1494 (txr->atr_count < txr->atr_rate))
1498 /* Get a descriptor to use */
1499 idx = txr->next_avail;
1500 FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
1501 if (++idx == que->num_desc)
1504 txr->next_avail = idx;
1506 ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1507 I40E_TXD_FLTR_QW0_QINDEX_MASK;
1509 ptype |= (etype == ETHERTYPE_IP) ?
1510 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1511 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1512 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1513 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1515 ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1517 dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
1520 ** We use the TCP TH_FIN as a trigger to remove
1521 ** the filter, otherwise it's an update.
1523 dtype |= (th->th_flags & TH_FIN) ?
1524 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1525 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1526 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1527 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1529 dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1530 I40E_TXD_FLTR_QW1_DEST_SHIFT;
1532 dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1533 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1535 FDIR->qindex_flex_ptype_vsi = htole32(ptype);
1536 FDIR->dtype_cmd_cntindex = htole32(dtype);
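/*
** Illustrative sketch (not from the upstream sources): a transmit
** path that has already parsed the Ethernet type and located the TCP
** header could feed ATR like this, assuming local variables named
** etype and th:
**
**	if (etype == ETHERTYPE_IP || etype == ETHERTYPE_IPV6)
**		ixl_atr(que, th, etype);
**
** The programming descriptor built above then steers the receive side
** of that flow to the queue that transmitted it; a segment carrying
** TH_FIN removes the filter again.
*/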
1543 ixl_set_promisc(struct ixl_vsi *vsi)
1545 struct ifnet *ifp = vsi->ifp;
1546 struct i40e_hw *hw = vsi->hw;
1548 bool uni = FALSE, multi = FALSE;
1550 if (ifp->if_flags & IFF_ALLMULTI)
1552 else { /* Need to count the multicast addresses */
1553 struct ifmultiaddr *ifma;
1554 if_maddr_rlock(ifp);
1555 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1556 if (ifma->ifma_addr->sa_family != AF_LINK)
1558 if (mcnt == MAX_MULTICAST_ADDR)
1562 if_maddr_runlock(ifp);
1565 if (mcnt >= MAX_MULTICAST_ADDR)
1567 if (ifp->if_flags & IFF_PROMISC)
1570 err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1571 vsi->seid, uni, NULL);
1572 err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1573 vsi->seid, multi, NULL);
1577 /*********************************************************************
1580 * Routines for multicast and vlan filter management.
1582 *********************************************************************/
1584 ixl_add_multi(struct ixl_vsi *vsi)
1586 struct ifmultiaddr *ifma;
1587 struct ifnet *ifp = vsi->ifp;
1588 struct i40e_hw *hw = vsi->hw;
1589 int mcnt = 0, flags;
1591 IOCTL_DEBUGOUT("ixl_add_multi: begin");
1593 if_maddr_rlock(ifp);
1595 ** First just get a count, to decide if
1596 ** we simply use multicast promiscuous.
1598 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1599 if (ifma->ifma_addr->sa_family != AF_LINK)
1603 if_maddr_runlock(ifp);
1605 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1606 /* delete existing MC filters */
1607 ixl_del_hw_filters(vsi, mcnt);
1608 i40e_aq_set_vsi_multicast_promiscuous(hw,
1609 vsi->seid, TRUE, NULL);
1614 if_maddr_rlock(ifp);
1615 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1616 if (ifma->ifma_addr->sa_family != AF_LINK)
1618 ixl_add_mc_filter(vsi,
1619 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1622 if_maddr_runlock(ifp);
1624 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1625 ixl_add_hw_filters(vsi, flags, mcnt);
1628 IOCTL_DEBUGOUT("ixl_add_multi: end");
1633 ixl_del_multi(struct ixl_vsi *vsi)
1635 struct ifnet *ifp = vsi->ifp;
1636 struct ifmultiaddr *ifma;
1637 struct ixl_mac_filter *f;
1641 IOCTL_DEBUGOUT("ixl_del_multi: begin");
1643 /* Search for removed multicast addresses */
1644 if_maddr_rlock(ifp);
1645 SLIST_FOREACH(f, &vsi->ftl, next) {
1646 if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1648 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1649 if (ifma->ifma_addr->sa_family != AF_LINK)
1651 u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1652 if (cmp_etheraddr(f->macaddr, mc_addr)) {
1657 if (match == FALSE) {
1658 f->flags |= IXL_FILTER_DEL;
1663 if_maddr_runlock(ifp);
1666 ixl_del_hw_filters(vsi, mcnt);
1670 /*********************************************************************
1673 * This routine checks for link status, updates statistics,
1674 * and runs the watchdog check.
1676 **********************************************************************/
1679 ixl_local_timer(void *arg)
1681 struct ixl_pf *pf = arg;
1682 struct i40e_hw *hw = &pf->hw;
1683 struct ixl_vsi *vsi = &pf->vsi;
1684 struct ixl_queue *que = vsi->queues;
1685 device_t dev = pf->dev;
1689 mtx_assert(&pf->pf_mtx, MA_OWNED);
1691 /* Fire off the adminq task */
1692 taskqueue_enqueue(pf->tq, &pf->adminq);
1695 ixl_update_stats_counters(pf);
1698 ** Check status of the queues
1700 mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1701 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
1703 for (int i = 0; i < vsi->num_queues; i++,que++) {
1704 /* Any queues with outstanding work get a sw irq */
1706 wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1708 ** Each time txeof runs without cleaning anything
1709 ** while uncleaned descriptors remain, busy is incremented.
1710 ** If we get to 5 we declare the queue hung.
1712 if (que->busy == IXL_QUEUE_HUNG) {
1714 /* Mark the queue as inactive */
1715 vsi->active_queues &= ~((u64)1 << que->me);
1718 /* Check if we've come back from hung */
1719 if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1720 vsi->active_queues |= ((u64)1 << que->me);
1722 if (que->busy >= IXL_MAX_TX_BUSY) {
1724 device_printf(dev,"Warning queue %d "
1725 "appears to be hung!\n", i);
1727 que->busy = IXL_QUEUE_HUNG;
1731 /* Only reinit if all queues show hung */
1732 if (hung == vsi->num_queues)
1735 callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1739 device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1740 ixl_init_locked(pf);
1744 ** Note: this routine updates the OS on the link state;
1745 ** the real check of the hardware only happens with
1746 ** a link interrupt.
1749 ixl_update_link_status(struct ixl_pf *pf)
1751 struct ixl_vsi *vsi = &pf->vsi;
1752 struct i40e_hw *hw = &pf->hw;
1753 struct ifnet *ifp = vsi->ifp;
1754 device_t dev = pf->dev;
1758 if (vsi->link_active == FALSE) {
1759 i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
1760 pf->fc = hw->fc.current_mode;
1762 device_printf(dev,"Link is up %d Gbps %s,"
1763 " Flow Control: %s\n",
1764 ((vsi->link_speed == I40E_LINK_SPEED_40GB)? 40:10),
1765 "Full Duplex", ixl_fc_string[pf->fc]);
1767 vsi->link_active = TRUE;
1769 ** Warn user if link speed on NPAR enabled
1770 ** partition is not at least 10GB
1772 if (hw->func_caps.npar_enable &&
1773 (hw->phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
1774 hw->phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
1775 device_printf(dev, "The partition detected link "
1776 "speed that is less than 10Gbps\n");
1777 if_link_state_change(ifp, LINK_STATE_UP);
1779 } else { /* Link down */
1780 if (vsi->link_active == TRUE) {
1782 device_printf(dev,"Link is Down\n");
1783 if_link_state_change(ifp, LINK_STATE_DOWN);
1784 vsi->link_active = FALSE;
1791 /*********************************************************************
1793 * This routine disables all traffic on the adapter by issuing a
1794 * global reset on the MAC and deallocates TX/RX buffers.
1796 **********************************************************************/
1799 ixl_stop(struct ixl_pf *pf)
1801 struct ixl_vsi *vsi = &pf->vsi;
1802 struct ifnet *ifp = vsi->ifp;
1804 mtx_assert(&pf->pf_mtx, MA_OWNED);
1806 INIT_DEBUGOUT("ixl_stop: begin\n");
1807 ixl_disable_intr(vsi);
1808 ixl_disable_rings(vsi);
1810 /* Tell the stack that the interface is no longer active */
1811 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1813 /* Stop the local timer */
1814 callout_stop(&pf->timer);
1820 /*********************************************************************
1822 * Setup legacy/MSI single-vector interrupt resources and handler for the VSI
1824 **********************************************************************/
1826 ixl_assign_vsi_legacy(struct ixl_pf *pf)
1828 device_t dev = pf->dev;
1829 struct ixl_vsi *vsi = &pf->vsi;
1830 struct ixl_queue *que = vsi->queues;
1835 pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1836 &rid, RF_SHAREABLE | RF_ACTIVE);
1837 if (pf->res == NULL) {
1838 device_printf(dev,"Unable to allocate"
1839 " bus resource: vsi legacy/msi interrupt\n");
1843 /* Set the handler function */
1844 error = bus_setup_intr(dev, pf->res,
1845 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1846 ixl_intr, pf, &pf->tag);
1849 device_printf(dev, "Failed to register legacy/msi handler");
1852 bus_describe_intr(dev, pf->res, pf->tag, "irq0");
1853 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1854 TASK_INIT(&que->task, 0, ixl_handle_que, que);
1855 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1856 taskqueue_thread_enqueue, &que->tq);
1857 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1858 device_get_nameunit(dev));
1859 TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1860 pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1861 taskqueue_thread_enqueue, &pf->tq);
1862 taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1863 device_get_nameunit(dev));
1869 /*********************************************************************
1871 * Setup MSIX Interrupt resources and handlers for the VSI
1873 **********************************************************************/
1875 ixl_assign_vsi_msix(struct ixl_pf *pf)
1877 device_t dev = pf->dev;
1878 struct ixl_vsi *vsi = &pf->vsi;
1879 struct ixl_queue *que = vsi->queues;
1880 struct tx_ring *txr;
1881 int error, rid, vector = 0;
1883 /* Admin Queue is vector 0 */
1885 pf->res = bus_alloc_resource_any(dev,
1886 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1888 device_printf(dev,"Unable to allocate"
1889 " bus resource: Adminq interrupt [%d]\n", rid);
1892 /* Set the adminq vector and handler */
1893 error = bus_setup_intr(dev, pf->res,
1894 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1895 ixl_msix_adminq, pf, &pf->tag);
1898 device_printf(dev, "Failed to register Admin que handler");
1901 bus_describe_intr(dev, pf->res, pf->tag, "aq");
1902 pf->admvec = vector;
1903 /* Tasklet for Admin Queue */
1904 TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1905 pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1906 taskqueue_thread_enqueue, &pf->tq);
1907 taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1908 device_get_nameunit(pf->dev));
1911 /* Now set up the stations */
1912 for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1916 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1917 RF_SHAREABLE | RF_ACTIVE);
1918 if (que->res == NULL) {
1919 device_printf(dev,"Unable to allocate"
1920 " bus resource: que interrupt [%d]\n", vector);
1923 /* Set the handler function */
1924 error = bus_setup_intr(dev, que->res,
1925 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1926 ixl_msix_que, que, &que->tag);
1929 device_printf(dev, "Failed to register que handler");
1932 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
1933 /* Bind the vector to a CPU */
1935 cpu_id = rss_getcpu(i % rss_getnumbuckets());
1937 bus_bind_intr(dev, que->res, cpu_id);
1939 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1940 TASK_INIT(&que->task, 0, ixl_handle_que, que);
1941 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1942 taskqueue_thread_enqueue, &que->tq);
1944 taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
1945 cpu_id, "%s (bucket %d)",
1946 device_get_nameunit(dev), cpu_id);
1948 taskqueue_start_threads(&que->tq, 1, PI_NET,
1949 "%s que", device_get_nameunit(dev));
1958 * Allocate MSI/X vectors
1961 ixl_init_msix(struct ixl_pf *pf)
1963 device_t dev = pf->dev;
1964 int rid, want, vectors, queues, available;
1966 /* Override by tuneable */
1967 if (ixl_enable_msix == 0)
1971 ** When used in a virtualized environment
1972 ** the PCI BUSMASTER capability may not be set,
1973 ** so explicitly set it here and rewrite
1974 ** the ENABLE bit in the MSIX control register
1975 ** at this point to cause the host to
1976 ** successfully initialize us.
1981 pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1982 pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
1983 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1984 pci_find_cap(dev, PCIY_MSIX, &rid);
1985 rid += PCIR_MSIX_CTRL;
1986 msix_ctrl = pci_read_config(dev, rid, 2);
1987 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1988 pci_write_config(dev, rid, msix_ctrl, 2);
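/*
** Added note: pci_find_cap() returns the offset of the MSI-X
** capability structure; PCIR_MSIX_CTRL points at its Message Control
** word, and PCIM_MSIXCTRL_MSIX_ENABLE is the MSI-X Enable bit in that
** word.  Rewriting it here forces the function's MSI-X logic on even
** when the hypervisor left it cleared.
*/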
1991 /* First try MSI/X */
1992 rid = PCIR_BAR(IXL_BAR);
1993 pf->msix_mem = bus_alloc_resource_any(dev,
1994 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1995 if (!pf->msix_mem) {
1996 /* May not be enabled */
1997 device_printf(pf->dev,
1998 "Unable to map MSIX table \n");
2002 available = pci_msix_count(dev);
2003 if (available == 0) { /* system has msix disabled */
2004 bus_release_resource(dev, SYS_RES_MEMORY,
2006 pf->msix_mem = NULL;
2010 /* Figure out a reasonable auto config value */
2011 queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
2013 /* Override with hardcoded value if sane */
2014 if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
2015 queues = ixl_max_queues;
2018 /* If we're doing RSS, clamp at the number of RSS buckets */
2019 if (queues > rss_getnumbuckets())
2020 queues = rss_getnumbuckets();
2024 ** Want one vector (RX/TX pair) per queue
2025 ** plus an additional for the admin queue.
2028 if (want <= available) /* Have enough */
2031 device_printf(pf->dev,
2032 "MSIX Configuration Problem, "
2033 "%d vectors available but %d wanted!\n",
2035 return (0); /* Will go to Legacy setup */
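/*
** Worked example (added note): on an 8-CPU system with 16 MSI-X
** vectors advertised, queues = 8 and want = 8 + 1 (one per queue
** plus the admin queue) = 9, which fits; with only 4 vectors
** available, queues would be clamped to 3 and want to 4.
*/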
2038 if (pci_alloc_msix(dev, &vectors) == 0) {
2039 device_printf(pf->dev,
2040 "Using MSIX interrupts with %d vectors\n", vectors);
2042 pf->vsi.num_queues = queues;
2045 * If we're doing RSS, the number of queues needs to
2046 * match the number of RSS buckets that are configured.
2048 * + If there's more queues than RSS buckets, we'll end
2049 * up with queues that get no traffic.
2051 * + If there's more RSS buckets than queues, we'll end
2052 * up having multiple RSS buckets map to the same queue,
2053 * so there'll be some contention.
2055 if (queues != rss_getnumbuckets()) {
2057 "%s: queues (%d) != RSS buckets (%d)"
2058 "; performance will be impacted.\n",
2059 __func__, queues, rss_getnumbuckets());
2065 vectors = pci_msi_count(dev);
2066 pf->vsi.num_queues = 1;
2069 ixl_enable_msix = 0;
2070 if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2071 device_printf(pf->dev,"Using an MSI interrupt\n");
2074 device_printf(pf->dev,"Using a Legacy interrupt\n");
2081 * Plumb MSI/X vectors
2084 ixl_configure_msix(struct ixl_pf *pf)
2086 struct i40e_hw *hw = &pf->hw;
2087 struct ixl_vsi *vsi = &pf->vsi;
2091 /* First set up the adminq - vector 0 */
2092 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2093 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
2095 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2096 I40E_PFINT_ICR0_ENA_GRST_MASK |
2097 I40E_PFINT_ICR0_HMC_ERR_MASK |
2098 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2099 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2100 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2101 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2102 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2104 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2105 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
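/*
** Added note: the ITR registers count in 2 usec units (assuming the
** usual XL710 granularity), so 0x003E = 62 -> ~124 usec between
** interrupts, i.e. roughly 8000 interrupts per second for vector 0
** (the adminq vector) programmed here.
*/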
2107 wr32(hw, I40E_PFINT_DYN_CTL0,
2108 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2109 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2111 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2113 /* Next configure the queues */
2114 for (int i = 0; i < vsi->num_queues; i++, vector++) {
2115 wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2116 wr32(hw, I40E_PFINT_LNKLSTN(i), i);
2118 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2119 (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2120 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2121 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2122 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2123 wr32(hw, I40E_QINT_RQCTL(i), reg);
2125 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2126 (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2127 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2128 ((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2129 (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2130 if (i == (vsi->num_queues - 1))
2131 reg |= (IXL_QUEUE_EOL
2132 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2133 wr32(hw, I40E_QINT_TQCTL(i), reg);
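/*
** Added note: the RQCTL/TQCTL writes above chain the interrupt causes
** into a per-vector linked list: each RX queue points at its TX queue
** (NEXTQ_TYPE TX) and each TX queue points at the next RX queue, with
** IXL_QUEUE_EOL terminating the list on the last queue.
*/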
2138 * Configure for MSI single vector operation
2141 ixl_configure_legacy(struct ixl_pf *pf)
2143 struct i40e_hw *hw = &pf->hw;
2147 wr32(hw, I40E_PFINT_ITR0(0), 0);
2148 wr32(hw, I40E_PFINT_ITR0(1), 0);
2151 /* Setup "other" causes */
2152 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2153 | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2154 | I40E_PFINT_ICR0_ENA_GRST_MASK
2155 | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2156 | I40E_PFINT_ICR0_ENA_GPIO_MASK
2157 | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2158 | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2159 | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2160 | I40E_PFINT_ICR0_ENA_VFLR_MASK
2161 | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2163 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2165 /* SW_ITR_IDX = 0, but don't change INTENA */
2166 wr32(hw, I40E_PFINT_DYN_CTL0,
2167 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2168 I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2169 /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2170 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2172 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2173 wr32(hw, I40E_PFINT_LNKLST0, 0);
2175 /* Associate the queue pair to the vector and enable the q int */
2176 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2177 | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2178 | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2179 wr32(hw, I40E_QINT_RQCTL(0), reg);
2181 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2182 | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2183 | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2184 wr32(hw, I40E_QINT_TQCTL(0), reg);
2186 /* Next enable the queue pair */
2187 reg = rd32(hw, I40E_QTX_ENA(0));
2188 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2189 wr32(hw, I40E_QTX_ENA(0), reg);
2191 reg = rd32(hw, I40E_QRX_ENA(0));
2192 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2193 wr32(hw, I40E_QRX_ENA(0), reg);
2198 * Set the Initial ITR state
2201 ixl_configure_itr(struct ixl_pf *pf)
2203 struct i40e_hw *hw = &pf->hw;
2204 struct ixl_vsi *vsi = &pf->vsi;
2205 struct ixl_queue *que = vsi->queues;
2207 vsi->rx_itr_setting = ixl_rx_itr;
2208 if (ixl_dynamic_rx_itr)
2209 vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2210 vsi->tx_itr_setting = ixl_tx_itr;
2211 if (ixl_dynamic_tx_itr)
2212 vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2214 for (int i = 0; i < vsi->num_queues; i++, que++) {
2215 struct tx_ring *txr = &que->txr;
2216 struct rx_ring *rxr = &que->rxr;
2218 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2219 vsi->rx_itr_setting);
2220 rxr->itr = vsi->rx_itr_setting;
2221 rxr->latency = IXL_AVE_LATENCY;
2222 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2223 vsi->tx_itr_setting);
2224 txr->itr = vsi->tx_itr_setting;
2225 txr->latency = IXL_AVE_LATENCY;
2231 ixl_allocate_pci_resources(struct ixl_pf *pf)
2234 device_t dev = pf->dev;
2237 pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2240 if (!(pf->pci_mem)) {
2241 device_printf(dev,"Unable to allocate bus resource: memory\n");
2245 pf->osdep.mem_bus_space_tag =
2246 rman_get_bustag(pf->pci_mem);
2247 pf->osdep.mem_bus_space_handle =
2248 rman_get_bushandle(pf->pci_mem);
2249 pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2250 pf->osdep.flush_reg = I40E_GLGEN_STAT;
2251 pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2253 pf->hw.back = &pf->osdep;
2256 ** Now set up MSI or MSI-X; this should
2257 ** return the number of supported
2258 ** vectors (which will be 1 for MSI).
2260 pf->msix = ixl_init_msix(pf);
2265 ixl_free_pci_resources(struct ixl_pf * pf)
2267 struct ixl_vsi *vsi = &pf->vsi;
2268 struct ixl_queue *que = vsi->queues;
2269 device_t dev = pf->dev;
2272 memrid = PCIR_BAR(IXL_BAR);
2274 /* We may get here before stations are setup */
2275 if ((!ixl_enable_msix) || (que == NULL))
2279 ** Release all msix VSI resources:
2281 for (int i = 0; i < vsi->num_queues; i++, que++) {
2282 rid = que->msix + 1;
2283 if (que->tag != NULL) {
2284 bus_teardown_intr(dev, que->res, que->tag);
2287 if (que->res != NULL)
2288 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2292 /* Clean the AdminQ interrupt last */
2293 if (pf->admvec) /* we are doing MSIX */
2294 rid = pf->admvec + 1;
2296 (pf->msix != 0) ? (rid = 1):(rid = 0);
2298 if (pf->tag != NULL) {
2299 bus_teardown_intr(dev, pf->res, pf->tag);
2302 if (pf->res != NULL)
2303 bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2306 pci_release_msi(dev);
2308 if (pf->msix_mem != NULL)
2309 bus_release_resource(dev, SYS_RES_MEMORY,
2310 memrid, pf->msix_mem);
2312 if (pf->pci_mem != NULL)
2313 bus_release_resource(dev, SYS_RES_MEMORY,
2314 PCIR_BAR(0), pf->pci_mem);
2320 ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2322 /* Display supported media types */
2323 if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2324 ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2326 if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2327 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2329 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2330 phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4) ||
2331 phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR) ||
2332 phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
2333 phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
2334 phy_type & (1 << I40E_PHY_TYPE_XFI) ||
2335 phy_type & (1 << I40E_PHY_TYPE_SFI) ||
2336 phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2337 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2339 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2340 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2341 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2342 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2343 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2344 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2346 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
2347 phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2348 phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
2349 phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
2350 phy_type & (1 << I40E_PHY_TYPE_XLPPI) ||
2351 /* KR4 uses CR4 until the OS has the real media type */
2352 phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2353 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2355 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2356 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2357 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2358 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2361 /*********************************************************************
2363 * Setup networking device structure and register an interface.
2365 **********************************************************************/
2367 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2370 struct i40e_hw *hw = vsi->hw;
2371 struct ixl_queue *que = vsi->queues;
2372 struct i40e_aq_get_phy_abilities_resp abilities;
2373 enum i40e_status_code aq_error = 0;
2375 INIT_DEBUGOUT("ixl_setup_interface: begin");
2377 ifp = vsi->ifp = if_alloc(IFT_ETHER);
2379 device_printf(dev, "can not allocate ifnet structure\n");
2382 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2383 ifp->if_mtu = ETHERMTU;
2384 if_initbaudrate(ifp, IF_Gbps(40));
2385 ifp->if_init = ixl_init;
2386 ifp->if_softc = vsi;
2387 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2388 ifp->if_ioctl = ixl_ioctl;
2390 #if __FreeBSD_version >= 1100036
2391 if_setgetcounterfn(ifp, ixl_get_counter);
2394 ifp->if_transmit = ixl_mq_start;
2396 ifp->if_qflush = ixl_qflush;
2398 ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2400 vsi->max_frame_size =
2401 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2402 + ETHER_VLAN_ENCAP_LEN;
2405 * Tell the upper layer(s) we support long frames.
2407 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2409 ifp->if_capabilities |= IFCAP_HWCSUM;
2410 ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2411 ifp->if_capabilities |= IFCAP_TSO;
2412 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2413 ifp->if_capabilities |= IFCAP_LRO;
2415 /* VLAN capabilities */
2416 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2419 | IFCAP_VLAN_HWCSUM;
2420 ifp->if_capenable = ifp->if_capabilities;
2423 ** Don't turn this on by default: if vlans are
2424 ** created on another pseudo device (e.g. lagg),
2425 ** vlan events are not passed through, which breaks
2426 ** operation; with HW FILTER off it works. If
2427 ** vlans are used directly on the ixl interface you can
2428 ** enable this and get full hardware tag filtering.
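** (If enabled here, the capability can then be toggled from userland
** at run time, e.g. with something like "ifconfig ixl0 vlanhwfilter",
** assuming the usual ifconfig capability keywords.)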
2430 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2433 * Specify the media types supported by this adapter and register
2434 * callbacks to update media and link information
2436 ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2439 aq_error = i40e_aq_get_phy_capabilities(hw,
2440 FALSE, TRUE, &abilities, NULL);
2441 /* May need delay to detect fiber correctly */
2442 if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2443 i40e_msec_delay(200);
2444 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2445 TRUE, &abilities, NULL);
2448 if (aq_error == I40E_ERR_UNKNOWN_PHY)
2449 device_printf(dev, "Unknown PHY type detected!\n");
2452 "Error getting supported media types, err %d,"
2453 " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2457 ixl_add_ifmedia(vsi, abilities.phy_type);
2459 /* Use autoselect media by default */
2460 ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2461 ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2463 ether_ifattach(ifp, hw->mac.addr);
2469 ixl_config_link(struct i40e_hw *hw)
2473 i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
2474 check = i40e_get_link_status(hw);
2476 printf("Link is %s\n", check ? "up":"down");
2481 /*********************************************************************
2483 * Get Firmware Switch configuration
2484 * - this will need to be more robust when more complex
2485 * switch configurations are enabled.
2487 **********************************************************************/
2489 ixl_switch_config(struct ixl_pf *pf)
2491 struct i40e_hw *hw = &pf->hw;
2492 struct ixl_vsi *vsi = &pf->vsi;
2493 device_t dev = vsi->dev;
2494 struct i40e_aqc_get_switch_config_resp *sw_config;
2495 u8 aq_buf[I40E_AQ_LARGE_BUF];
2496 int ret = I40E_SUCCESS;
2499 memset(&aq_buf, 0, sizeof(aq_buf));
2500 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2501 ret = i40e_aq_get_switch_config(hw, sw_config,
2502 sizeof(aq_buf), &next, NULL);
2504 device_printf(dev,"aq_get_switch_config failed!!\n");
2508 printf("Switch config: header reported: %d in structure, %d total\n",
2509 sw_config->header.num_reported, sw_config->header.num_total);
2510 printf("type=%d seid=%d uplink=%d downlink=%d\n",
2511 sw_config->element[0].element_type,
2512 sw_config->element[0].seid,
2513 sw_config->element[0].uplink_seid,
2514 sw_config->element[0].downlink_seid);
2516 /* Simplified due to a single VSI at the moment */
2517 vsi->seid = sw_config->element[0].seid;
2521 /*********************************************************************
2523 * Initialize the VSI: this handles contexts, which means things
2524 * like the number of descriptors and buffer size,
2525 * and it also initializes the rings.
2527 **********************************************************************/
2529 ixl_initialize_vsi(struct ixl_vsi *vsi)
2531 struct ixl_queue *que = vsi->queues;
2532 device_t dev = vsi->dev;
2533 struct i40e_hw *hw = vsi->hw;
2534 struct i40e_vsi_context ctxt;
2537 memset(&ctxt, 0, sizeof(ctxt));
2538 ctxt.seid = vsi->seid;
2539 ctxt.pf_num = hw->pf_id;
2540 err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2542 device_printf(dev,"get vsi params failed %x!!\n", err);
2546 printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2547 "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2548 "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2549 ctxt.uplink_seid, ctxt.vsi_number,
2550 ctxt.vsis_allocated, ctxt.vsis_unallocated,
2551 ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2552 ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2555 ** Set the queue and traffic class bits
2556 ** - when multiple traffic classes are supported
2557 ** this will need to be more robust.
2559 ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2560 ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2561 ctxt.info.queue_mapping[0] = 0;
2562 ctxt.info.tc_mapping[0] = 0x0800;
2564 /* Set VLAN receive stripping mode */
2565 ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2566 ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2567 if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2568 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2570 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2572 /* Keep copy of VSI info in VSI for statistic counters */
2573 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2575 /* Reset VSI statistics */
2576 ixl_vsi_reset_stats(vsi);
2577 vsi->hw_filters_add = 0;
2578 vsi->hw_filters_del = 0;
2580 err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2582 device_printf(dev,"update vsi params failed %x!!\n",
2583 hw->aq.asq_last_status);
2587 for (int i = 0; i < vsi->num_queues; i++, que++) {
2588 struct tx_ring *txr = &que->txr;
2589 struct rx_ring *rxr = &que->rxr;
2590 struct i40e_hmc_obj_txq tctx;
2591 struct i40e_hmc_obj_rxq rctx;
2596 /* Setup the HMC TX Context */
2597 size = que->num_desc * sizeof(struct i40e_tx_desc);
2598 memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2599 tctx.new_context = 1;
2600 tctx.base = (txr->dma.pa/128);
2601 tctx.qlen = que->num_desc;
2603 tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2604 /* Enable HEAD writeback */
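/*
** With head writeback the hardware DMAs the TX head index into host
** memory rather than the driver polling a register; the address set
** below is simply the word immediately past the descriptor ring.
*/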
2605 tctx.head_wb_ena = 1;
2606 tctx.head_wb_addr = txr->dma.pa +
2607 (que->num_desc * sizeof(struct i40e_tx_desc));
2608 tctx.rdylist_act = 0;
2609 err = i40e_clear_lan_tx_queue_context(hw, i);
2611 device_printf(dev, "Unable to clear TX context\n");
2614 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2616 device_printf(dev, "Unable to set TX context\n");
2619 /* Associate the ring with this PF */
2620 txctl = I40E_QTX_CTL_PF_QUEUE;
2621 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2622 I40E_QTX_CTL_PF_INDX_MASK);
2623 wr32(hw, I40E_QTX_CTL(i), txctl);
2626 /* Do ring (re)init */
2627 ixl_init_tx_ring(que);
2629 /* Next setup the HMC RX Context */
2630 if (vsi->max_frame_size <= 2048)
2631 rxr->mbuf_sz = MCLBYTES;
2633 rxr->mbuf_sz = MJUMPAGESIZE;
2635 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2637 /* Set up an RX context for the HMC */
2638 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2639 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2640 /* ignore header split for now */
2641 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2642 rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2643 vsi->max_frame_size : max_rxmax;
2645 rctx.dsize = 1; /* do 32byte descriptors */
2646 rctx.hsplit_0 = 0; /* no HDR split initially */
2647 rctx.base = (rxr->dma.pa/128);
2648 rctx.qlen = que->num_desc;
2649 rctx.tphrdesc_ena = 1;
2650 rctx.tphwdesc_ena = 1;
2651 rctx.tphdata_ena = 0;
2652 rctx.tphhead_ena = 0;
2653 rctx.lrxqthresh = 2;
2660 err = i40e_clear_lan_rx_queue_context(hw, i);
2663 "Unable to clear RX context %d\n", i);
2666 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2668 device_printf(dev, "Unable to set RX context %d\n", i);
2671 err = ixl_init_rx_ring(que);
2673 device_printf(dev, "Fail in init_rx_ring %d\n", i);
2676 wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2678 /* preserve queue */
2679 if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2680 struct netmap_adapter *na = NA(vsi->ifp);
2681 struct netmap_kring *kring = &na->rx_rings[i];
2682 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2683 wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2685 #endif /* DEV_NETMAP */
2686 wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2692 /*********************************************************************
2694 * Free all VSI structs.
2696 **********************************************************************/
2698 ixl_free_vsi(struct ixl_vsi *vsi)
2700 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2701 struct ixl_queue *que = vsi->queues;
2702 struct ixl_mac_filter *f;
2704 /* Free station queues */
2705 for (int i = 0; i < vsi->num_queues; i++, que++) {
2706 struct tx_ring *txr = &que->txr;
2707 struct rx_ring *rxr = &que->rxr;
2709 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2712 ixl_free_que_tx(que);
2714 i40e_free_dma_mem(&pf->hw, &txr->dma);
2716 IXL_TX_LOCK_DESTROY(txr);
2718 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2721 ixl_free_que_rx(que);
2723 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2725 IXL_RX_LOCK_DESTROY(rxr);
2728 free(vsi->queues, M_DEVBUF);
2730 /* Free VSI filter list */
2731 while (!SLIST_EMPTY(&vsi->ftl)) {
2732 f = SLIST_FIRST(&vsi->ftl);
2733 SLIST_REMOVE_HEAD(&vsi->ftl, next);
2739 /*********************************************************************
2741 * Allocate memory for the VSI (virtual station interface) and its
2742 * associated queues, rings and the descriptors for each;
2743 * called only once at attach.
2745 **********************************************************************/
2747 ixl_setup_stations(struct ixl_pf *pf)
2749 device_t dev = pf->dev;
2750 struct ixl_vsi *vsi;
2751 struct ixl_queue *que;
2752 struct tx_ring *txr;
2753 struct rx_ring *rxr;
2755 int error = I40E_SUCCESS;
2758 vsi->back = (void *)pf;
2763 /* Get memory for the station queues */
2765 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2766 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2767 device_printf(dev, "Unable to allocate queue memory\n");
2772 for (int i = 0; i < vsi->num_queues; i++) {
2773 que = &vsi->queues[i];
2774 que->num_desc = ixl_ringsz;
2777 /* mark the queue as active */
2778 vsi->active_queues |= (u64)1 << que->me;
2781 txr->tail = I40E_QTX_TAIL(que->me);
2783 /* Initialize the TX lock */
2784 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2785 device_get_nameunit(dev), que->me);
2786 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2787 /* Create the TX descriptor ring */
2788 tsize = roundup2((que->num_desc *
2789 sizeof(struct i40e_tx_desc)) +
2790 sizeof(u32), DBA_ALIGN);
2791 if (i40e_allocate_dma_mem(&pf->hw,
2792 &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2794 "Unable to allocate TX Descriptor memory\n");
2798 txr->base = (struct i40e_tx_desc *)txr->dma.va;
2799 bzero((void *)txr->base, tsize);
2800 /* Now allocate transmit soft structs for the ring */
2801 if (ixl_allocate_tx_data(que)) {
2803 "Critical Failure setting up TX structures\n");
2807 /* Allocate a buf ring */
2808 txr->br = buf_ring_alloc(4096, M_DEVBUF,
2809 M_WAITOK, &txr->mtx);
2810 if (txr->br == NULL) {
2812 "Critical Failure setting up TX buf ring\n");
2818 * Next the RX queues...
2820 rsize = roundup2(que->num_desc *
2821 sizeof(union i40e_rx_desc), DBA_ALIGN);
2824 rxr->tail = I40E_QRX_TAIL(que->me);
2826 /* Initialize the RX side lock */
2827 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2828 device_get_nameunit(dev), que->me);
2829 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2831 if (i40e_allocate_dma_mem(&pf->hw,
2832 &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2834 "Unable to allocate RX Descriptor memory\n");
2838 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2839 bzero((void *)rxr->base, rsize);
2841 /* Allocate receive soft structs for the ring */
2842 if (ixl_allocate_rx_data(que)) {
2844 "Critical Failure setting up receive structs\n");
2853 for (int i = 0; i < vsi->num_queues; i++) {
2854 que = &vsi->queues[i];
2858 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2860 i40e_free_dma_mem(&pf->hw, &txr->dma);
2868 ** Provide an update to the queue RX
2869 ** interrupt moderation value.
2872 ixl_set_queue_rx_itr(struct ixl_queue *que)
2874 struct ixl_vsi *vsi = que->vsi;
2875 struct i40e_hw *hw = vsi->hw;
2876 struct rx_ring *rxr = &que->rxr;
2882 /* Idle, do nothing */
2883 if (rxr->bytes == 0)
2886 if (ixl_dynamic_rx_itr) {
2887 rx_bytes = rxr->bytes/rxr->itr;
2890 /* Adjust latency range */
2891 switch (rxr->latency) {
2892 case IXL_LOW_LATENCY:
2893 if (rx_bytes > 10) {
2894 rx_latency = IXL_AVE_LATENCY;
2895 rx_itr = IXL_ITR_20K;
2898 case IXL_AVE_LATENCY:
2899 if (rx_bytes > 20) {
2900 rx_latency = IXL_BULK_LATENCY;
2901 rx_itr = IXL_ITR_8K;
2902 } else if (rx_bytes <= 10) {
2903 rx_latency = IXL_LOW_LATENCY;
2904 rx_itr = IXL_ITR_100K;
2907 case IXL_BULK_LATENCY:
2908 if (rx_bytes <= 20) {
2909 rx_latency = IXL_AVE_LATENCY;
2910 rx_itr = IXL_ITR_20K;
2915 rxr->latency = rx_latency;
2917 if (rx_itr != rxr->itr) {
2918 /* do an exponential smoothing */
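/*
** i.e. new = (10 * target * current) / ((9 * target) + current),
** which walks the programmed ITR toward the newly computed target
** instead of jumping to it directly.
*/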
2919 rx_itr = (10 * rx_itr * rxr->itr) /
2920 ((9 * rx_itr) + rxr->itr);
2921 rxr->itr = rx_itr & IXL_MAX_ITR;
2922 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2923 que->me), rxr->itr);
2925 } else { /* We may have toggled to non-dynamic */
2926 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2927 vsi->rx_itr_setting = ixl_rx_itr;
2928 /* Update the hardware if needed */
2929 if (rxr->itr != vsi->rx_itr_setting) {
2930 rxr->itr = vsi->rx_itr_setting;
2931 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2932 que->me), rxr->itr);
2942 ** Provide an update to the queue TX
2943 ** interrupt moderation value.
2946 ixl_set_queue_tx_itr(struct ixl_queue *que)
2948 struct ixl_vsi *vsi = que->vsi;
2949 struct i40e_hw *hw = vsi->hw;
2950 struct tx_ring *txr = &que->txr;
2956 /* Idle, do nothing */
2957 if (txr->bytes == 0)
2960 if (ixl_dynamic_tx_itr) {
2961 tx_bytes = txr->bytes/txr->itr;
2964 switch (txr->latency) {
2965 case IXL_LOW_LATENCY:
2966 if (tx_bytes > 10) {
2967 tx_latency = IXL_AVE_LATENCY;
2968 tx_itr = IXL_ITR_20K;
2971 case IXL_AVE_LATENCY:
2972 if (tx_bytes > 20) {
2973 tx_latency = IXL_BULK_LATENCY;
2974 tx_itr = IXL_ITR_8K;
2975 } else if (tx_bytes <= 10) {
2976 tx_latency = IXL_LOW_LATENCY;
2977 tx_itr = IXL_ITR_100K;
2980 case IXL_BULK_LATENCY:
2981 if (tx_bytes <= 20) {
2982 tx_latency = IXL_AVE_LATENCY;
2983 tx_itr = IXL_ITR_20K;
2988 txr->latency = tx_latency;
2990 if (tx_itr != txr->itr) {
2991 /* do an exponential smoothing */
2992 tx_itr = (10 * tx_itr * txr->itr) /
2993 ((9 * tx_itr) + txr->itr);
2994 txr->itr = tx_itr & IXL_MAX_ITR;
2995 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2996 que->me), txr->itr);
2999 } else { /* We may have toggled to non-dynamic */
3000 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
3001 vsi->tx_itr_setting = ixl_tx_itr;
3002 /* Update the hardware if needed */
3003 if (txr->itr != vsi->tx_itr_setting) {
3004 txr->itr = vsi->tx_itr_setting;
3005 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3006 que->me), txr->itr);
3016 ixl_add_hw_stats(struct ixl_pf *pf)
3018 device_t dev = pf->dev;
3019 struct ixl_vsi *vsi = &pf->vsi;
3020 struct ixl_queue *queues = vsi->queues;
3021 struct i40e_eth_stats *vsi_stats = &vsi->eth_stats;
3022 struct i40e_hw_port_stats *pf_stats = &pf->stats;
3024 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3025 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3026 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3028 struct sysctl_oid *vsi_node, *queue_node;
3029 struct sysctl_oid_list *vsi_list, *queue_list;
3031 struct tx_ring *txr;
3032 struct rx_ring *rxr;
3034 /* Driver statistics */
3035 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3036 CTLFLAG_RD, &pf->watchdog_events,
3037 "Watchdog timeouts");
3038 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3039 CTLFLAG_RD, &pf->admin_irq,
3040 "Admin Queue IRQ Handled");
3042 /* VSI statistics */
3043 #define QUEUE_NAME_LEN 32
3044 char queue_namebuf[QUEUE_NAME_LEN];
3046 // ERJ: Only one vsi now, re-do when >1 VSI enabled
3047 // snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", vsi->info.stat_counter_idx);
3048 vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
3049 CTLFLAG_RD, NULL, "VSI-specific stats");
3050 vsi_list = SYSCTL_CHILDREN(vsi_node);
3052 ixl_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats);
3054 /* Queue statistics */
3055 for (int q = 0; q < vsi->num_queues; q++) {
3056 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3057 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
3058 CTLFLAG_RD, NULL, "Queue #");
3059 queue_list = SYSCTL_CHILDREN(queue_node);
3061 txr = &(queues[q].txr);
3062 rxr = &(queues[q].rxr);
3064 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3065 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3066 "m_defrag() failed");
3067 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3068 CTLFLAG_RD, &(queues[q].dropped_pkts),
3069 "Driver dropped packets");
3070 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3071 CTLFLAG_RD, &(queues[q].irqs),
3072 "irqs on this queue");
3073 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3074 CTLFLAG_RD, &(queues[q].tso),
3076 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3077 CTLFLAG_RD, &(queues[q].tx_dma_setup),
3078 "Driver tx dma failure in xmit");
3079 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3080 CTLFLAG_RD, &(txr->no_desc),
3081 "Queue No Descriptor Available");
3082 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3083 CTLFLAG_RD, &(txr->total_packets),
3084 "Queue Packets Transmitted");
3085 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3086 CTLFLAG_RD, &(txr->tx_bytes),
3087 "Queue Bytes Transmitted");
3088 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3089 CTLFLAG_RD, &(rxr->rx_packets),
3090 "Queue Packets Received");
3091 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3092 CTLFLAG_RD, &(rxr->rx_bytes),
3093 "Queue Bytes Received");
3097 ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
3101 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3102 struct sysctl_oid_list *child,
3103 struct i40e_eth_stats *eth_stats)
3105 struct ixl_sysctl_info ctls[] =
3107 {ð_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3108 {ð_stats->rx_unicast, "ucast_pkts_rcvd",
3109 "Unicast Packets Received"},
3110 {ð_stats->rx_multicast, "mcast_pkts_rcvd",
3111 "Multicast Packets Received"},
3112 {ð_stats->rx_broadcast, "bcast_pkts_rcvd",
3113 "Broadcast Packets Received"},
3114 {ð_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3115 {ð_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3116 {ð_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3117 {ð_stats->tx_multicast, "mcast_pkts_txd",
3118 "Multicast Packets Transmitted"},
3119 {ð_stats->tx_broadcast, "bcast_pkts_txd",
3120 "Broadcast Packets Transmitted"},
3125 struct ixl_sysctl_info *entry = ctls;
3126 while (entry->stat != 0)
3128 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3129 CTLFLAG_RD, entry->stat,
3130 entry->description);
3136 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3137 struct sysctl_oid_list *child,
3138 struct i40e_hw_port_stats *stats)
3140 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3141 CTLFLAG_RD, NULL, "Mac Statistics");
3142 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3144 struct i40e_eth_stats *eth_stats = &stats->eth;
3145 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3147 struct ixl_sysctl_info ctls[] =
3149 {&stats->crc_errors, "crc_errors", "CRC Errors"},
3150 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3151 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3152 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3153 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3154 /* Packet Reception Stats */
3155 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3156 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3157 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3158 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3159 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3160 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3161 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3162 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3163 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3164 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3165 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3166 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3167 /* Packet Transmission Stats */
3168 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3169 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3170 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3171 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3172 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3173 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3174 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3176 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3177 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3178 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3179 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3184 struct ixl_sysctl_info *entry = ctls;
3185 while (entry->stat != 0)
3187 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3188 CTLFLAG_RD, entry->stat,
3189 entry->description);
3195 ** ixl_config_rss - setup RSS
3196 ** - note this is done for the single vsi
3198 static void ixl_config_rss(struct ixl_vsi *vsi)
3200 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3201 struct i40e_hw *hw = vsi->hw;
3203 u64 set_hena = 0, hena;
3206 u32 rss_hash_config;
3207 u32 rss_seed[IXL_KEYSZ];
3209 u32 rss_seed[IXL_KEYSZ] = {0x41b01687,
3210 0x183cfd8c, 0xce880440, 0x580cbc3c,
3211 0x35897377, 0x328b25e1, 0x4fa98922,
3212 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
3216 /* Fetch the configured RSS key */
3217 rss_getkey((uint8_t *) &rss_seed);
3220 /* Fill out hash function seed */
3221 for (i = 0; i < IXL_KEYSZ; i++)
3222 wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
3224 /* Enable PCTYPES for RSS: */
3226 rss_hash_config = rss_gethashconfig();
3227 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3228 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3229 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3230 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
3231 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3232 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
3233 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3234 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
3235 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3236 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
3237 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3238 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
3239 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3240 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
3243 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3244 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3245 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3246 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3247 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3248 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3249 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3250 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3251 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3252 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3253 ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3255 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3256 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3258 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3259 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
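/* The hash-enable (HENA) mask is 64 bits wide, hence the pair of
   32-bit register reads and writes above. */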
3261 /* Populate the LUT with max no. of queues in round robin fashion */
3262 for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3263 if (j == vsi->num_queues)
3267 * Fetch the RSS bucket id for the given indirection entry.
3268 * Cap it at the number of configured buckets (which is num_queues).
3271 que_id = rss_get_indirection_to_bucket(i);
3272 que_id = que_id % vsi->num_queues;
3276 /* lut = 4-byte sliding window of 4 lut entries */
3277 lut = (lut << 8) | (que_id &
3278 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3279 /* On i = 3, we have 4 entries in lut; write to the register */
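/* e.g. if the first four bucket ids are 0, 1, 2 and 3 (and lut
   starts out zero), the value written to HLUT(0) is 0x00010203 */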
3281 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3288 ** This routine is run via a vlan config EVENT;
3289 ** it enables us to use the HW Filter table since
3290 ** we can get the vlan id. This just creates the
3291 ** entry in the soft version of the VFTA; init will
3292 ** repopulate the real table.
3295 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3297 struct ixl_vsi *vsi = ifp->if_softc;
3298 struct i40e_hw *hw = vsi->hw;
3299 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3301 if (ifp->if_softc != arg) /* Not our event */
3304 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3309 ixl_add_filter(vsi, hw->mac.addr, vtag);
3314 ** This routine is run via a vlan
3315 ** unconfig EVENT; it removes our entry
3316 ** from the soft vfta.
3319 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3321 struct ixl_vsi *vsi = ifp->if_softc;
3322 struct i40e_hw *hw = vsi->hw;
3323 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3325 if (ifp->if_softc != arg)
3328 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3333 ixl_del_filter(vsi, hw->mac.addr, vtag);
3338 ** This routine updates vlan filters; called by init,
3339 ** it scans the filter table and then updates the hw
3340 ** after a soft reset.
3343 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3345 struct ixl_mac_filter *f;
3348 if (vsi->num_vlans == 0)
3351 ** Scan the filter list for vlan entries,
3352 ** mark them for addition and then call
3353 ** for the AQ update.
3355 SLIST_FOREACH(f, &vsi->ftl, next) {
3356 if (f->flags & IXL_FILTER_VLAN) {
3364 printf("setup vlan: no filters found!\n");
3367 flags = IXL_FILTER_VLAN;
3368 flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3369 ixl_add_hw_filters(vsi, flags, cnt);
3374 ** Initialize filter list and add filters that the hardware
3375 ** needs to know about.
3378 ixl_init_filters(struct ixl_vsi *vsi)
3380 /* Add broadcast address */
3381 u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
3382 ixl_add_filter(vsi, bc, IXL_VLAN_ANY);
3386 ** This routine adds multicast filters
3389 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3391 struct ixl_mac_filter *f;
3393 /* Does one already exist */
3394 f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3398 f = ixl_get_filter(vsi);
3400 printf("WARNING: no filter available!!\n");
3403 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3404 f->vlan = IXL_VLAN_ANY;
3405 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3412 ** This routine adds macvlan filters
3415 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3417 struct ixl_mac_filter *f, *tmp;
3418 device_t dev = vsi->dev;
3420 DEBUGOUT("ixl_add_filter: begin");
3422 /* Does one already exist */
3423 f = ixl_find_filter(vsi, macaddr, vlan);
3427 ** Is this the first vlan being registered? If so, we
3428 ** need to remove the ANY filter that indicates we are
3429 ** not in a vlan, and replace it with a 0 filter.
3431 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3432 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3434 ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3435 ixl_add_filter(vsi, macaddr, 0);
3439 f = ixl_get_filter(vsi);
3441 device_printf(dev, "WARNING: no filter available!!\n");
3444 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3446 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3447 if (f->vlan != IXL_VLAN_ANY)
3448 f->flags |= IXL_FILTER_VLAN;
3450 ixl_add_hw_filters(vsi, f->flags, 1);
3455 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3457 struct ixl_mac_filter *f;
3459 f = ixl_find_filter(vsi, macaddr, vlan);
3463 f->flags |= IXL_FILTER_DEL;
3464 ixl_del_hw_filters(vsi, 1);
3466 /* Check if this is the last vlan removal */
3467 if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3468 /* Switch back to a non-vlan filter */
3469 ixl_del_filter(vsi, macaddr, 0);
3470 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3476 ** Find the filter with both matching mac addr and vlan id
3478 static struct ixl_mac_filter *
3479 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3481 struct ixl_mac_filter *f;
3484 SLIST_FOREACH(f, &vsi->ftl, next) {
3485 if (!cmp_etheraddr(f->macaddr, macaddr))
3487 if (f->vlan == vlan) {
3499 ** This routine takes additions to the vsi filter
3500 ** table and creates an Admin Queue call to create
3501 ** the filters in the hardware.
3504 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3506 struct i40e_aqc_add_macvlan_element_data *a, *b;
3507 struct ixl_mac_filter *f;
3508 struct i40e_hw *hw = vsi->hw;
3509 device_t dev = vsi->dev;
3512 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3513 M_DEVBUF, M_NOWAIT | M_ZERO);
3515 device_printf(dev, "add_hw_filters failed to get memory\n");
3520 ** Scan the filter list; each time we find a match
3521 ** we add it to the admin queue array and turn off the add bit.
3524 SLIST_FOREACH(f, &vsi->ftl, next) {
3525 if (f->flags == flags) {
3526 b = &a[j]; // a pox on fvl long names :)
3527 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3529 (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3530 b->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3531 f->flags &= ~IXL_FILTER_ADD;
3538 err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3540 device_printf(dev, "aq_add_macvlan err %d, "
3541 "aq_error %d\n", err, hw->aq.asq_last_status);
3543 vsi->hw_filters_add += j;
3550 ** This routine takes removals in the vsi filter
3551 ** table and creates an Admin Queue call to delete
3552 ** the filters in the hardware.
3555 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3557 struct i40e_aqc_remove_macvlan_element_data *d, *e;
3558 struct i40e_hw *hw = vsi->hw;
3559 device_t dev = vsi->dev;
3560 struct ixl_mac_filter *f, *f_temp;
3563 DEBUGOUT("ixl_del_hw_filters: begin\n");
3565 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3566 M_DEVBUF, M_NOWAIT | M_ZERO);
3568 printf("del hw filter failed to get memory\n");
3572 SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3573 if (f->flags & IXL_FILTER_DEL) {
3574 e = &d[j]; // a pox on fvl long names :)
3575 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3576 e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3577 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3578 /* delete entry from vsi list */
3579 SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3587 err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3588 /* NOTE: returns ENOENT every time but seems to work fine,
3589 so we'll ignore that specific error. */
3590 // TODO: Does this still occur on current firmware versions?
3591 if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3593 for (int i = 0; i < j; i++)
3594 sc += (!d[i].error_code);
3595 vsi->hw_filters_del += sc;
3597 "Failed to remove %d/%d filters, aq error %d\n",
3598 j - sc, j, hw->aq.asq_last_status);
3600 vsi->hw_filters_del += j;
3604 DEBUGOUT("ixl_del_hw_filters: end\n");
3610 ixl_enable_rings(struct ixl_vsi *vsi)
3612 struct i40e_hw *hw = vsi->hw;
3615 for (int i = 0; i < vsi->num_queues; i++) {
3616 i40e_pre_tx_queue_cfg(hw, i, TRUE);
3618 reg = rd32(hw, I40E_QTX_ENA(i));
3619 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3620 I40E_QTX_ENA_QENA_STAT_MASK;
3621 wr32(hw, I40E_QTX_ENA(i), reg);
3622 /* Verify the enable took */
3623 for (int j = 0; j < 10; j++) {
3624 reg = rd32(hw, I40E_QTX_ENA(i));
3625 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3627 i40e_msec_delay(10);
3629 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
3630 printf("TX queue %d disabled!\n", i);
3632 reg = rd32(hw, I40E_QRX_ENA(i));
3633 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3634 I40E_QRX_ENA_QENA_STAT_MASK;
3635 wr32(hw, I40E_QRX_ENA(i), reg);
3636 /* Verify the enable took */
3637 for (int j = 0; j < 10; j++) {
3638 reg = rd32(hw, I40E_QRX_ENA(i));
3639 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3641 i40e_msec_delay(10);
3643 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3644 printf("RX queue %d disabled!\n", i);
3649 ixl_disable_rings(struct ixl_vsi *vsi)
3651 struct i40e_hw *hw = vsi->hw;
3654 for (int i = 0; i < vsi->num_queues; i++) {
3655 i40e_pre_tx_queue_cfg(hw, i, FALSE);
3656 i40e_usec_delay(500);
3658 reg = rd32(hw, I40E_QTX_ENA(i));
3659 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3660 wr32(hw, I40E_QTX_ENA(i), reg);
3661 /* Verify the disable took */
3662 for (int j = 0; j < 10; j++) {
3663 reg = rd32(hw, I40E_QTX_ENA(i));
3664 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3666 i40e_msec_delay(10);
3668 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3669 printf("TX queue %d still enabled!\n", i);
3671 reg = rd32(hw, I40E_QRX_ENA(i));
3672 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3673 wr32(hw, I40E_QRX_ENA(i), reg);
3674 /* Verify the disable took */
3675 for (int j = 0; j < 10; j++) {
3676 reg = rd32(hw, I40E_QRX_ENA(i));
3677 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3679 i40e_msec_delay(10);
3681 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3682 printf("RX queue %d still enabled!\n", i);
3687 * ixl_handle_mdd_event
3689 * Called from the interrupt handler to identify possibly malicious VFs
3690 * (it also detects events from the PF).
3692 static void ixl_handle_mdd_event(struct ixl_pf *pf)
3694 struct i40e_hw *hw = &pf->hw;
3695 device_t dev = pf->dev;
3696 bool mdd_detected = false;
3697 bool pf_mdd_detected = false;
3700 /* find what triggered the MDD event */
3701 reg = rd32(hw, I40E_GL_MDET_TX);
3702 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3703 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3704 I40E_GL_MDET_TX_PF_NUM_SHIFT;
3705 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3706 I40E_GL_MDET_TX_EVENT_SHIFT;
3707 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3708 I40E_GL_MDET_TX_QUEUE_SHIFT;
3710 "Malicious Driver Detection event 0x%02x"
3711 " on TX queue %d pf number 0x%02x\n",
3712 event, queue, pf_num);
3713 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3714 mdd_detected = true;
3716 reg = rd32(hw, I40E_GL_MDET_RX);
3717 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3718 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3719 I40E_GL_MDET_RX_FUNCTION_SHIFT;
3720 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3721 I40E_GL_MDET_RX_EVENT_SHIFT;
3722 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3723 I40E_GL_MDET_RX_QUEUE_SHIFT;
3725 "Malicious Driver Detection event 0x%02x"
3726 " on RX queue %d of function 0x%02x\n",
3727 event, queue, func);
3728 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3729 mdd_detected = true;
3733 reg = rd32(hw, I40E_PF_MDET_TX);
3734 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3735 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3737 "MDD TX event is for this function 0x%08x",
3739 pf_mdd_detected = true;
3741 reg = rd32(hw, I40E_PF_MDET_RX);
3742 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3743 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3745 "MDD RX event is for this function 0x%08x",
3747 pf_mdd_detected = true;
3751 /* re-enable mdd interrupt cause */
3752 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3753 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3754 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3759 ixl_enable_intr(struct ixl_vsi *vsi)
3761 struct i40e_hw *hw = vsi->hw;
3762 struct ixl_queue *que = vsi->queues;
3764 if (ixl_enable_msix) {
3765 ixl_enable_adminq(hw);
3766 for (int i = 0; i < vsi->num_queues; i++, que++)
3767 ixl_enable_queue(hw, que->me);
3769 ixl_enable_legacy(hw);
3773 ixl_disable_intr(struct ixl_vsi *vsi)
3775 struct i40e_hw *hw = vsi->hw;
3776 struct ixl_queue *que = vsi->queues;
3778 if (ixl_enable_msix) {
3779 ixl_disable_adminq(hw);
3780 for (int i = 0; i < vsi->num_queues; i++, que++)
3781 ixl_disable_queue(hw, que->me);
3783 ixl_disable_legacy(hw);
3787 ixl_enable_adminq(struct i40e_hw *hw)
3791 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3792 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3793 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3794 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3800 ixl_disable_adminq(struct i40e_hw *hw)
3804 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3805 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3811 ixl_enable_queue(struct i40e_hw *hw, int id)
3815 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3816 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3817 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3818 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3822 ixl_disable_queue(struct i40e_hw *hw, int id)
3826 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3827 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3833 ixl_enable_legacy(struct i40e_hw *hw)
3836 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3837 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3838 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3839 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3843 ixl_disable_legacy(struct i40e_hw *hw)
3847 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3848 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3854 ixl_update_stats_counters(struct ixl_pf *pf)
3856 struct i40e_hw *hw = &pf->hw;
3857 struct ixl_vsi *vsi = &pf->vsi;
3859 struct i40e_hw_port_stats *nsd = &pf->stats;
3860 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3862 /* Update hw stats */
3863 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3864 pf->stat_offsets_loaded,
3865 &osd->crc_errors, &nsd->crc_errors);
3866 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3867 pf->stat_offsets_loaded,
3868 &osd->illegal_bytes, &nsd->illegal_bytes);
3869 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3870 I40E_GLPRT_GORCL(hw->port),
3871 pf->stat_offsets_loaded,
3872 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3873 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3874 I40E_GLPRT_GOTCL(hw->port),
3875 pf->stat_offsets_loaded,
3876 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3877 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3878 pf->stat_offsets_loaded,
3879 &osd->eth.rx_discards,
3880 &nsd->eth.rx_discards);
3881 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3882 I40E_GLPRT_UPRCL(hw->port),
3883 pf->stat_offsets_loaded,
3884 &osd->eth.rx_unicast,
3885 &nsd->eth.rx_unicast);
3886 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3887 I40E_GLPRT_UPTCL(hw->port),
3888 pf->stat_offsets_loaded,
3889 &osd->eth.tx_unicast,
3890 &nsd->eth.tx_unicast);
3891 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3892 I40E_GLPRT_MPRCL(hw->port),
3893 pf->stat_offsets_loaded,
3894 &osd->eth.rx_multicast,
3895 &nsd->eth.rx_multicast);
3896 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3897 I40E_GLPRT_MPTCL(hw->port),
3898 pf->stat_offsets_loaded,
3899 &osd->eth.tx_multicast,
3900 &nsd->eth.tx_multicast);
3901 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3902 I40E_GLPRT_BPRCL(hw->port),
3903 pf->stat_offsets_loaded,
3904 &osd->eth.rx_broadcast,
3905 &nsd->eth.rx_broadcast);
3906 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3907 I40E_GLPRT_BPTCL(hw->port),
3908 pf->stat_offsets_loaded,
3909 &osd->eth.tx_broadcast,
3910 &nsd->eth.tx_broadcast);
3912 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3913 pf->stat_offsets_loaded,
3914 &osd->tx_dropped_link_down,
3915 &nsd->tx_dropped_link_down);
3916 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3917 pf->stat_offsets_loaded,
3918 &osd->mac_local_faults,
3919 &nsd->mac_local_faults);
3920 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3921 pf->stat_offsets_loaded,
3922 &osd->mac_remote_faults,
3923 &nsd->mac_remote_faults);
3924 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3925 pf->stat_offsets_loaded,
3926 &osd->rx_length_errors,
3927 &nsd->rx_length_errors);
3929 /* Flow control (LFC) stats */
3930 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3931 pf->stat_offsets_loaded,
3932 &osd->link_xon_rx, &nsd->link_xon_rx);
3933 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3934 pf->stat_offsets_loaded,
3935 &osd->link_xon_tx, &nsd->link_xon_tx);
3936 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3937 pf->stat_offsets_loaded,
3938 &osd->link_xoff_rx, &nsd->link_xoff_rx);
3939 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3940 pf->stat_offsets_loaded,
3941 &osd->link_xoff_tx, &nsd->link_xoff_tx);
3943 /* Packet size stats rx */
3944 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3945 I40E_GLPRT_PRC64L(hw->port),
3946 pf->stat_offsets_loaded,
3947 &osd->rx_size_64, &nsd->rx_size_64);
3948 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3949 I40E_GLPRT_PRC127L(hw->port),
3950 pf->stat_offsets_loaded,
3951 &osd->rx_size_127, &nsd->rx_size_127);
3952 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3953 I40E_GLPRT_PRC255L(hw->port),
3954 pf->stat_offsets_loaded,
3955 &osd->rx_size_255, &nsd->rx_size_255);
3956 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3957 I40E_GLPRT_PRC511L(hw->port),
3958 pf->stat_offsets_loaded,
3959 &osd->rx_size_511, &nsd->rx_size_511);
3960 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3961 I40E_GLPRT_PRC1023L(hw->port),
3962 pf->stat_offsets_loaded,
3963 &osd->rx_size_1023, &nsd->rx_size_1023);
3964 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3965 I40E_GLPRT_PRC1522L(hw->port),
3966 pf->stat_offsets_loaded,
3967 &osd->rx_size_1522, &nsd->rx_size_1522);
3968 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3969 I40E_GLPRT_PRC9522L(hw->port),
3970 pf->stat_offsets_loaded,
3971 &osd->rx_size_big, &nsd->rx_size_big);
3973 /* Packet size stats tx */
3974 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3975 I40E_GLPRT_PTC64L(hw->port),
3976 pf->stat_offsets_loaded,
3977 &osd->tx_size_64, &nsd->tx_size_64);
3978 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3979 I40E_GLPRT_PTC127L(hw->port),
3980 pf->stat_offsets_loaded,
3981 &osd->tx_size_127, &nsd->tx_size_127);
3982 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3983 I40E_GLPRT_PTC255L(hw->port),
3984 pf->stat_offsets_loaded,
3985 &osd->tx_size_255, &nsd->tx_size_255);
3986 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3987 I40E_GLPRT_PTC511L(hw->port),
3988 pf->stat_offsets_loaded,
3989 &osd->tx_size_511, &nsd->tx_size_511);
3990 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3991 I40E_GLPRT_PTC1023L(hw->port),
3992 pf->stat_offsets_loaded,
3993 &osd->tx_size_1023, &nsd->tx_size_1023);
3994 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3995 I40E_GLPRT_PTC1522L(hw->port),
3996 pf->stat_offsets_loaded,
3997 &osd->tx_size_1522, &nsd->tx_size_1522);
3998 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3999 I40E_GLPRT_PTC9522L(hw->port),
4000 pf->stat_offsets_loaded,
4001 &osd->tx_size_big, &nsd->tx_size_big);
4003 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
4004 pf->stat_offsets_loaded,
4005 &osd->rx_undersize, &nsd->rx_undersize);
4006 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
4007 pf->stat_offsets_loaded,
4008 &osd->rx_fragments, &nsd->rx_fragments);
4009 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
4010 pf->stat_offsets_loaded,
4011 &osd->rx_oversize, &nsd->rx_oversize);
4012 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
4013 pf->stat_offsets_loaded,
4014 &osd->rx_jabber, &nsd->rx_jabber);
4015 pf->stat_offsets_loaded = true;
4018 /* Update vsi stats */
4019 ixl_update_eth_stats(vsi);
4022 // ERJ - these are per-port, update all vsis?
4023 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes);
4027 ** Tasklet handler for MSIX Adminq interrupts
4028 ** - done outside the interrupt context since it might sleep
4031 ixl_do_adminq(void *context, int pending)
4033 struct ixl_pf *pf = context;
4034 struct i40e_hw *hw = &pf->hw;
4035 struct ixl_vsi *vsi = &pf->vsi;
4036 struct i40e_arq_event_info event;
4041 event.buf_len = IXL_AQ_BUF_SZ;
4042 event.msg_buf = malloc(event.buf_len,
4043 M_DEVBUF, M_NOWAIT | M_ZERO);
4044 if (!event.msg_buf) {
4045 printf("Unable to allocate adminq memory\n");
4049 /* clean and process any events */
4051 ret = i40e_clean_arq_element(hw, &event, &result);
4054 opcode = LE16_TO_CPU(event.desc.opcode);
4056 case i40e_aqc_opc_get_link_status:
4057 vsi->link_up = ixl_config_link(hw);
4058 ixl_update_link_status(pf);
4060 case i40e_aqc_opc_send_msg_to_pf:
4061 /* process pf/vf communication here */
4063 case i40e_aqc_opc_event_lan_overflow:
4067 printf("AdminQ unknown event %x\n", opcode);
4072 } while (result && (loop++ < IXL_ADM_LIMIT));
4074 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4075 reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4076 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4077 free(event.msg_buf, M_DEVBUF);
4080 ixl_enable_adminq(&pf->hw);
4082 ixl_enable_intr(vsi);
4086 ixl_debug_info(SYSCTL_HANDLER_ARGS)
4089 int error, input = 0;
4091 error = sysctl_handle_int(oidp, &input, 0, req);
4093 if (error || !req->newptr)
4097 pf = (struct ixl_pf *)arg1;
4098 ixl_print_debug_info(pf);
4105 ixl_print_debug_info(struct ixl_pf *pf)
4107 struct i40e_hw *hw = &pf->hw;
4108 struct ixl_vsi *vsi = &pf->vsi;
4109 struct ixl_queue *que = vsi->queues;
4110 struct rx_ring *rxr = &que->rxr;
4111 struct tx_ring *txr = &que->txr;
4115 printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4116 printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4117 printf("RX next check = %x\n", rxr->next_check);
4118 printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4119 printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4120 printf("TX desc avail = %x\n", txr->avail);
4122 reg = rd32(hw, I40E_GLV_GORCL(0xc));
4123 printf("RX Bytes = %x\n", reg);
4124 reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4125 printf("Port RX Bytes = %x\n", reg);
4126 reg = rd32(hw, I40E_GLV_RDPC(0xc));
4127 printf("RX discard = %x\n", reg);
4128 reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4129 printf("Port RX discard = %x\n", reg);
4131 reg = rd32(hw, I40E_GLV_TEPC(0xc));
4132 printf("TX errors = %x\n", reg);
4133 reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4134 printf("TX Bytes = %x\n", reg);
4136 reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4137 printf("RX undersize = %x\n", reg);
4138 reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4139 printf("RX fragments = %x\n", reg);
4140 reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4141 printf("RX oversize = %x\n", reg);
4142 reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4143 printf("RX length error = %x\n", reg);
4144 reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4145 printf("mac remote fault = %x\n", reg);
4146 reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4147 printf("mac local fault = %x\n", reg);
4151 * Update VSI-specific ethernet statistics counters.
4153 void ixl_update_eth_stats(struct ixl_vsi *vsi)
4155 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4156 struct i40e_hw *hw = &pf->hw;
4157 struct i40e_eth_stats *es;
4158 struct i40e_eth_stats *oes;
4160 uint64_t tx_discards;
4161 struct i40e_hw_port_stats *nsd;
4162 u16 stat_idx = vsi->info.stat_counter_idx;
4164 es = &vsi->eth_stats;
4165 oes = &vsi->eth_stats_offsets;
4168 /* Gather up the stats that the hw collects */
4169 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4170 vsi->stat_offsets_loaded,
4171 &oes->tx_errors, &es->tx_errors);
4172 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4173 vsi->stat_offsets_loaded,
4174 &oes->rx_discards, &es->rx_discards);
4176 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4177 I40E_GLV_GORCL(stat_idx),
4178 vsi->stat_offsets_loaded,
4179 &oes->rx_bytes, &es->rx_bytes);
4180 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4181 I40E_GLV_UPRCL(stat_idx),
4182 vsi->stat_offsets_loaded,
4183 &oes->rx_unicast, &es->rx_unicast);
4184 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4185 I40E_GLV_MPRCL(stat_idx),
4186 vsi->stat_offsets_loaded,
4187 &oes->rx_multicast, &es->rx_multicast);
4188 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4189 I40E_GLV_BPRCL(stat_idx),
4190 vsi->stat_offsets_loaded,
4191 &oes->rx_broadcast, &es->rx_broadcast);
4193 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4194 I40E_GLV_GOTCL(stat_idx),
4195 vsi->stat_offsets_loaded,
4196 &oes->tx_bytes, &es->tx_bytes);
4197 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4198 I40E_GLV_UPTCL(stat_idx),
4199 vsi->stat_offsets_loaded,
4200 &oes->tx_unicast, &es->tx_unicast);
4201 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4202 I40E_GLV_MPTCL(stat_idx),
4203 vsi->stat_offsets_loaded,
4204 &oes->tx_multicast, &es->tx_multicast);
4205 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4206 I40E_GLV_BPTCL(stat_idx),
4207 vsi->stat_offsets_loaded,
4208 &oes->tx_broadcast, &es->tx_broadcast);
4209 vsi->stat_offsets_loaded = true;
4211 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4212 for (i = 0; i < vsi->num_queues; i++)
4213 tx_discards += vsi->queues[i].txr.br->br_drops;
4215 /* Update ifnet stats */
4216 IXL_SET_IPACKETS(vsi, es->rx_unicast +
4219 IXL_SET_OPACKETS(vsi, es->tx_unicast +
4222 IXL_SET_IBYTES(vsi, es->rx_bytes);
4223 IXL_SET_OBYTES(vsi, es->tx_bytes);
4224 IXL_SET_IMCASTS(vsi, es->rx_multicast);
4225 IXL_SET_OMCASTS(vsi, es->tx_multicast);
4227 IXL_SET_OERRORS(vsi, es->tx_errors);
4228 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4229 IXL_SET_OQDROPS(vsi, tx_discards);
4230 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4231 IXL_SET_COLLISIONS(vsi, 0);
4235 * Reset all of the stats for the given pf
void ixl_pf_reset_stats(struct ixl_pf *pf)
{
    bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
    bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
    pf->stat_offsets_loaded = false;
}

/**
 * Resets all stats of the given vsi
 **/
void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
{
    bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
    bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
    vsi->stat_offsets_loaded = false;
}

/**
 * Read and update a 48 bit stat from the hw
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts. We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.
 **/
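/*
 * Illustrative note (not from the original comment above): the combined
 * counter pair is only 48 bits wide, so a raw read can be numerically
 * smaller than the saved offset once the counter wraps.  For example, with
 * a saved offset of 0xFFFFFFFFFFF0 and a new raw read of 0x10, the code
 * below reports (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20, masked to 48 bits.
 */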
static void
ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
    bool offset_loaded, u64 *offset, u64 *stat)
{
    u64 new_data;

#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
    new_data = rd64(hw, loreg);
#else
    /*
     * Use two rd32's instead of one rd64; FreeBSD versions before
     * 10 don't support 8 byte bus reads/writes.
     */
    new_data = rd32(hw, loreg);
    new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
#endif

    if (!offset_loaded)
        *offset = new_data;
    if (new_data >= *offset)
        *stat = new_data - *offset;
    else
        *stat = (new_data + ((u64)1 << 48)) - *offset;
    *stat &= 0xFFFFFFFFFFFFULL;
}

/**
 * Read and update a 32 bit stat from the hw
 **/
static void
ixl_stat_update32(struct i40e_hw *hw, u32 reg,
    bool offset_loaded, u64 *offset, u64 *stat)
{
    u32 new_data;

    new_data = rd32(hw, reg);
    if (!offset_loaded)
        *offset = new_data;
    if (new_data >= *offset)
        *stat = (u32)(new_data - *offset);
    else
        *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}

/*
** Set flow control using sysctl:
**  0 - off
**  1 - rx pause
**  2 - tx pause
**  3 - full
*/
static int
ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
    /*
     * TODO: ensure flow control is disabled if
     * priority flow control is enabled
     *
     * TODO: ensure tx CRC by hardware should be enabled
     * if tx flow control is enabled.
     */
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    struct i40e_hw *hw = &pf->hw;
    device_t dev = pf->dev;
    int error = 0;
    enum i40e_status_code aq_error = 0;
    u8 fc_aq_err = 0;

    error = sysctl_handle_int(oidp, &pf->fc, 0, req);
    if ((error) || (req->newptr == NULL))
        return (error);
    if (pf->fc < 0 || pf->fc > 3) {
        device_printf(dev,
            "Invalid fc mode; valid modes are 0 through 3\n");
        return (EINVAL);
    }

    /*
    ** Changing flow control mode currently does not work on
    ** 40GBASE-CR4 PHYs
    */
    if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
        || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
        device_printf(dev, "Changing flow control mode unsupported"
            " on 40GBase-CR4 media.\n");
        return (ENODEV);
    }

    /* Set fc ability for port */
    hw->fc.requested_mode = pf->fc;
    aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
    if (aq_error) {
        device_printf(dev,
            "%s: Error setting new fc mode %d; fc_err %#x\n",
            __func__, aq_error, fc_aq_err);
        return (EAGAIN);
    }

    return (0);
}
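
/*
 * Illustrative usage (assuming the handler above is attached to the
 * per-device sysctl tree as dev.ixl.<unit>.fc):
 *
 *   # sysctl dev.ixl.0.fc=3    (request full rx/tx pause)
 *   # sysctl dev.ixl.0.fc=0    (disable flow control)
 */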
static int
ixl_current_speed(SYSCTL_HANDLER_ARGS)
{
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    struct i40e_hw *hw = &pf->hw;
    int error = 0, index = 0;

    char *speeds[] = {
        "Unknown",
        "100M",
        "1G",
        "10G",
        "40G",
        "20G"
    };

    ixl_update_link_status(pf);

    switch (hw->phy.link_info.link_speed) {
    case I40E_LINK_SPEED_100MB:
        index = 1;
        break;
    case I40E_LINK_SPEED_1GB:
        index = 2;
        break;
    case I40E_LINK_SPEED_10GB:
        index = 3;
        break;
    case I40E_LINK_SPEED_40GB:
        index = 4;
        break;
    case I40E_LINK_SPEED_20GB:
        index = 5;
        break;
    case I40E_LINK_SPEED_UNKNOWN:
    default:
        index = 0;
        break;
    }

    error = sysctl_handle_string(oidp, speeds[index],
        strlen(speeds[index]), req);
    return (error);
}
static int
ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
{
    struct i40e_hw *hw = &pf->hw;
    device_t dev = pf->dev;
    struct i40e_aq_get_phy_abilities_resp abilities;
    struct i40e_aq_set_phy_config config;
    enum i40e_status_code aq_error = 0;

    /* Get current capability information */
    aq_error = i40e_aq_get_phy_capabilities(hw,
        FALSE, FALSE, &abilities, NULL);
    if (aq_error) {
        device_printf(dev,
            "%s: Error getting phy capabilities %d,"
            " aq error: %d\n", __func__, aq_error,
            hw->aq.asq_last_status);
        return (EAGAIN);
    }

    /* Prepare new config */
    bzero(&config, sizeof(config));
    config.phy_type = abilities.phy_type;
    config.abilities = abilities.abilities
        | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
    config.eee_capability = abilities.eee_capability;
    config.eeer = abilities.eeer_val;
    config.low_power_ctrl = abilities.d3_lpan;
    /* Translate into aq cmd link_speed */
    if (speeds & 0x4)
        config.link_speed |= I40E_LINK_SPEED_10GB;
    if (speeds & 0x2)
        config.link_speed |= I40E_LINK_SPEED_1GB;
    if (speeds & 0x1)
        config.link_speed |= I40E_LINK_SPEED_100MB;

    /* Do aq command & restart link */
    aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
    if (aq_error) {
        device_printf(dev,
            "%s: Error setting new phy config %d,"
            " aq error: %d\n", __func__, aq_error,
            hw->aq.asq_last_status);
        return (EAGAIN);
    }

    /*
    ** This seems a bit heavy handed, but we
    ** need to get a reinit on some devices
    */
    IXL_PF_LOCK(pf);
    ixl_stop(pf);
    ixl_init_locked(pf);
    IXL_PF_UNLOCK(pf);

    return (0);
}

/*
** Control link advertise speed:
**  Flags:
**  0x1 - advertise 100 Mb
**  0x2 - advertise 1G
**  0x4 - advertise 10G
**
** Does not work on 40G devices.
*/
static int
ixl_set_advertise(SYSCTL_HANDLER_ARGS)
{
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    struct i40e_hw *hw = &pf->hw;
    device_t dev = pf->dev;
    int requested_ls = 0;
    int error = 0;

    /*
    ** FW doesn't support changing advertised speed
    ** for 40G devices; speed is always 40G.
    */
    if (i40e_is_40G_device(hw->device_id))
        return (ENODEV);

    /* Read in new mode */
    requested_ls = pf->advertised_speed;
    error = sysctl_handle_int(oidp, &requested_ls, 0, req);
    if ((error) || (req->newptr == NULL))
        return (error);
    if (requested_ls < 1 || requested_ls > 7) {
        device_printf(dev,
            "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
        return (EINVAL);
    }

    /* Exit if no change */
    if (pf->advertised_speed == requested_ls)
        return (0);

    error = ixl_set_advertised_speeds(pf, requested_ls);
    if (error)
        return (error);

    pf->advertised_speed = requested_ls;
    ixl_update_link_status(pf);
    return (0);
}
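
/*
 * Illustrative usage (assuming the handler above is attached to the
 * per-device sysctl tree as dev.ixl.<unit>.advertise_speed):
 *
 *   # sysctl dev.ixl.0.advertise_speed=0x6    (advertise 1G and 10G)
 *   # sysctl dev.ixl.0.advertise_speed=0x7    (advertise 100M, 1G and 10G)
 */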

/*
** Get the width and transaction speed of
** the bus this adapter is plugged into.
*/
static u16
ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
{
    u16 link;
    u32 offset;

    /* Get the PCI Express Capabilities offset */
    pci_find_cap(dev, PCIY_EXPRESS, &offset);

    /* ...and read the Link Status Register */
    link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);

    switch (link & I40E_PCI_LINK_WIDTH) {
    case I40E_PCI_LINK_WIDTH_1:
        hw->bus.width = i40e_bus_width_pcie_x1;
        break;
    case I40E_PCI_LINK_WIDTH_2:
        hw->bus.width = i40e_bus_width_pcie_x2;
        break;
    case I40E_PCI_LINK_WIDTH_4:
        hw->bus.width = i40e_bus_width_pcie_x4;
        break;
    case I40E_PCI_LINK_WIDTH_8:
        hw->bus.width = i40e_bus_width_pcie_x8;
        break;
    default:
        hw->bus.width = i40e_bus_width_unknown;
        break;
    }

    switch (link & I40E_PCI_LINK_SPEED) {
    case I40E_PCI_LINK_SPEED_2500:
        hw->bus.speed = i40e_bus_speed_2500;
        break;
    case I40E_PCI_LINK_SPEED_5000:
        hw->bus.speed = i40e_bus_speed_5000;
        break;
    case I40E_PCI_LINK_SPEED_8000:
        hw->bus.speed = i40e_bus_speed_8000;
        break;
    default:
        hw->bus.speed = i40e_bus_speed_unknown;
        break;
    }

    device_printf(dev, "PCI Express Bus: Speed %s %s\n",
        ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s" :
        (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s" :
        (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s" : "Unknown"),
        (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
        (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
        (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
        ("Unknown"));

    if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
        (hw->bus.speed < i40e_bus_speed_8000)) {
        device_printf(dev, "PCI-Express bandwidth available"
            " for this device\n is not sufficient for"
            " normal operation.\n");
        device_printf(dev, "For expected performance a x8 "
            "PCIE Gen3 slot is required.\n");
    }

    return (link);
}
static int
ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
{
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    struct i40e_hw *hw = &pf->hw;
    char buf[32];

    snprintf(buf, sizeof(buf),
        "f%d.%d a%d.%d n%02x.%02x e%08x",
        hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
        hw->aq.api_maj_ver, hw->aq.api_min_ver,
        (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
        IXL_NVM_VERSION_HI_SHIFT,
        (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
        IXL_NVM_VERSION_LO_SHIFT,
        hw->nvm.eetrack);
    return (sysctl_handle_string(oidp, buf, strlen(buf), req));
}

#ifdef IXL_DEBUG_SYSCTL
static int
ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
{
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    struct i40e_hw *hw = &pf->hw;
    struct i40e_link_status link_status;
    char buf[512];
    enum i40e_status_code aq_error = 0;

    aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
    if (aq_error) {
        printf("i40e_aq_get_link_info() error %d\n", aq_error);
        return (EPERM);
    }

    sprintf(buf, "\n"
        "PHY Type : %#04x\n"
        "Speed    : %#04x\n"
        "Link info: %#04x\n"
        "AN info  : %#04x\n"
        "Ext info : %#04x",
        link_status.phy_type, link_status.link_speed,
        link_status.link_info, link_status.an_info,
        link_status.ext_info);

    return (sysctl_handle_string(oidp, buf, strlen(buf), req));
}
static int
ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
{
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    struct i40e_hw *hw = &pf->hw;
    struct i40e_aq_get_phy_abilities_resp abilities_resp;
    char buf[512];
    enum i40e_status_code aq_error = 0;

    // TODO: Print out list of qualified modules as well?
    aq_error = i40e_aq_get_phy_capabilities(hw, TRUE, FALSE, &abilities_resp, NULL);
    if (aq_error) {
        printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
        return (EPERM);
    }

    sprintf(buf, "\n"
        "PHY Type : %#010x\n"
        "Speed    : %#04x\n"
        "Abilities: %#04x\n"
        "EEE cap  : %#06x\n"
        "EEER reg : %#010x\n"
        "D3 Lpan  : %#04x",
        abilities_resp.phy_type, abilities_resp.link_speed,
        abilities_resp.abilities, abilities_resp.eee_capability,
        abilities_resp.eeer_val, abilities_resp.d3_lpan);

    return (sysctl_handle_string(oidp, buf, strlen(buf), req));
}
static int
ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
{
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    struct ixl_vsi *vsi = &pf->vsi;
    struct ixl_mac_filter *f;
    char *buf, *buf_i;

    int error = 0;
    int ftl_len = 0;
    int ftl_counter = 0;
    int buf_len = 0;
    int entry_len = 42;

    SLIST_FOREACH(f, &vsi->ftl, next) {
        ftl_len++;
    }

    if (ftl_len < 1) {
        sysctl_handle_string(oidp, "(none)", 6, req);
        return (0);
    }

    buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
    buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
    if (buf == NULL) {
        /* M_NOWAIT allocations can fail; don't dereference a NULL buffer */
        device_printf(pf->dev, "Could not allocate filter list buffer\n");
        return (ENOMEM);
    }

    sprintf(buf_i++, "\n");
    SLIST_FOREACH(f, &vsi->ftl, next) {
        sprintf(buf_i,
            MAC_FORMAT ", vlan %4d, flags %#06x",
            MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
        buf_i += entry_len;
        /* don't print '\n' for last entry */
        if (++ftl_counter != ftl_len) {
            sprintf(buf_i, "\n");
            buf_i++;
        }
    }

    error = sysctl_handle_string(oidp, buf, strlen(buf), req);
    if (error)
        printf("sysctl error: %d\n", error);
    free(buf, M_DEVBUF);
    return (error);
}
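
/*
 * Number of switch resource allocation entries requested from the admin
 * queue below; presumably sized (0x14 = 20) to cover all resource types
 * the firmware can report.
 */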
#define IXL_SW_RES_SIZE 0x14
static int
ixl_res_alloc_cmp(const void *a, const void *b)
{
    const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
    one = (struct i40e_aqc_switch_resource_alloc_element_resp *)a;
    two = (struct i40e_aqc_switch_resource_alloc_element_resp *)b;

    return ((int)one->resource_type - (int)two->resource_type);
}
static int
ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
{
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    struct i40e_hw *hw = &pf->hw;
    device_t dev = pf->dev;
    struct sbuf *buf;
    int error = 0;

    u8 num_entries;
    struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];

    buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
    if (!buf) {
        device_printf(dev, "Could not allocate sbuf for output.\n");
        return (ENOMEM);
    }

    bzero(resp, sizeof(resp));
    error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
        resp,
        IXL_SW_RES_SIZE,
        NULL);
    if (error) {
        device_printf(dev, "%s: get_switch_resource_alloc() error %d, aq error %d\n",
            __func__, error, hw->aq.asq_last_status);
        sbuf_delete(buf);
        return (error);
    }

    /* Sort entries by type for display */
    qsort(resp, num_entries,
        sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
        &ixl_res_alloc_cmp);

    sbuf_cat(buf, "\n");
    sbuf_printf(buf, "# of entries: %d\n", num_entries);
    sbuf_printf(buf,
        "Type | Guaranteed | Total | Used   | Un-allocated\n"
        "     | (this)     | (all) | (this) | (all)       \n");
    for (int i = 0; i < num_entries; i++) {
        sbuf_printf(buf,
            "%#4x | %10d   %5d   %6d   %12d",
            resp[i].resource_type,
            resp[i].guaranteed,
            resp[i].total,
            resp[i].used,
            resp[i].total_unalloced);
        if (i < num_entries - 1)
            sbuf_cat(buf, "\n");
    }

    error = sbuf_finish(buf);
    if (error)
        device_printf(dev, "Error finishing sbuf: %d\n", error);

    sbuf_delete(buf);
    return (error);
}

/*
** Caller must init and delete sbuf; this function will clear and
** finish it for caller.
*/
static char *
ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
{
    sbuf_clear(s);

    if (seid == 0 && uplink)
        sbuf_cat(s, "Network");
    else if (seid == 0)
        sbuf_cat(s, "Host");
    else if (seid == 1)
        sbuf_cat(s, "EMP");
    else if (seid <= 5)
        sbuf_printf(s, "MAC %d", seid - 2);
    else if (seid <= 15)
        sbuf_cat(s, "Reserved");
    else if (seid <= 31)
        sbuf_printf(s, "PF %d", seid - 16);
    else if (seid <= 159)
        sbuf_printf(s, "VF %d", seid - 32);
    else if (seid <= 287)
        sbuf_cat(s, "Reserved");
    else if (seid <= 511)
        sbuf_cat(s, "Other"); // for other structures
    else if (seid <= 895)
        sbuf_printf(s, "VSI %d", seid - 512);
    else if (seid <= 1023)
        sbuf_printf(s, "Reserved");
    else
        sbuf_cat(s, "Invalid");

    sbuf_finish(s);
    return sbuf_data(s);
}
static int
ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
{
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    struct i40e_hw *hw = &pf->hw;
    device_t dev = pf->dev;
    struct sbuf *buf;
    struct sbuf *nmbuf;
    int error = 0;
    u8 aq_buf[I40E_AQ_LARGE_BUF];
    u16 next = 0;

    struct i40e_aqc_get_switch_config_resp *sw_config;
    sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;

    buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
    if (!buf) {
        device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
        return (ENOMEM);
    }

    error = i40e_aq_get_switch_config(hw, sw_config,
        sizeof(aq_buf), &next, NULL);
    if (error) {
        device_printf(dev, "%s: aq_get_switch_config() error %d, aq error %d\n",
            __func__, error, hw->aq.asq_last_status);
        sbuf_delete(buf);
        return (error);
    }

    nmbuf = sbuf_new_auto();
    if (!nmbuf) {
        device_printf(dev, "Could not allocate sbuf for name output.\n");
        sbuf_delete(buf);
        return (ENOMEM);
    }

    sbuf_cat(buf, "\n");
    // Assuming <= 255 elements in switch
    sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
    /*
    ** Revision -- all elements are revision 1 for now
    */
    sbuf_printf(buf,
        "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
        "                |          |          | (uplink)\n");
    for (int i = 0; i < sw_config->header.num_reported; i++) {
        // "%4d (%8s) | %8s   %8s   %#8x",
        sbuf_printf(buf, "%4d", sw_config->element[i].seid);
        sbuf_cat(buf, " ");
        sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
            sw_config->element[i].seid, false));
        sbuf_cat(buf, " | ");
        sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
            sw_config->element[i].uplink_seid, true));
        sbuf_cat(buf, "   ");
        sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
            sw_config->element[i].downlink_seid, false));
        sbuf_cat(buf, "   ");
        sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
        if (i < sw_config->header.num_reported - 1)
            sbuf_cat(buf, "\n");
    }
    sbuf_delete(nmbuf);

    error = sbuf_finish(buf);
    if (error)
        device_printf(dev, "Error finishing sbuf: %d\n", error);

    sbuf_delete(buf);
    return (error);
}

/*
** Dump TX desc given index.
** Doesn't work; don't use.
** TODO: Also needs a queue index input!
**/
static int
ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS)
{
    struct ixl_pf *pf = (struct ixl_pf *)arg1;
    device_t dev = pf->dev;
    struct sbuf *buf;
    int error = 0;
    int desc_idx = 0;

    buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
    if (!buf) {
        device_printf(dev, "Could not allocate sbuf for output.\n");
        return (ENOMEM);
    }

    /* Read in index */
    error = sysctl_handle_int(oidp, &desc_idx, 0, req);
    if (error)
        return (error);
    if (req->newptr == NULL)
        return (EIO); // fix
    if (desc_idx > 1024) { // fix
        device_printf(dev,
            "Invalid descriptor index, needs to be < 1024\n"); // fix
        return (EINVAL);
    }

    // Don't use this sysctl yet
    if (TRUE)
        return (EIO);

    sbuf_cat(buf, "\n");

    struct ixl_queue *que = pf->vsi.queues;
    struct tx_ring *txr = &(que[1].txr);
    struct i40e_tx_desc *txd = &txr->base[desc_idx];

    sbuf_printf(buf, "Que: %d, Desc: %d\n", que->me, desc_idx);
    sbuf_printf(buf, "Addr: %#18lx\n", txd->buffer_addr);
    sbuf_printf(buf, "Opts: %#18lx\n", txd->cmd_type_offset_bsz);

    error = sbuf_finish(buf);
    if (error) {
        device_printf(dev, "Error finishing sbuf: %d\n", error);
        sbuf_delete(buf);
        return (error);
    }

    error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
    if (error)
        device_printf(dev, "sysctl error: %d\n", error);
    sbuf_delete(buf);
    return (error);
}
#endif /* IXL_DEBUG_SYSCTL */