1 /******************************************************************************
3 Copyright (c) 2013-2014, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #include "opt_inet6.h"
41 #include <net/rss_config.h>
44 /*********************************************************************
46 *********************************************************************/
47 char ixl_driver_version[] = "1.3.1";
49 /*********************************************************************
52 * Used by probe to select devices to load on
53 * Last field stores an index into ixl_strings
54 * Last entry must be all 0s
56 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
57 *********************************************************************/
59 static ixl_vendor_info_t ixl_vendor_info_array[] =
61 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
62 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
63 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
64 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
65 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
66 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
67 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
68 {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
69 /* required last entry */
73 /*********************************************************************
74 * Table of branding strings
75 *********************************************************************/
77 static char *ixl_strings[] = {
78 "Intel(R) Ethernet Connection XL710 Driver"
82 /*********************************************************************
84 *********************************************************************/
85 static int ixl_probe(device_t);
86 static int ixl_attach(device_t);
87 static int ixl_detach(device_t);
88 static int ixl_shutdown(device_t);
89 static int ixl_get_hw_capabilities(struct ixl_pf *);
90 static void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
91 static int ixl_ioctl(struct ifnet *, u_long, caddr_t);
92 static void ixl_init(void *);
93 static void ixl_init_locked(struct ixl_pf *);
94 static void ixl_stop(struct ixl_pf *);
95 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
96 static int ixl_media_change(struct ifnet *);
97 static void ixl_update_link_status(struct ixl_pf *);
98 static int ixl_allocate_pci_resources(struct ixl_pf *);
99 static u16 ixl_get_bus_info(struct i40e_hw *, device_t);
100 static int ixl_setup_stations(struct ixl_pf *);
101 static int ixl_setup_vsi(struct ixl_vsi *);
102 static int ixl_initialize_vsi(struct ixl_vsi *);
103 static int ixl_assign_vsi_msix(struct ixl_pf *);
104 static int ixl_assign_vsi_legacy(struct ixl_pf *);
105 static int ixl_init_msix(struct ixl_pf *);
106 static void ixl_configure_msix(struct ixl_pf *);
107 static void ixl_configure_itr(struct ixl_pf *);
108 static void ixl_configure_legacy(struct ixl_pf *);
109 static void ixl_free_pci_resources(struct ixl_pf *);
110 static void ixl_local_timer(void *);
111 static int ixl_setup_interface(device_t, struct ixl_vsi *);
112 static bool ixl_config_link(struct i40e_hw *);
113 static void ixl_config_rss(struct ixl_vsi *);
114 static void ixl_set_queue_rx_itr(struct ixl_queue *);
115 static void ixl_set_queue_tx_itr(struct ixl_queue *);
116 static int ixl_set_advertised_speeds(struct ixl_pf *, int);
118 static void ixl_enable_rings(struct ixl_vsi *);
119 static void ixl_disable_rings(struct ixl_vsi *);
120 static void ixl_enable_intr(struct ixl_vsi *);
121 static void ixl_disable_intr(struct ixl_vsi *);
123 static void ixl_enable_adminq(struct i40e_hw *);
124 static void ixl_disable_adminq(struct i40e_hw *);
125 static void ixl_enable_queue(struct i40e_hw *, int);
126 static void ixl_disable_queue(struct i40e_hw *, int);
127 static void ixl_enable_legacy(struct i40e_hw *);
128 static void ixl_disable_legacy(struct i40e_hw *);
130 static void ixl_set_promisc(struct ixl_vsi *);
131 static void ixl_add_multi(struct ixl_vsi *);
132 static void ixl_del_multi(struct ixl_vsi *);
133 static void ixl_register_vlan(void *, struct ifnet *, u16);
134 static void ixl_unregister_vlan(void *, struct ifnet *, u16);
135 static void ixl_setup_vlan_filters(struct ixl_vsi *);
137 static void ixl_init_filters(struct ixl_vsi *);
138 static void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
139 static void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
140 static void ixl_add_hw_filters(struct ixl_vsi *, int, int);
141 static void ixl_del_hw_filters(struct ixl_vsi *, int);
142 static struct ixl_mac_filter *
143 ixl_find_filter(struct ixl_vsi *, u8 *, s16);
144 static void ixl_add_mc_filter(struct ixl_vsi *, u8 *);
146 /* Sysctl debug interface */
147 static int ixl_debug_info(SYSCTL_HANDLER_ARGS);
148 static void ixl_print_debug_info(struct ixl_pf *);
150 /* The MSI/X Interrupt handlers */
151 static void ixl_intr(void *);
152 static void ixl_msix_que(void *);
153 static void ixl_msix_adminq(void *);
154 static void ixl_handle_mdd_event(struct ixl_pf *);
156 /* Deferred interrupt tasklets */
157 static void ixl_do_adminq(void *, int);
159 /* Sysctl handlers */
160 static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
161 static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
162 static int ixl_current_speed(SYSCTL_HANDLER_ARGS);
163 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
166 static void ixl_add_hw_stats(struct ixl_pf *);
167 static void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
168 struct sysctl_oid_list *, struct i40e_hw_port_stats *);
169 static void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
170 struct sysctl_oid_list *,
171 struct i40e_eth_stats *);
172 static void ixl_update_stats_counters(struct ixl_pf *);
173 static void ixl_update_eth_stats(struct ixl_vsi *);
174 static void ixl_pf_reset_stats(struct ixl_pf *);
175 static void ixl_vsi_reset_stats(struct ixl_vsi *);
176 static void ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
178 static void ixl_stat_update32(struct i40e_hw *, u32, bool,
181 #ifdef IXL_DEBUG_SYSCTL
182 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
183 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
184 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
185 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
186 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
187 static int ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS);
190 /*********************************************************************
191 * FreeBSD Device Interface Entry Points
192 *********************************************************************/
194 static device_method_t ixl_methods[] = {
195 /* Device interface */
196 DEVMETHOD(device_probe, ixl_probe),
197 DEVMETHOD(device_attach, ixl_attach),
198 DEVMETHOD(device_detach, ixl_detach),
199 DEVMETHOD(device_shutdown, ixl_shutdown),
203 static driver_t ixl_driver = {
204 "ixl", ixl_methods, sizeof(struct ixl_pf),
207 devclass_t ixl_devclass;
208 DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
210 MODULE_DEPEND(ixl, pci, 1, 1, 1);
211 MODULE_DEPEND(ixl, ether, 1, 1, 1);
214 ** Global reset mutex
216 static struct mtx ixl_reset_mtx;
219 ** TUNEABLE PARAMETERS:
222 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
223 "IXL driver parameters");
226 * MSIX should be the default for best performance,
227 * but this allows it to be forced off for testing.
229 static int ixl_enable_msix = 1;
230 TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
231 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
232 "Enable MSI-X interrupts");
235 ** Number of descriptors per ring:
236 ** - TX and RX are the same size
238 static int ixl_ringsz = DEFAULT_RING;
239 TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
240 SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
241 &ixl_ringsz, 0, "Descriptor Ring Size");
244 ** This can be set manually; if left as 0, the
245 ** number of queues will be calculated based
246 ** on the CPUs and MSI-X vectors available.
248 int ixl_max_queues = 0;
249 TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
250 SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
251 &ixl_max_queues, 0, "Number of Queues");
254 ** Controls for Interrupt Throttling
255 ** - true/false for dynamic adjustment
256 ** - default values for static ITR
258 int ixl_dynamic_rx_itr = 0;
259 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
260 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
261 &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
263 int ixl_dynamic_tx_itr = 0;
264 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
265 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
266 &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
268 int ixl_rx_itr = IXL_ITR_8K;
269 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
270 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
271 &ixl_rx_itr, 0, "RX Interrupt Rate");
273 int ixl_tx_itr = IXL_ITR_4K;
274 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
275 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
276 &ixl_tx_itr, 0, "TX Interrupt Rate");
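/*
** Note: rx_itr and tx_itr are raw ITR register values whose interval
** is in units of 2 usec on this hardware; the IXL_ITR_8K / IXL_ITR_4K
** names encode the approximate maximum interrupt rate they allow
** (about 8000 and 4000 interrupts per second respectively).
*/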
279 static int ixl_enable_fdir = 1;
280 TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
281 /* Rate at which we sample */
282 int ixl_atr_rate = 20;
283 TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
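/*
** All of the above are boot-time tunables (TUNABLE_INT/CTLFLAG_RDTUN)
** and can be set from /boot/loader.conf, for example:
**   hw.ixl.max_queues="4"
**   hw.ixl.ringsz="2048"
** (example values only; they must fall within the ranges the driver
** accepts).
*/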
287 static char *ixl_fc_string[6] = {
297 /*********************************************************************
298 * Device identification routine
300 * ixl_probe determines if the driver should be loaded on
301 * the hardware based on PCI vendor/device id of the device.
303 * return BUS_PROBE_DEFAULT on success, positive on failure
304 *********************************************************************/
307 ixl_probe(device_t dev)
309 ixl_vendor_info_t *ent;
311 u16 pci_vendor_id, pci_device_id;
312 u16 pci_subvendor_id, pci_subdevice_id;
313 char device_name[256];
314 static bool lock_init = FALSE;
316 INIT_DEBUGOUT("ixl_probe: begin");
318 pci_vendor_id = pci_get_vendor(dev);
319 if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
322 pci_device_id = pci_get_device(dev);
323 pci_subvendor_id = pci_get_subvendor(dev);
324 pci_subdevice_id = pci_get_subdevice(dev);
326 ent = ixl_vendor_info_array;
327 while (ent->vendor_id != 0) {
328 if ((pci_vendor_id == ent->vendor_id) &&
329 (pci_device_id == ent->device_id) &&
331 ((pci_subvendor_id == ent->subvendor_id) ||
332 (ent->subvendor_id == 0)) &&
334 ((pci_subdevice_id == ent->subdevice_id) ||
335 (ent->subdevice_id == 0))) {
336 sprintf(device_name, "%s, Version - %s",
337 ixl_strings[ent->index],
339 device_set_desc_copy(dev, device_name);
340 /* One shot mutex init */
341 if (lock_init == FALSE) {
343 mtx_init(&ixl_reset_mtx,
345 "IXL RESET Lock", MTX_DEF);
347 return (BUS_PROBE_DEFAULT);
354 /*********************************************************************
355 * Device initialization routine
357 * The attach entry point is called when the driver is being loaded.
358 * This routine identifies the type of hardware, allocates all resources
359 * and initializes the hardware.
361 * return 0 on success, positive on failure
362 *********************************************************************/
365 ixl_attach(device_t dev)
373 INIT_DEBUGOUT("ixl_attach: begin");
375 /* Allocate, clear, and link in our primary soft structure */
376 pf = device_get_softc(dev);
377 pf->dev = pf->osdep.dev = dev;
381 ** Note this assumes we have a single embedded VSI;
382 ** this could be enhanced later to allocate multiple
388 IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
390 /* Set up the timer callout */
391 callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
394 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
395 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
396 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
397 pf, 0, ixl_set_flowcntl, "I", "Flow Control");
399 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
400 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
401 OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
402 pf, 0, ixl_set_advertise, "I", "Advertised Speed");
404 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
405 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
406 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
407 pf, 0, ixl_current_speed, "A", "Current Port Speed");
409 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
410 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
411 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
412 pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
414 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
415 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
416 OID_AUTO, "rx_itr", CTLFLAG_RW,
417 &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
419 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
420 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
421 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
422 &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
424 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
425 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
426 OID_AUTO, "tx_itr", CTLFLAG_RW,
427 &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
429 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
430 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
431 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
432 &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
434 #ifdef IXL_DEBUG_SYSCTL
435 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
436 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
437 OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
438 pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");
440 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
441 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
442 OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
443 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
445 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
446 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
447 OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
448 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
450 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
451 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
452 OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
453 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
455 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
456 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
457 OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
458 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
460 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
461 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
462 OID_AUTO, "dump_desc", CTLTYPE_INT | CTLFLAG_WR,
463 pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump");
466 /* Save off the PCI information */
467 hw->vendor_id = pci_get_vendor(dev);
468 hw->device_id = pci_get_device(dev);
469 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
470 hw->subsystem_vendor_id =
471 pci_read_config(dev, PCIR_SUBVEND_0, 2);
472 hw->subsystem_device_id =
473 pci_read_config(dev, PCIR_SUBDEV_0, 2);
475 hw->bus.device = pci_get_slot(dev);
476 hw->bus.func = pci_get_function(dev);
478 /* Do PCI setup - map BAR0, etc */
479 if (ixl_allocate_pci_resources(pf)) {
480 device_printf(dev, "Allocation of PCI resources failed\n");
485 /* Create for initial debugging use */
486 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
487 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
488 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
489 ixl_debug_info, "I", "Debug Information");
492 /* Establish a clean starting point */
494 error = i40e_pf_reset(hw);
496 device_printf(dev,"PF reset failure %x\n", error);
501 /* For now always do an initial CORE reset on first device */
503 static int ixl_dev_count;
504 static int ixl_dev_track[32];
506 int i, found = FALSE;
507 u16 bus = pci_get_bus(dev);
509 mtx_lock(&ixl_reset_mtx);
510 my_dev = (bus << 8) | hw->bus.device;
512 for (i = 0; i < ixl_dev_count; i++) {
513 if (ixl_dev_track[i] == my_dev)
520 ixl_dev_track[ixl_dev_count] = my_dev;
523 INIT_DEBUGOUT("Initial CORE RESET\n");
524 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
529 reg = rd32(hw, I40E_GLGEN_RSTAT);
530 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
535 wr32(hw, I40E_PF_ATQLEN, 0);
536 wr32(hw, I40E_PF_ATQBAL, 0);
537 wr32(hw, I40E_PF_ATQBAH, 0);
538 i40e_clear_pxe_mode(hw);
540 mtx_unlock(&ixl_reset_mtx);
543 /* Set admin queue parameters */
544 hw->aq.num_arq_entries = IXL_AQ_LEN;
545 hw->aq.num_asq_entries = IXL_AQ_LEN;
546 hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
547 hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
549 /* Initialize the shared code */
550 error = i40e_init_shared_code(hw);
552 device_printf(dev,"Unable to initialize the shared code\n");
557 /* Set up the admin queue */
558 error = i40e_init_adminq(hw);
560 device_printf(dev, "The driver for the device stopped "
561 "because the NVM image is newer than expected.\n"
562 "You must install the most recent version of "
563 "the network driver.\n");
566 device_printf(dev, "%s\n", ixl_fw_version_str(hw));
568 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
569 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
570 device_printf(dev, "The driver for the device detected "
571 "a newer version of the NVM image than expected.\n"
572 "Please install the most recent version of the network driver.\n");
573 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
574 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
575 device_printf(dev, "The driver for the device detected "
576 "an older version of the NVM image than expected.\n"
577 "Please update the NVM image.\n");
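/*
** The two messages above only compare the firmware's AdminQ API
** version with the version this driver was built against; a mismatch
** is reported but the attach continues either way.
*/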
580 i40e_clear_pxe_mode(hw);
582 /* Get capabilities from the device */
583 error = ixl_get_hw_capabilities(pf);
585 device_printf(dev, "HW capabilities failure!\n");
589 /* Set up host memory cache */
590 error = i40e_init_lan_hmc(hw, vsi->num_queues, vsi->num_queues, 0, 0);
592 device_printf(dev, "init_lan_hmc failed: %d\n", error);
596 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
598 device_printf(dev, "configure_lan_hmc failed: %d\n", error);
602 /* Disable LLDP from the firmware */
603 i40e_aq_stop_lldp(hw, TRUE, NULL);
605 i40e_get_mac_addr(hw, hw->mac.addr);
606 error = i40e_validate_mac_addr(hw->mac.addr);
608 device_printf(dev, "validate_mac_addr failed: %d\n", error);
611 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
612 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
614 /* Set up VSI and queues */
615 if (ixl_setup_stations(pf) != 0) {
616 device_printf(dev, "setup stations failed!\n");
621 /* Initialize mac filter list for VSI */
622 SLIST_INIT(&vsi->ftl);
624 /* Set up interrupt routing here */
626 error = ixl_assign_vsi_msix(pf);
628 error = ixl_assign_vsi_legacy(pf);
633 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
635 device_printf(dev, "link restart failed, aq_err=%d\n",
636 pf->hw.aq.asq_last_status);
639 /* Determine link state */
640 vsi->link_up = ixl_config_link(hw);
642 /* Report if Unqualified modules are found */
643 if ((vsi->link_up == FALSE) &&
644 (pf->hw.phy.link_info.link_info &
645 I40E_AQ_MEDIA_AVAILABLE) &&
646 (!(pf->hw.phy.link_info.an_info &
647 I40E_AQ_QUALIFIED_MODULE)))
648 device_printf(dev, "Link failed because "
649 "an unqualified module was detected\n");
651 /* Setup OS specific network interface */
652 if (ixl_setup_interface(dev, vsi) != 0) {
653 device_printf(dev, "interface setup failed!\n");
658 /* Get the bus configuration and set the shared code */
659 bus = ixl_get_bus_info(hw, dev);
660 i40e_set_pci_config_data(hw, bus);
662 /* Initialize statistics */
663 ixl_pf_reset_stats(pf);
664 ixl_update_stats_counters(pf);
665 ixl_add_hw_stats(pf);
667 /* Reset port's advertised speeds */
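/*
** advertised_speed is a bitmask of link speeds (in this driver 0x1 is
** 100Mb, 0x2 is 1Gb and 0x4 is 10Gb), so 10GBASE-T parts default to
** advertising all three rates while other non-40G parts advertise
** only 1Gb and 10Gb.
*/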
668 if (!i40e_is_40G_device(hw->device_id)) {
669 pf->advertised_speed =
670 (hw->device_id == I40E_DEV_ID_10G_BASE_T) ? 0x7 : 0x6;
671 ixl_set_advertised_speeds(pf, pf->advertised_speed);
674 /* Register for VLAN events */
675 vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
676 ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
677 vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
678 ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
681 INIT_DEBUGOUT("ixl_attach: end");
685 if (vsi->ifp != NULL)
688 i40e_shutdown_lan_hmc(hw);
690 i40e_shutdown_adminq(hw);
692 ixl_free_pci_resources(pf);
694 IXL_PF_LOCK_DESTROY(pf);
698 /*********************************************************************
699 * Device removal routine
701 * The detach entry point is called when the driver is being removed.
702 * This routine stops the adapter and deallocates all the resources
703 * that were allocated for driver operation.
705 * return 0 on success, positive on failure
706 *********************************************************************/
709 ixl_detach(device_t dev)
711 struct ixl_pf *pf = device_get_softc(dev);
712 struct i40e_hw *hw = &pf->hw;
713 struct ixl_vsi *vsi = &pf->vsi;
714 struct ixl_queue *que = vsi->queues;
717 INIT_DEBUGOUT("ixl_detach: begin");
719 /* Make sure VLANS are not using driver */
720 if (vsi->ifp->if_vlantrunk != NULL) {
721 device_printf(dev,"Vlan in use, detach first\n");
729 for (int i = 0; i < vsi->num_queues; i++, que++) {
731 taskqueue_drain(que->tq, &que->task);
732 taskqueue_drain(que->tq, &que->tx_task);
733 taskqueue_free(que->tq);
737 /* Shutdown LAN HMC */
738 status = i40e_shutdown_lan_hmc(hw);
741 "Shutdown LAN HMC failed with code %d\n", status);
743 /* Shutdown admin queue */
744 status = i40e_shutdown_adminq(hw);
747 "Shutdown Admin queue failed with code %d\n", status);
749 /* Unregister VLAN events */
750 if (vsi->vlan_attach != NULL)
751 EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
752 if (vsi->vlan_detach != NULL)
753 EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
755 ether_ifdetach(vsi->ifp);
756 callout_drain(&pf->timer);
759 ixl_free_pci_resources(pf);
760 bus_generic_detach(dev);
763 IXL_PF_LOCK_DESTROY(pf);
767 /*********************************************************************
769 * Shutdown entry point
771 **********************************************************************/
774 ixl_shutdown(device_t dev)
776 struct ixl_pf *pf = device_get_softc(dev);
784 /*********************************************************************
786 * Get the hardware capabilities
788 **********************************************************************/
791 ixl_get_hw_capabilities(struct ixl_pf *pf)
793 struct i40e_aqc_list_capabilities_element_resp *buf;
794 struct i40e_hw *hw = &pf->hw;
795 device_t dev = pf->dev;
800 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
802 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
803 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
804 device_printf(dev, "Unable to allocate cap memory\n");
808 /* This populates the hw struct */
809 error = i40e_aq_discover_capabilities(hw, buf, len,
810 &needed, i40e_aqc_opc_list_func_capabilities, NULL);
812 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
814 /* retry once with a larger buffer */
818 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
819 device_printf(dev, "capability discovery failed: %d\n",
820 pf->hw.aq.asq_last_status);
824 /* Capture this PF's starting queue pair */
825 pf->qbase = hw->func_caps.base_queue;
828 device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
829 "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
830 hw->pf_id, hw->func_caps.num_vfs,
831 hw->func_caps.num_msix_vectors,
832 hw->func_caps.num_msix_vectors_vf,
833 hw->func_caps.fd_filters_guaranteed,
834 hw->func_caps.fd_filters_best_effort,
835 hw->func_caps.num_tx_qp,
836 hw->func_caps.num_rx_qp,
837 hw->func_caps.base_queue);
843 ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
845 device_t dev = vsi->dev;
847 /* Enable/disable TXCSUM/TSO4 */
848 if (!(ifp->if_capenable & IFCAP_TXCSUM)
849 && !(ifp->if_capenable & IFCAP_TSO4)) {
850 if (mask & IFCAP_TXCSUM) {
851 ifp->if_capenable |= IFCAP_TXCSUM;
852 /* enable TXCSUM, restore TSO if previously enabled */
853 if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
854 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
855 ifp->if_capenable |= IFCAP_TSO4;
858 else if (mask & IFCAP_TSO4) {
859 ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
860 vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
862 "TSO4 requires txcsum, enabling both...\n");
864 } else if((ifp->if_capenable & IFCAP_TXCSUM)
865 && !(ifp->if_capenable & IFCAP_TSO4)) {
866 if (mask & IFCAP_TXCSUM)
867 ifp->if_capenable &= ~IFCAP_TXCSUM;
868 else if (mask & IFCAP_TSO4)
869 ifp->if_capenable |= IFCAP_TSO4;
870 } else if((ifp->if_capenable & IFCAP_TXCSUM)
871 && (ifp->if_capenable & IFCAP_TSO4)) {
872 if (mask & IFCAP_TXCSUM) {
873 vsi->flags |= IXL_FLAGS_KEEP_TSO4;
874 ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
876 "TSO4 requires txcsum, disabling both...\n");
877 } else if (mask & IFCAP_TSO4)
878 ifp->if_capenable &= ~IFCAP_TSO4;
881 /* Enable/disable TXCSUM_IPV6/TSO6 */
882 if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
883 && !(ifp->if_capenable & IFCAP_TSO6)) {
884 if (mask & IFCAP_TXCSUM_IPV6) {
885 ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
886 if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
887 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
888 ifp->if_capenable |= IFCAP_TSO6;
890 } else if (mask & IFCAP_TSO6) {
891 ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
892 vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
894 "TSO6 requires txcsum6, enabling both...\n");
896 } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
897 && !(ifp->if_capenable & IFCAP_TSO6)) {
898 if (mask & IFCAP_TXCSUM_IPV6)
899 ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
900 else if (mask & IFCAP_TSO6)
901 ifp->if_capenable |= IFCAP_TSO6;
902 } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
903 && (ifp->if_capenable & IFCAP_TSO6)) {
904 if (mask & IFCAP_TXCSUM_IPV6) {
905 vsi->flags |= IXL_FLAGS_KEEP_TSO6;
906 ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
908 "TSO6 requires txcsum6, disabling both...\n");
909 } else if (mask & IFCAP_TSO6)
910 ifp->if_capenable &= ~IFCAP_TSO6;
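/*
** Summary of the logic above: TSO4/TSO6 require the matching TX
** checksum offload, so the IXL_FLAGS_KEEP_TSO* flags remember that
** TSO was enabled when txcsum is switched off and restore it when
** txcsum is switched back on.
*/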
914 /*********************************************************************
917 * ixl_ioctl is called when the user wants to configure the interface.
920 * return 0 on success, positive on failure
921 **********************************************************************/
924 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
926 struct ixl_vsi *vsi = ifp->if_softc;
927 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
928 struct ifreq *ifr = (struct ifreq *) data;
929 #if defined(INET) || defined(INET6)
930 struct ifaddr *ifa = (struct ifaddr *)data;
931 bool avoid_reset = FALSE;
939 if (ifa->ifa_addr->sa_family == AF_INET)
943 if (ifa->ifa_addr->sa_family == AF_INET6)
946 #if defined(INET) || defined(INET6)
948 ** Calling init results in link renegotiation,
949 ** so we avoid doing it when possible.
952 ifp->if_flags |= IFF_UP;
953 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
956 if (!(ifp->if_flags & IFF_NOARP))
957 arp_ifinit(ifp, ifa);
960 error = ether_ioctl(ifp, command, data);
964 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
965 if (ifr->ifr_mtu > IXL_MAX_FRAME -
966 ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
970 ifp->if_mtu = ifr->ifr_mtu;
971 vsi->max_frame_size =
972 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
973 + ETHER_VLAN_ENCAP_LEN;
979 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
981 if (ifp->if_flags & IFF_UP) {
982 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
983 if ((ifp->if_flags ^ pf->if_flags) &
984 (IFF_PROMISC | IFF_ALLMULTI)) {
985 ixl_set_promisc(vsi);
990 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
992 pf->if_flags = ifp->if_flags;
996 IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
997 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
999 ixl_disable_intr(vsi);
1001 ixl_enable_intr(vsi);
1006 IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
1007 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1009 ixl_disable_intr(vsi);
1011 ixl_enable_intr(vsi);
1017 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
1018 error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
1022 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1023 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
1025 ixl_cap_txcsum_tso(vsi, ifp, mask);
1027 if (mask & IFCAP_RXCSUM)
1028 ifp->if_capenable ^= IFCAP_RXCSUM;
1029 if (mask & IFCAP_RXCSUM_IPV6)
1030 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1031 if (mask & IFCAP_LRO)
1032 ifp->if_capenable ^= IFCAP_LRO;
1033 if (mask & IFCAP_VLAN_HWTAGGING)
1034 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1035 if (mask & IFCAP_VLAN_HWFILTER)
1036 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1037 if (mask & IFCAP_VLAN_HWTSO)
1038 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1039 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1041 ixl_init_locked(pf);
1044 VLAN_CAPABILITIES(ifp);
1050 IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
1051 error = ether_ioctl(ifp, command, data);
1059 /*********************************************************************
1062 * This routine is used in two ways. It is used by the stack as
1063 * the init entry point in the network interface structure. It is also used
1064 * by the driver as a hw/sw initialization routine to get to a consistent state.
1067 * return 0 on success, positive on failure
1068 **********************************************************************/
1071 ixl_init_locked(struct ixl_pf *pf)
1073 struct i40e_hw *hw = &pf->hw;
1074 struct ixl_vsi *vsi = &pf->vsi;
1075 struct ifnet *ifp = vsi->ifp;
1076 device_t dev = pf->dev;
1077 struct i40e_filter_control_settings filter;
1078 u8 tmpaddr[ETHER_ADDR_LEN];
1081 mtx_assert(&pf->pf_mtx, MA_OWNED);
1082 INIT_DEBUGOUT("ixl_init: begin");
1085 /* Get the latest mac address... User might use a LAA */
1086 bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
1087 I40E_ETH_LENGTH_OF_ADDRESS);
1088 if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
1089 i40e_validate_mac_addr(tmpaddr)) {
1090 bcopy(tmpaddr, hw->mac.addr,
1091 I40E_ETH_LENGTH_OF_ADDRESS);
1092 ret = i40e_aq_mac_address_write(hw,
1093 I40E_AQC_WRITE_TYPE_LAA_ONLY,
1094 hw->mac.addr, NULL);
1096 device_printf(dev, "LLA address "
1097 "change failed!!\n");
1102 /* Set the various hardware offload abilities */
1103 ifp->if_hwassist = 0;
1104 if (ifp->if_capenable & IFCAP_TSO)
1105 ifp->if_hwassist |= CSUM_TSO;
1106 if (ifp->if_capenable & IFCAP_TXCSUM)
1107 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1108 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1109 ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
1111 /* Set up the device filtering */
1112 bzero(&filter, sizeof(filter));
1113 filter.enable_ethtype = TRUE;
1114 filter.enable_macvlan = TRUE;
1116 filter.enable_fdir = TRUE;
1118 if (i40e_set_filter_control(hw, &filter))
1119 device_printf(dev, "set_filter_control() failed\n");
1122 ixl_config_rss(vsi);
1128 ** Prepare the rings, hmc contexts, etc...
1130 if (ixl_initialize_vsi(vsi)) {
1131 device_printf(dev, "initialize vsi failed!!\n");
1135 /* Add protocol filters to list */
1136 ixl_init_filters(vsi);
1138 /* Setup vlan's if needed */
1139 ixl_setup_vlan_filters(vsi);
1141 /* Start the local timer */
1142 callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1144 /* Set up MSI/X routing and the ITR settings */
1145 if (ixl_enable_msix) {
1146 ixl_configure_msix(pf);
1147 ixl_configure_itr(pf);
1149 ixl_configure_legacy(pf);
1151 ixl_enable_rings(vsi);
1153 i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
1155 /* Set MTU in hardware*/
1156 int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
1159 device_printf(vsi->dev,
1160 "aq_set_mac_config in init error, code %d\n",
1163 /* And now turn on interrupts */
1164 ixl_enable_intr(vsi);
1166 /* Now inform the stack we're ready */
1167 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1168 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1176 struct ixl_pf *pf = arg;
1179 ixl_init_locked(pf);
1186 ** MSIX Interrupt Handlers and Tasklets
1190 ixl_handle_que(void *context, int pending)
1192 struct ixl_queue *que = context;
1193 struct ixl_vsi *vsi = que->vsi;
1194 struct i40e_hw *hw = vsi->hw;
1195 struct tx_ring *txr = &que->txr;
1196 struct ifnet *ifp = vsi->ifp;
1199 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1200 more = ixl_rxeof(que, IXL_RX_LIMIT);
1203 if (!drbr_empty(ifp, txr->br))
1204 ixl_mq_start_locked(ifp, txr);
1207 taskqueue_enqueue(que->tq, &que->task);
1212 /* Reenable this interrupt - hmmm */
1213 ixl_enable_queue(hw, que->me);
1218 /*********************************************************************
1220 * Legacy Interrupt Service routine
1222 **********************************************************************/
1226 struct ixl_pf *pf = arg;
1227 struct i40e_hw *hw = &pf->hw;
1228 struct ixl_vsi *vsi = &pf->vsi;
1229 struct ixl_queue *que = vsi->queues;
1230 struct ifnet *ifp = vsi->ifp;
1231 struct tx_ring *txr = &que->txr;
1232 u32 reg, icr0, mask;
1233 bool more_tx, more_rx;
1237 /* Protect against spurious interrupts */
1238 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1241 icr0 = rd32(hw, I40E_PFINT_ICR0);
1243 reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1244 reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1245 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1247 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1249 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
1250 taskqueue_enqueue(pf->tq, &pf->adminq);
1254 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1257 more_tx = ixl_txeof(que);
1258 if (!drbr_empty(vsi->ifp, txr->br))
1262 /* re-enable other interrupt causes */
1263 wr32(hw, I40E_PFINT_ICR0_ENA, mask);
1265 /* And now the queues */
1266 reg = rd32(hw, I40E_QINT_RQCTL(0));
1267 reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1268 wr32(hw, I40E_QINT_RQCTL(0), reg);
1270 reg = rd32(hw, I40E_QINT_TQCTL(0));
1271 reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1272 reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
1273 wr32(hw, I40E_QINT_TQCTL(0), reg);
1275 ixl_enable_legacy(hw);
1281 /*********************************************************************
1283 * MSIX VSI Interrupt Service routine
1285 **********************************************************************/
1287 ixl_msix_que(void *arg)
1289 struct ixl_queue *que = arg;
1290 struct ixl_vsi *vsi = que->vsi;
1291 struct i40e_hw *hw = vsi->hw;
1292 struct tx_ring *txr = &que->txr;
1293 bool more_tx, more_rx;
1295 /* Protect against spurious interrupts */
1296 if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
1301 more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1304 more_tx = ixl_txeof(que);
1306 ** Make certain that if the stack
1307 ** has anything queued, the task gets
1308 ** scheduled to handle it.
1310 if (!drbr_empty(vsi->ifp, txr->br))
1314 ixl_set_queue_rx_itr(que);
1315 ixl_set_queue_tx_itr(que);
1317 if (more_tx || more_rx)
1318 taskqueue_enqueue(que->tq, &que->task);
1320 ixl_enable_queue(hw, que->me);
1326 /*********************************************************************
1328 * MSIX Admin Queue Interrupt Service routine
1330 **********************************************************************/
1332 ixl_msix_adminq(void *arg)
1334 struct ixl_pf *pf = arg;
1335 struct i40e_hw *hw = &pf->hw;
1340 reg = rd32(hw, I40E_PFINT_ICR0);
1341 mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1343 /* Check on the cause */
1344 if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
1345 mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1347 if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
1348 ixl_handle_mdd_event(pf);
1349 mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1352 if (reg & I40E_PFINT_ICR0_VFLR_MASK)
1353 mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
1355 reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1356 reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1357 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1359 taskqueue_enqueue(pf->tq, &pf->adminq);
1363 /*********************************************************************
1365 * Media Ioctl callback
1367 * This routine is called whenever the user queries the status of
1368 * the interface using ifconfig.
1370 **********************************************************************/
1372 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1374 struct ixl_vsi *vsi = ifp->if_softc;
1375 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1376 struct i40e_hw *hw = &pf->hw;
1378 INIT_DEBUGOUT("ixl_media_status: begin");
1381 ixl_update_link_status(pf);
1383 ifmr->ifm_status = IFM_AVALID;
1384 ifmr->ifm_active = IFM_ETHER;
1386 if (!vsi->link_up) {
1391 ifmr->ifm_status |= IFM_ACTIVE;
1392 /* Hardware is always full-duplex */
1393 ifmr->ifm_active |= IFM_FDX;
1395 switch (hw->phy.link_info.phy_type) {
1397 case I40E_PHY_TYPE_100BASE_TX:
1398 ifmr->ifm_active |= IFM_100_TX;
1401 case I40E_PHY_TYPE_1000BASE_T:
1402 ifmr->ifm_active |= IFM_1000_T;
1404 case I40E_PHY_TYPE_1000BASE_SX:
1405 ifmr->ifm_active |= IFM_1000_SX;
1407 case I40E_PHY_TYPE_1000BASE_LX:
1408 ifmr->ifm_active |= IFM_1000_LX;
1411 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1412 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1413 ifmr->ifm_active |= IFM_10G_TWINAX;
1415 case I40E_PHY_TYPE_10GBASE_KR:
1417 ** this is not technically correct
1418 ** but FreeBSD does not have the media
1419 ** type defined yet, so it's a compromise.
1421 case I40E_PHY_TYPE_10GBASE_SR:
1422 ifmr->ifm_active |= IFM_10G_SR;
1424 case I40E_PHY_TYPE_10GBASE_LR:
1425 ifmr->ifm_active |= IFM_10G_LR;
1427 case I40E_PHY_TYPE_10GBASE_T:
1428 ifmr->ifm_active |= IFM_10G_T;
1431 case I40E_PHY_TYPE_40GBASE_CR4:
1432 case I40E_PHY_TYPE_40GBASE_CR4_CU:
1433 ifmr->ifm_active |= IFM_40G_CR4;
1435 case I40E_PHY_TYPE_40GBASE_SR4:
1436 ifmr->ifm_active |= IFM_40G_SR4;
1438 case I40E_PHY_TYPE_40GBASE_LR4:
1439 ifmr->ifm_active |= IFM_40G_LR4;
1442 ifmr->ifm_active |= IFM_UNKNOWN;
1445 /* Report flow control status as well */
1446 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1447 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1448 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1449 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1456 /*********************************************************************
1458 * Media Ioctl callback
1460 * This routine is called when the user changes speed/duplex using
1461 * media/mediaopt option with ifconfig.
1463 **********************************************************************/
1465 ixl_media_change(struct ifnet * ifp)
1467 struct ixl_vsi *vsi = ifp->if_softc;
1468 struct ifmedia *ifm = &vsi->media;
1470 INIT_DEBUGOUT("ixl_media_change: begin");
1472 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1475 if_printf(ifp, "Media change is currently not supported.\n");
1483 ** ATR: Application Targeted Receive - creates a filter
1484 ** based on TX flow info that will keep the receive
1485 ** portion of the flow on the same queue. Based on the
1486 ** implementation this is only available for TCP connections
1489 ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
1491 struct ixl_vsi *vsi = que->vsi;
1492 struct tx_ring *txr = &que->txr;
1493 struct i40e_filter_program_desc *FDIR;
1497 /* check if ATR is enabled and sample rate */
1498 if ((!ixl_enable_fdir) || (!txr->atr_rate))
1501 ** We sample all TCP SYN/FIN packets,
1502 ** or at the selected sample rate
1505 if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
1506 (txr->atr_count < txr->atr_rate))
1510 /* Get a descriptor to use */
1511 idx = txr->next_avail;
1512 FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
1513 if (++idx == que->num_desc)
1516 txr->next_avail = idx;
1518 ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1519 I40E_TXD_FLTR_QW0_QINDEX_MASK;
1521 ptype |= (etype == ETHERTYPE_IP) ?
1522 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1523 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1524 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1525 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1527 ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1529 dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
1532 ** We use the TCP TH_FIN as a trigger to remove
1533 ** the filter; otherwise it's an update.
1535 dtype |= (th->th_flags & TH_FIN) ?
1536 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1537 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1538 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1539 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1541 dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1542 I40E_TXD_FLTR_QW1_DEST_SHIFT;
1544 dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1545 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1547 FDIR->qindex_flex_ptype_vsi = htole32(ptype);
1548 FDIR->dtype_cmd_cntindex = htole32(dtype);
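/*
** The filter-program descriptor built above takes a slot in the TX
** ring like a normal data descriptor; the hardware uses it to add,
** update or remove the flow director entry for this TCP flow.
*/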
1555 ixl_set_promisc(struct ixl_vsi *vsi)
1557 struct ifnet *ifp = vsi->ifp;
1558 struct i40e_hw *hw = vsi->hw;
1560 bool uni = FALSE, multi = FALSE;
1562 if (ifp->if_flags & IFF_ALLMULTI)
1564 else { /* Need to count the multicast addresses */
1565 struct ifmultiaddr *ifma;
1566 if_maddr_rlock(ifp);
1567 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1568 if (ifma->ifma_addr->sa_family != AF_LINK)
1570 if (mcnt == MAX_MULTICAST_ADDR)
1574 if_maddr_runlock(ifp);
1577 if (mcnt >= MAX_MULTICAST_ADDR)
1579 if (ifp->if_flags & IFF_PROMISC)
1582 err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1583 vsi->seid, uni, NULL);
1584 err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1585 vsi->seid, multi, NULL);
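/*
** Unicast promiscuous mode simply follows IFF_PROMISC; multicast
** promiscuous mode is used when IFF_ALLMULTI is set or the multicast
** list grows past MAX_MULTICAST_ADDR.
*/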
1589 /*********************************************************************
1592 * Routines for multicast and vlan filter management.
1594 *********************************************************************/
1596 ixl_add_multi(struct ixl_vsi *vsi)
1598 struct ifmultiaddr *ifma;
1599 struct ifnet *ifp = vsi->ifp;
1600 struct i40e_hw *hw = vsi->hw;
1601 int mcnt = 0, flags;
1603 IOCTL_DEBUGOUT("ixl_add_multi: begin");
1605 if_maddr_rlock(ifp);
1607 ** First just get a count, to decide if we
1608 ** simply use multicast promiscuous.
1610 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1611 if (ifma->ifma_addr->sa_family != AF_LINK)
1615 if_maddr_runlock(ifp);
1617 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1618 /* delete existing MC filters */
1619 ixl_del_hw_filters(vsi, mcnt);
1620 i40e_aq_set_vsi_multicast_promiscuous(hw,
1621 vsi->seid, TRUE, NULL);
1626 if_maddr_rlock(ifp);
1627 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1628 if (ifma->ifma_addr->sa_family != AF_LINK)
1630 ixl_add_mc_filter(vsi,
1631 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1634 if_maddr_runlock(ifp);
1636 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1637 ixl_add_hw_filters(vsi, flags, mcnt);
1640 IOCTL_DEBUGOUT("ixl_add_multi: end");
1645 ixl_del_multi(struct ixl_vsi *vsi)
1647 struct ifnet *ifp = vsi->ifp;
1648 struct ifmultiaddr *ifma;
1649 struct ixl_mac_filter *f;
1653 IOCTL_DEBUGOUT("ixl_del_multi: begin");
1655 /* Search for removed multicast addresses */
1656 if_maddr_rlock(ifp);
1657 SLIST_FOREACH(f, &vsi->ftl, next) {
1658 if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1660 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1661 if (ifma->ifma_addr->sa_family != AF_LINK)
1663 u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1664 if (cmp_etheraddr(f->macaddr, mc_addr)) {
1669 if (match == FALSE) {
1670 f->flags |= IXL_FILTER_DEL;
1675 if_maddr_runlock(ifp);
1678 ixl_del_hw_filters(vsi, mcnt);
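/*
** Every filter flagged IXL_FILTER_DEL in the walk above is removed
** from the hardware in one batch by ixl_del_hw_filters().
*/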
1682 /*********************************************************************
1685 * This routine checks for link status, updates statistics,
1686 * and runs the watchdog check.
1688 **********************************************************************/
1691 ixl_local_timer(void *arg)
1693 struct ixl_pf *pf = arg;
1694 struct i40e_hw *hw = &pf->hw;
1695 struct ixl_vsi *vsi = &pf->vsi;
1696 struct ixl_queue *que = vsi->queues;
1697 device_t dev = pf->dev;
1701 mtx_assert(&pf->pf_mtx, MA_OWNED);
1703 /* Fire off the adminq task */
1704 taskqueue_enqueue(pf->tq, &pf->adminq);
1707 ixl_update_stats_counters(pf);
1710 ** Check status of the queues
1712 mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1713 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
1715 for (int i = 0; i < vsi->num_queues; i++,que++) {
1716 /* Any queues with outstanding work get a sw irq */
1718 wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1720 ** Each time txeof runs without cleaning but there
1721 ** are uncleaned descriptors, it increments busy. If
1722 ** we get to 5 we declare the queue hung.
1724 if (que->busy == IXL_QUEUE_HUNG) {
1726 /* Mark the queue as inactive */
1727 vsi->active_queues &= ~((u64)1 << que->me);
1730 /* Check if we've come back from hung */
1731 if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1732 vsi->active_queues |= ((u64)1 << que->me);
1734 if (que->busy >= IXL_MAX_TX_BUSY) {
1736 device_printf(dev,"Warning queue %d "
1737 "appears to be hung!\n", i);
1739 que->busy = IXL_QUEUE_HUNG;
1743 /* Only reinit if all queues show hung */
1744 if (hung == vsi->num_queues)
1747 callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1751 device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1752 ixl_init_locked(pf);
1756 ** Note: this routine updates the OS on the link state;
1757 ** the real check of the hardware only happens with
1758 ** a link interrupt.
1761 ixl_update_link_status(struct ixl_pf *pf)
1763 struct ixl_vsi *vsi = &pf->vsi;
1764 struct i40e_hw *hw = &pf->hw;
1765 struct ifnet *ifp = vsi->ifp;
1766 device_t dev = pf->dev;
1767 enum i40e_fc_mode fc;
1771 if (vsi->link_active == FALSE) {
1772 i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
1774 fc = hw->fc.current_mode;
1775 device_printf(dev,"Link is up %d Gbps %s,"
1776 " Flow Control: %s\n",
1777 ((vsi->link_speed == I40E_LINK_SPEED_40GB)? 40:10),
1778 "Full Duplex", ixl_fc_string[fc]);
1780 vsi->link_active = TRUE;
1782 ** Warn user if link speed on an NPAR-enabled
1783 ** partition is not at least 10Gbps.
1785 if (hw->func_caps.npar_enable &&
1786 (hw->phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
1787 hw->phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
1788 device_printf(dev, "The partition detected link "
1789 "speed that is less than 10Gbps\n");
1790 if_link_state_change(ifp, LINK_STATE_UP);
1792 } else { /* Link down */
1793 if (vsi->link_active == TRUE) {
1795 device_printf(dev,"Link is Down\n");
1796 if_link_state_change(ifp, LINK_STATE_DOWN);
1797 vsi->link_active = FALSE;
1804 /*********************************************************************
1806 * This routine disables all traffic on the adapter by issuing a
1807 * global reset on the MAC and deallocates TX/RX buffers.
1809 **********************************************************************/
1812 ixl_stop(struct ixl_pf *pf)
1814 struct ixl_vsi *vsi = &pf->vsi;
1815 struct ifnet *ifp = vsi->ifp;
1817 mtx_assert(&pf->pf_mtx, MA_OWNED);
1819 INIT_DEBUGOUT("ixl_stop: begin\n");
1820 ixl_disable_intr(vsi);
1821 ixl_disable_rings(vsi);
1823 /* Tell the stack that the interface is no longer active */
1824 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1826 /* Stop the local timer */
1827 callout_stop(&pf->timer);
1833 /*********************************************************************
1835 * Setup Legacy or MSI interrupt resources and handler for the VSI
1837 **********************************************************************/
1839 ixl_assign_vsi_legacy(struct ixl_pf *pf)
1841 device_t dev = pf->dev;
1842 struct ixl_vsi *vsi = &pf->vsi;
1843 struct ixl_queue *que = vsi->queues;
1848 pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1849 &rid, RF_SHAREABLE | RF_ACTIVE);
1850 if (pf->res == NULL) {
1851 device_printf(dev,"Unable to allocate"
1852 " bus resource: vsi legacy/msi interrupt\n");
1856 /* Set the handler function */
1857 error = bus_setup_intr(dev, pf->res,
1858 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1859 ixl_intr, pf, &pf->tag);
1862 device_printf(dev, "Failed to register legacy/msi handler");
1865 bus_describe_intr(dev, pf->res, pf->tag, "irq0");
1866 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1867 TASK_INIT(&que->task, 0, ixl_handle_que, que);
1868 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1869 taskqueue_thread_enqueue, &que->tq);
1870 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1871 device_get_nameunit(dev));
1872 TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1873 pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1874 taskqueue_thread_enqueue, &pf->tq);
1875 taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1876 device_get_nameunit(dev));
1882 /*********************************************************************
1884 * Setup MSIX Interrupt resources and handlers for the VSI
1886 **********************************************************************/
1888 ixl_assign_vsi_msix(struct ixl_pf *pf)
1890 device_t dev = pf->dev;
1891 struct ixl_vsi *vsi = &pf->vsi;
1892 struct ixl_queue *que = vsi->queues;
1893 struct tx_ring *txr;
1894 int error, rid, vector = 0;
1896 /* Admin Queue is vector 0 */
1898 pf->res = bus_alloc_resource_any(dev,
1899 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1901 device_printf(dev,"Unable to allocate"
1902 " bus resource: Adminq interrupt [%d]\n", rid);
1905 /* Set the adminq vector and handler */
1906 error = bus_setup_intr(dev, pf->res,
1907 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1908 ixl_msix_adminq, pf, &pf->tag);
1911 device_printf(dev, "Failed to register Admin que handler");
1914 bus_describe_intr(dev, pf->res, pf->tag, "aq");
1915 pf->admvec = vector;
1916 /* Tasklet for Admin Queue */
1917 TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1918 pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1919 taskqueue_thread_enqueue, &pf->tq);
1920 taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1921 device_get_nameunit(pf->dev));
1924 /* Now set up the stations */
1925 for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1929 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1930 RF_SHAREABLE | RF_ACTIVE);
1931 if (que->res == NULL) {
1932 device_printf(dev,"Unable to allocate"
1933 " bus resource: que interrupt [%d]\n", vector);
1936 /* Set the handler function */
1937 error = bus_setup_intr(dev, que->res,
1938 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1939 ixl_msix_que, que, &que->tag);
1942 device_printf(dev, "Failed to register que handler");
1945 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
1946 /* Bind the vector to a CPU */
1948 cpu_id = rss_getcpu(i % rss_getnumbuckets());
1950 bus_bind_intr(dev, que->res, cpu_id);
1952 TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1953 TASK_INIT(&que->task, 0, ixl_handle_que, que);
1954 que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1955 taskqueue_thread_enqueue, &que->tq);
1957 taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
1958 cpu_id, "%s (bucket %d)",
1959 device_get_nameunit(dev), cpu_id);
1961 taskqueue_start_threads(&que->tq, 1, PI_NET,
1962 "%s que", device_get_nameunit(dev));
1971 * Allocate MSI/X vectors
1974 ixl_init_msix(struct ixl_pf *pf)
1976 device_t dev = pf->dev;
1977 int rid, want, vectors, queues, available;
1979 /* Override by tuneable */
1980 if (ixl_enable_msix == 0)
1984 ** When used in a virtualized environment,
1985 ** the PCI BUSMASTER capability may not be set,
1986 ** so explicitly set it here and rewrite
1987 ** the ENABLE bit in the MSIX control register
1988 ** at this point to cause the host to
1989 ** successfully initialize us.
1994 pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1995 pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
1996 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1997 pci_find_cap(dev, PCIY_MSIX, &rid);
1998 rid += PCIR_MSIX_CTRL;
1999 msix_ctrl = pci_read_config(dev, rid, 2);
2000 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
2001 pci_write_config(dev, rid, msix_ctrl, 2);
2004 /* First try MSI/X */
2005 rid = PCIR_BAR(IXL_BAR);
2006 pf->msix_mem = bus_alloc_resource_any(dev,
2007 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2008 if (!pf->msix_mem) {
2009 /* May not be enabled */
2010 device_printf(pf->dev,
2011 "Unable to map MSIX table\n");
2015 available = pci_msix_count(dev);
2016 if (available == 0) { /* system has msix disabled */
2017 bus_release_resource(dev, SYS_RES_MEMORY,
2019 pf->msix_mem = NULL;
2023 /* Figure out a reasonable auto config value */
2024 queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
2026 /* Override with hardcoded value if sane */
2027 if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
2028 queues = ixl_max_queues;
2031 /* If we're doing RSS, clamp at the number of RSS buckets */
2032 if (queues > rss_getnumbuckets())
2033 queues = rss_getnumbuckets();
2037 ** Want one vector (RX/TX pair) per queue
2038 ** plus an additional one for the admin queue.
2041 if (want <= available) /* Have enough */
2044 device_printf(pf->dev,
2045 "MSIX Configuration Problem, "
2046 "%d vectors available but %d wanted!\n",
2048 return (0); /* Will go to Legacy setup */
2051 if (pci_alloc_msix(dev, &vectors) == 0) {
2052 device_printf(pf->dev,
2053 "Using MSIX interrupts with %d vectors\n", vectors);
2055 pf->vsi.num_queues = queues;
2058 * If we're doing RSS, the number of queues needs to
2059 * match the number of RSS buckets that are configured.
2061 * + If there's more queues than RSS buckets, we'll end
2062 * up with queues that get no traffic.
2064 * + If there's more RSS buckets than queues, we'll end
2065 * up having multiple RSS buckets map to the same queue,
2066 * so there'll be some contention.
2068 if (queues != rss_getnumbuckets()) {
2070 "%s: queues (%d) != RSS buckets (%d)"
2071 "; performance will be impacted.\n",
2072 __func__, queues, rss_getnumbuckets());
2078 vectors = pci_msi_count(dev);
2079 pf->vsi.num_queues = 1;
2082 ixl_enable_msix = 0;
2083 if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2084 device_printf(pf->dev,"Using an MSI interrupt\n");
2087 device_printf(pf->dev,"Using a Legacy interrupt\n");
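/*
** The fallback order is thus MSI-X, then MSI, then a legacy INTx
** interrupt; in the non-MSIX cases a single queue is used.
*/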
2094 * Plumb MSI/X vectors
2097 ixl_configure_msix(struct ixl_pf *pf)
2099 struct i40e_hw *hw = &pf->hw;
2100 struct ixl_vsi *vsi = &pf->vsi;
2104 /* First set up the adminq - vector 0 */
2105 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2106 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
2108 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2109 I40E_PFINT_ICR0_ENA_GRST_MASK |
2110 I40E_PFINT_ICR0_HMC_ERR_MASK |
2111 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2112 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2113 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2114 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2115 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2117 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2118 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2120 wr32(hw, I40E_PFINT_DYN_CTL0,
2121 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2122 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2124 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2126 /* Next configure the queues */
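/*
** Each queue pair is chained to its vector: the RX cause points at
** the TX queue of the same index, the TX cause points at the next
** RX queue, and the last entry is terminated with IXL_QUEUE_EOL.
*/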
2127 for (int i = 0; i < vsi->num_queues; i++, vector++) {
2128 wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2129 wr32(hw, I40E_PFINT_LNKLSTN(i), i);
2131 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2132 (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2133 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2134 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2135 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2136 wr32(hw, I40E_QINT_RQCTL(i), reg);
2138 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2139 (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2140 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2141 ((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2142 (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2143 if (i == (vsi->num_queues - 1))
2144 reg |= (IXL_QUEUE_EOL
2145 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2146 wr32(hw, I40E_QINT_TQCTL(i), reg);
2151 * Configure for MSI single vector operation
2154 ixl_configure_legacy(struct ixl_pf *pf)
2156 struct i40e_hw *hw = &pf->hw;
2160 wr32(hw, I40E_PFINT_ITR0(0), 0);
2161 wr32(hw, I40E_PFINT_ITR0(1), 0);
2164 /* Setup "other" causes */
2165 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2166 | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2167 | I40E_PFINT_ICR0_ENA_GRST_MASK
2168 | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2169 | I40E_PFINT_ICR0_ENA_GPIO_MASK
2170 | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2171 | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2172 | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2173 | I40E_PFINT_ICR0_ENA_VFLR_MASK
2174 | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2176 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2178 /* SW_ITR_IDX = 0, but don't change INTENA */
2179 wr32(hw, I40E_PFINT_DYN_CTL0,
2180 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2181 I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2182 /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2183 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2185 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2186 wr32(hw, I40E_PFINT_LNKLST0, 0);
2188 /* Associate the queue pair to the vector and enable the q int */
2189 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2190 | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2191 | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2192 wr32(hw, I40E_QINT_RQCTL(0), reg);
2194 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2195 | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2196 | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2197 wr32(hw, I40E_QINT_TQCTL(0), reg);
2199 /* Next enable the queue pair */
2200 reg = rd32(hw, I40E_QTX_ENA(0));
2201 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2202 wr32(hw, I40E_QTX_ENA(0), reg);
2204 reg = rd32(hw, I40E_QRX_ENA(0));
2205 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2206 wr32(hw, I40E_QRX_ENA(0), reg);
2211 * Set the Initial ITR state
2214 ixl_configure_itr(struct ixl_pf *pf)
2216 struct i40e_hw *hw = &pf->hw;
2217 struct ixl_vsi *vsi = &pf->vsi;
2218 struct ixl_queue *que = vsi->queues;
2220 vsi->rx_itr_setting = ixl_rx_itr;
2221 if (ixl_dynamic_rx_itr)
2222 vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2223 vsi->tx_itr_setting = ixl_tx_itr;
2224 if (ixl_dynamic_tx_itr)
2225 vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2227 for (int i = 0; i < vsi->num_queues; i++, que++) {
2228 struct tx_ring *txr = &que->txr;
2229 struct rx_ring *rxr = &que->rxr;
2231 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2232 vsi->rx_itr_setting);
2233 rxr->itr = vsi->rx_itr_setting;
2234 rxr->latency = IXL_AVE_LATENCY;
2235 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2236 vsi->tx_itr_setting);
2237 txr->itr = vsi->tx_itr_setting;
2238 txr->latency = IXL_AVE_LATENCY;
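	/*
	** For illustration: IXL_RX_ITR and IXL_TX_ITR select which of the
	** per-queue I40E_PFINT_ITRN registers holds the RX and TX throttle
	** interval (taken from the ixl_rx_itr/ixl_tx_itr tunables here).
	** The hardware is believed to interpret the programmed value in
	** 2-usec units, so IXL_ITR_8K (0x3E = 62) corresponds to roughly
	** 124 usec between interrupts, about 8K interrupts per second.
	*/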
2244 ixl_allocate_pci_resources(struct ixl_pf *pf)
2247 device_t dev = pf->dev;
2250 pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2253 if (!(pf->pci_mem)) {
2254 device_printf(dev,"Unable to allocate bus resource: memory\n");
2258 pf->osdep.mem_bus_space_tag =
2259 rman_get_bustag(pf->pci_mem);
2260 pf->osdep.mem_bus_space_handle =
2261 rman_get_bushandle(pf->pci_mem);
2262 pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2263 pf->osdep.flush_reg = I40E_GLGEN_STAT;
2264 pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2266 pf->hw.back = &pf->osdep;
2269 ** Now set up MSI or MSI/X; this should
2270 ** return the number of supported
2271 ** vectors. (Will be 1 for MSI)
2273 pf->msix = ixl_init_msix(pf);
2278 ixl_free_pci_resources(struct ixl_pf * pf)
2280 struct ixl_vsi *vsi = &pf->vsi;
2281 struct ixl_queue *que = vsi->queues;
2282 device_t dev = pf->dev;
2285 memrid = PCIR_BAR(IXL_BAR);
2287 /* We may get here before stations are setup */
2288 if ((!ixl_enable_msix) || (que == NULL))
2292 ** Release all msix VSI resources:
2294 for (int i = 0; i < vsi->num_queues; i++, que++) {
2295 rid = que->msix + 1;
2296 if (que->tag != NULL) {
2297 bus_teardown_intr(dev, que->res, que->tag);
2300 if (que->res != NULL)
2301 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2305 /* Clean the AdminQ interrupt last */
2306 if (pf->admvec) /* we are doing MSIX */
2307 rid = pf->admvec + 1;
2309 (pf->msix != 0) ? (rid = 1):(rid = 0);
2311 if (pf->tag != NULL) {
2312 bus_teardown_intr(dev, pf->res, pf->tag);
2315 if (pf->res != NULL)
2316 bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2319 pci_release_msi(dev);
2321 if (pf->msix_mem != NULL)
2322 bus_release_resource(dev, SYS_RES_MEMORY,
2323 memrid, pf->msix_mem);
2325 if (pf->pci_mem != NULL)
2326 bus_release_resource(dev, SYS_RES_MEMORY,
2327 PCIR_BAR(0), pf->pci_mem);
2333 ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2335 /* Display supported media types */
2336 if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2337 ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2339 if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2340 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2342 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2343 phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2344 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2345 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2346 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2347 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2348 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2349 if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2350 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2352 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2353 phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4))
2354 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2355 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2356 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2357 if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2358 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2361 /*********************************************************************
2363 * Setup networking device structure and register an interface.
2365 **********************************************************************/
2367 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2370 struct i40e_hw *hw = vsi->hw;
2371 struct ixl_queue *que = vsi->queues;
2372 struct i40e_aq_get_phy_abilities_resp abilities_resp;
2373 enum i40e_status_code aq_error = 0;
2375 INIT_DEBUGOUT("ixl_setup_interface: begin");
2377 ifp = vsi->ifp = if_alloc(IFT_ETHER);
2379 device_printf(dev, "can not allocate ifnet structure\n");
2382 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2383 ifp->if_mtu = ETHERMTU;
2384 if_initbaudrate(ifp, IF_Gbps(40));
2385 ifp->if_init = ixl_init;
2386 ifp->if_softc = vsi;
2387 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2388 ifp->if_ioctl = ixl_ioctl;
2390 #if __FreeBSD_version >= 1100036
2391 if_setgetcounterfn(ifp, ixl_get_counter);
2394 ifp->if_transmit = ixl_mq_start;
2396 ifp->if_qflush = ixl_qflush;
2398 ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2400 vsi->max_frame_size =
2401 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2402 + ETHER_VLAN_ENCAP_LEN;
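	/*
	** For example, with the default MTU of 1500 this works out to
	** 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes,
	** which is why the standard 1522-byte boundary shows up in the
	** packet size statistics later in this file.
	*/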
2405 * Tell the upper layer(s) we support long frames.
2407 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2409 ifp->if_capabilities |= IFCAP_HWCSUM;
2410 ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2411 ifp->if_capabilities |= IFCAP_TSO;
2412 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2413 ifp->if_capabilities |= IFCAP_LRO;
2415 /* VLAN capabilities */
2416 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2419 | IFCAP_VLAN_HWCSUM;
2420 ifp->if_capenable = ifp->if_capabilities;
2423 ** Don't turn this on by default: if vlans are
2424 ** created on another pseudo device (e.g. lagg)
2425 ** then vlan events are not passed through, breaking
2426 ** operation, but with HW FILTER off it works. If
2427 ** using vlans directly on the ixl driver you can
2428 ** enable this and get full hardware tag filtering.
2430 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2433 * Specify the media types supported by this adapter and register
2434 * callbacks to update media and link information
2436 ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2439 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
2440 if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2441 /* Need a delay to detect fiber correctly */
2442 i40e_msec_delay(200);
2443 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2444 TRUE, &abilities_resp, NULL);
2445 if (aq_error == I40E_ERR_UNKNOWN_PHY)
2446 device_printf(dev, "Unknown PHY type detected!\n");
2448 ixl_add_ifmedia(vsi, abilities_resp.phy_type);
2449 } else if (aq_error) {
2450 device_printf(dev, "Error getting supported media types, err %d,"
2451 " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2453 ixl_add_ifmedia(vsi, abilities_resp.phy_type);
2455 /* Use autoselect media by default */
2456 ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2457 ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2459 ether_ifattach(ifp, hw->mac.addr);
2465 ixl_config_link(struct i40e_hw *hw)
2469 i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
2470 check = i40e_get_link_status(hw);
2472 printf("Link is %s\n", check ? "up":"down");
2477 /*********************************************************************
2479 * Initialize this VSI
2481 **********************************************************************/
2483 ixl_setup_vsi(struct ixl_vsi *vsi)
2485 struct i40e_hw *hw = vsi->hw;
2486 device_t dev = vsi->dev;
2487 struct i40e_aqc_get_switch_config_resp *sw_config;
2488 struct i40e_vsi_context ctxt;
2489 u8 aq_buf[I40E_AQ_LARGE_BUF];
2490 int ret = I40E_SUCCESS;
2493 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2494 ret = i40e_aq_get_switch_config(hw, sw_config,
2495 sizeof(aq_buf), &next, NULL);
2497 device_printf(dev,"aq_get_switch_config failed!!\n");
2501 printf("Switch config: header reported: %d in structure, %d total\n",
2502 sw_config->header.num_reported, sw_config->header.num_total);
2503 printf("type=%d seid=%d uplink=%d downlink=%d\n",
2504 sw_config->element[0].element_type,
2505 sw_config->element[0].seid,
2506 sw_config->element[0].uplink_seid,
2507 sw_config->element[0].downlink_seid);
2509 /* Save off this important value */
2510 vsi->seid = sw_config->element[0].seid;
2512 memset(&ctxt, 0, sizeof(ctxt));
2513 ctxt.seid = vsi->seid;
2514 ctxt.pf_num = hw->pf_id;
2515 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2517 device_printf(dev,"get vsi params failed %x!!\n", ret);
2521 printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2522 "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2523 "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2524 ctxt.uplink_seid, ctxt.vsi_number,
2525 ctxt.vsis_allocated, ctxt.vsis_unallocated,
2526 ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2527 ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2530 ** Set the queue and traffic class bits
2531 ** - when multiple traffic classes are supported
2532 ** this will need to be more robust.
2534 ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2535 ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2536 ctxt.info.queue_mapping[0] = 0;
2537 ctxt.info.tc_mapping[0] = 0x0800;
2539 /* Set VLAN receive stripping mode */
2540 ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2541 ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2542 if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2543 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2545 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2547 /* Keep copy of VSI info in VSI for statistic counters */
2548 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2550 /* Reset VSI statistics */
2551 ixl_vsi_reset_stats(vsi);
2552 vsi->hw_filters_add = 0;
2553 vsi->hw_filters_del = 0;
2555 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2557 device_printf(dev,"update vsi params failed %x!!\n",
2558 hw->aq.asq_last_status);
2563 /*********************************************************************
2565 * Initialize the VSI: this handles contexts, which means things
2566 * like the number of descriptors, buffer size,
2567 * plus we init the rings thru this function.
2569 **********************************************************************/
2571 ixl_initialize_vsi(struct ixl_vsi *vsi)
2573 struct ixl_queue *que = vsi->queues;
2574 device_t dev = vsi->dev;
2575 struct i40e_hw *hw = vsi->hw;
2579 for (int i = 0; i < vsi->num_queues; i++, que++) {
2580 struct tx_ring *txr = &que->txr;
2581 struct rx_ring *rxr = &que->rxr;
2582 struct i40e_hmc_obj_txq tctx;
2583 struct i40e_hmc_obj_rxq rctx;
2588 /* Setup the HMC TX Context */
2589 size = que->num_desc * sizeof(struct i40e_tx_desc);
2590 memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2591 tctx.new_context = 1;
2592 tctx.base = (txr->dma.pa/128);
2593 tctx.qlen = que->num_desc;
2595 tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2596 /* Enable HEAD writeback */
2597 tctx.head_wb_ena = 1;
2598 tctx.head_wb_addr = txr->dma.pa +
2599 (que->num_desc * sizeof(struct i40e_tx_desc));
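		/*
		** Head writeback, roughly: instead of the driver polling
		** descriptor DD bits, the hardware periodically DMA-writes the
		** TX head index to the address programmed above, the bytes
		** immediately after the descriptor ring (which is why the ring
		** allocation reserves an extra u32 past the descriptors).
		*/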
2600 tctx.rdylist_act = 0;
2601 err = i40e_clear_lan_tx_queue_context(hw, i);
2603 device_printf(dev, "Unable to clear TX context\n");
2606 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2608 device_printf(dev, "Unable to set TX context\n");
2611 /* Associate the ring with this PF */
2612 txctl = I40E_QTX_CTL_PF_QUEUE;
2613 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2614 I40E_QTX_CTL_PF_INDX_MASK);
2615 wr32(hw, I40E_QTX_CTL(i), txctl);
2618 /* Do ring (re)init */
2619 ixl_init_tx_ring(que);
2621 /* Next setup the HMC RX Context */
2622 if (vsi->max_frame_size <= 2048)
2623 rxr->mbuf_sz = MCLBYTES;
2625 rxr->mbuf_sz = MJUMPAGESIZE;
2627 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2629 /* Set up an RX context for the HMC */
2630 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2631 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2632 /* ignore header split for now */
2633 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2634 rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2635 vsi->max_frame_size : max_rxmax;
2637 rctx.dsize = 1; /* do 32byte descriptors */
2638 rctx.hsplit_0 = 0; /* no HDR split initially */
2639 rctx.base = (rxr->dma.pa/128);
2640 rctx.qlen = que->num_desc;
2641 rctx.tphrdesc_ena = 1;
2642 rctx.tphwdesc_ena = 1;
2643 rctx.tphdata_ena = 0;
2644 rctx.tphhead_ena = 0;
2645 rctx.lrxqthresh = 2;
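		/*
		** A rough guide to the fields above (assuming
		** I40E_RXQ_CTX_DBUFF_SHIFT is 7, i.e. 128-byte granularity):
		** a 2048-byte cluster gives dbuff = 16, base is the ring's
		** physical address in 128-byte units, dsize = 1 selects
		** 32-byte descriptors, and rxmax caps the receive size at the
		** smaller of the interface max frame size and
		** mbuf_sz * rx_buf_chain_len.
		*/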
2652 err = i40e_clear_lan_rx_queue_context(hw, i);
2655 "Unable to clear RX context %d\n", i);
2658 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2660 device_printf(dev, "Unable to set RX context %d\n", i);
2663 err = ixl_init_rx_ring(que);
2665 device_printf(dev, "Fail in init_rx_ring %d\n", i);
2668 wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2669 wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2675 /*********************************************************************
2677 * Free all VSI structs.
2679 **********************************************************************/
2681 ixl_free_vsi(struct ixl_vsi *vsi)
2683 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2684 struct ixl_queue *que = vsi->queues;
2685 struct ixl_mac_filter *f;
2687 /* Free station queues */
2688 for (int i = 0; i < vsi->num_queues; i++, que++) {
2689 struct tx_ring *txr = &que->txr;
2690 struct rx_ring *rxr = &que->rxr;
2692 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2695 ixl_free_que_tx(que);
2697 i40e_free_dma_mem(&pf->hw, &txr->dma);
2699 IXL_TX_LOCK_DESTROY(txr);
2701 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2704 ixl_free_que_rx(que);
2706 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2708 IXL_RX_LOCK_DESTROY(rxr);
2711 free(vsi->queues, M_DEVBUF);
2713 /* Free VSI filter list */
2714 while (!SLIST_EMPTY(&vsi->ftl)) {
2715 f = SLIST_FIRST(&vsi->ftl);
2716 SLIST_REMOVE_HEAD(&vsi->ftl, next);
2722 /*********************************************************************
2724 * Allocate memory for the VSI (virtual station interface) and their
2725 * associated queues, rings and the descriptors associated with each,
2726 * called only once at attach.
2728 **********************************************************************/
2730 ixl_setup_stations(struct ixl_pf *pf)
2732 device_t dev = pf->dev;
2733 struct ixl_vsi *vsi;
2734 struct ixl_queue *que;
2735 struct tx_ring *txr;
2736 struct rx_ring *rxr;
2738 int error = I40E_SUCCESS;
2741 vsi->back = (void *)pf;
2746 /* Get memory for the station queues */
2748 (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2749 vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2750 device_printf(dev, "Unable to allocate queue memory\n");
2755 for (int i = 0; i < vsi->num_queues; i++) {
2756 que = &vsi->queues[i];
2757 que->num_desc = ixl_ringsz;
2760 /* mark the queue as active */
2761 vsi->active_queues |= (u64)1 << que->me;
2764 txr->tail = I40E_QTX_TAIL(que->me);
2766 /* Initialize the TX lock */
2767 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2768 device_get_nameunit(dev), que->me);
2769 mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2770 /* Create the TX descriptor ring */
2771 tsize = roundup2((que->num_desc *
2772 sizeof(struct i40e_tx_desc)) +
2773 sizeof(u32), DBA_ALIGN);
2774 if (i40e_allocate_dma_mem(&pf->hw,
2775 &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2777 "Unable to allocate TX Descriptor memory\n");
2781 txr->base = (struct i40e_tx_desc *)txr->dma.va;
2782 bzero((void *)txr->base, tsize);
2783 /* Now allocate transmit soft structs for the ring */
2784 if (ixl_allocate_tx_data(que)) {
2786 "Critical Failure setting up TX structures\n");
2790 /* Allocate a buf ring */
2791 txr->br = buf_ring_alloc(4096, M_DEVBUF,
2792 M_WAITOK, &txr->mtx);
2793 if (txr->br == NULL) {
2795 "Critical Failure setting up TX buf ring\n");
2801 * Next the RX queues...
2803 rsize = roundup2(que->num_desc *
2804 sizeof(union i40e_rx_desc), DBA_ALIGN);
2807 rxr->tail = I40E_QRX_TAIL(que->me);
2809 /* Initialize the RX side lock */
2810 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2811 device_get_nameunit(dev), que->me);
2812 mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2814 if (i40e_allocate_dma_mem(&pf->hw,
2815 &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2817 "Unable to allocate RX Descriptor memory\n");
2821 rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2822 bzero((void *)rxr->base, rsize);
2824 /* Allocate receive soft structs for the ring*/
2825 if (ixl_allocate_rx_data(que)) {
2827 "Critical Failure setting up receive structs\n");
2836 for (int i = 0; i < vsi->num_queues; i++) {
2837 que = &vsi->queues[i];
2841 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2843 i40e_free_dma_mem(&pf->hw, &txr->dma);
2851 ** Provide an update to the queue RX
2852 ** interrupt moderation value.
2855 ixl_set_queue_rx_itr(struct ixl_queue *que)
2857 struct ixl_vsi *vsi = que->vsi;
2858 struct i40e_hw *hw = vsi->hw;
2859 struct rx_ring *rxr = &que->rxr;
2865 /* Idle, do nothing */
2866 if (rxr->bytes == 0)
2869 if (ixl_dynamic_rx_itr) {
2870 rx_bytes = rxr->bytes/rxr->itr;
2873 /* Adjust latency range */
2874 switch (rxr->latency) {
2875 case IXL_LOW_LATENCY:
2876 if (rx_bytes > 10) {
2877 rx_latency = IXL_AVE_LATENCY;
2878 rx_itr = IXL_ITR_20K;
2881 case IXL_AVE_LATENCY:
2882 if (rx_bytes > 20) {
2883 rx_latency = IXL_BULK_LATENCY;
2884 rx_itr = IXL_ITR_8K;
2885 } else if (rx_bytes <= 10) {
2886 rx_latency = IXL_LOW_LATENCY;
2887 rx_itr = IXL_ITR_100K;
2890 case IXL_BULK_LATENCY:
2891 if (rx_bytes <= 20) {
2892 rx_latency = IXL_AVE_LATENCY;
2893 rx_itr = IXL_ITR_20K;
2898 rxr->latency = rx_latency;
2900 if (rx_itr != rxr->itr) {
2901 /* do an exponential smoothing */
2902 rx_itr = (10 * rx_itr * rxr->itr) /
2903 ((9 * rx_itr) + rxr->itr);
2904 rxr->itr = rx_itr & IXL_MAX_ITR;
2905 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2906 que->me), rxr->itr);
2908 } else { /* We may have toggled to non-dynamic */
2909 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2910 vsi->rx_itr_setting = ixl_rx_itr;
2911 /* Update the hardware if needed */
2912 if (rxr->itr != vsi->rx_itr_setting) {
2913 rxr->itr = vsi->rx_itr_setting;
2914 wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2915 que->me), rxr->itr);
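	/*
	** Worked example of the smoothing above: with a current ITR of 100
	** and a target of 20, the next value is (10 * 20 * 100) /
	** (9 * 20 + 100) = 20000 / 280 = 71, so the moderation value drifts
	** toward the target over several updates instead of jumping there
	** immediately. The TX path below uses the same formula.
	*/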
2925 ** Provide an update to the queue TX
2926 ** interrupt moderation value.
2929 ixl_set_queue_tx_itr(struct ixl_queue *que)
2931 struct ixl_vsi *vsi = que->vsi;
2932 struct i40e_hw *hw = vsi->hw;
2933 struct tx_ring *txr = &que->txr;
2939 /* Idle, do nothing */
2940 if (txr->bytes == 0)
2943 if (ixl_dynamic_tx_itr) {
2944 tx_bytes = txr->bytes/txr->itr;
2947 switch (txr->latency) {
2948 case IXL_LOW_LATENCY:
2949 if (tx_bytes > 10) {
2950 tx_latency = IXL_AVE_LATENCY;
2951 tx_itr = IXL_ITR_20K;
2954 case IXL_AVE_LATENCY:
2955 if (tx_bytes > 20) {
2956 tx_latency = IXL_BULK_LATENCY;
2957 tx_itr = IXL_ITR_8K;
2958 } else if (tx_bytes <= 10) {
2959 tx_latency = IXL_LOW_LATENCY;
2960 tx_itr = IXL_ITR_100K;
2963 case IXL_BULK_LATENCY:
2964 if (tx_bytes <= 20) {
2965 tx_latency = IXL_AVE_LATENCY;
2966 tx_itr = IXL_ITR_20K;
2971 txr->latency = tx_latency;
2973 if (tx_itr != txr->itr) {
2974 /* do an exponential smoothing */
2975 tx_itr = (10 * tx_itr * txr->itr) /
2976 ((9 * tx_itr) + txr->itr);
2977 txr->itr = tx_itr & IXL_MAX_ITR;
2978 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2979 que->me), txr->itr);
2982 } else { /* We may have toggled to non-dynamic */
2983 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2984 vsi->tx_itr_setting = ixl_tx_itr;
2985 /* Update the hardware if needed */
2986 if (txr->itr != vsi->tx_itr_setting) {
2987 txr->itr = vsi->tx_itr_setting;
2988 wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2989 que->me), txr->itr);
2999 ixl_add_hw_stats(struct ixl_pf *pf)
3001 device_t dev = pf->dev;
3002 struct ixl_vsi *vsi = &pf->vsi;
3003 struct ixl_queue *queues = vsi->queues;
3004 struct i40e_eth_stats *vsi_stats = &vsi->eth_stats;
3005 struct i40e_hw_port_stats *pf_stats = &pf->stats;
3007 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3008 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3009 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3011 struct sysctl_oid *vsi_node, *queue_node;
3012 struct sysctl_oid_list *vsi_list, *queue_list;
3014 struct tx_ring *txr;
3015 struct rx_ring *rxr;
3017 /* Driver statistics */
3018 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3019 CTLFLAG_RD, &pf->watchdog_events,
3020 "Watchdog timeouts");
3021 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3022 CTLFLAG_RD, &pf->admin_irq,
3023 "Admin Queue IRQ Handled");
3025 /* VSI statistics */
3026 #define QUEUE_NAME_LEN 32
3027 char queue_namebuf[QUEUE_NAME_LEN];
3029 // ERJ: Only one vsi now, re-do when >1 VSI enabled
3030 // snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", vsi->info.stat_counter_idx);
3031 vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
3032 CTLFLAG_RD, NULL, "VSI-specific stats");
3033 vsi_list = SYSCTL_CHILDREN(vsi_node);
3035 ixl_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats);
3037 /* Queue statistics */
3038 for (int q = 0; q < vsi->num_queues; q++) {
3039 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3040 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
3041 CTLFLAG_RD, NULL, "Queue #");
3042 queue_list = SYSCTL_CHILDREN(queue_node);
3044 txr = &(queues[q].txr);
3045 rxr = &(queues[q].rxr);
3047 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3048 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3049 "m_defrag() failed");
3050 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3051 CTLFLAG_RD, &(queues[q].dropped_pkts),
3052 "Driver dropped packets");
3053 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3054 CTLFLAG_RD, &(queues[q].irqs),
3055 "irqs on this queue");
3056 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3057 CTLFLAG_RD, &(queues[q].tso),
3059 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3060 CTLFLAG_RD, &(queues[q].tx_dma_setup),
3061 "Driver tx dma failure in xmit");
3062 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3063 CTLFLAG_RD, &(txr->no_desc),
3064 "Queue No Descriptor Available");
3065 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3066 CTLFLAG_RD, &(txr->total_packets),
3067 "Queue Packets Transmitted");
3068 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3069 CTLFLAG_RD, &(txr->tx_bytes),
3070 "Queue Bytes Transmitted");
3071 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3072 CTLFLAG_RD, &(rxr->rx_packets),
3073 "Queue Packets Received");
3074 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3075 CTLFLAG_RD, &(rxr->rx_bytes),
3076 "Queue Bytes Received");
3080 ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
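	/*
	** Usage sketch (the exact OID path is an educated guess from the
	** node names above): with the first adapter attached as ixl0, the
	** per-queue counters should appear under sysctls such as
	** dev.ixl.0.vsi.que0.tx_packets and dev.ixl.0.vsi.que0.irqs,
	** alongside the "mac" node added by ixl_add_sysctls_mac_stats().
	*/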
3084 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3085 struct sysctl_oid_list *child,
3086 struct i40e_eth_stats *eth_stats)
3088 struct ixl_sysctl_info ctls[] =
3090 {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3091 {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3092 "Unicast Packets Received"},
3093 {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3094 "Multicast Packets Received"},
3095 {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3096 "Broadcast Packets Received"},
3097 {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3098 {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3099 {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3100 {&eth_stats->tx_multicast, "mcast_pkts_txd",
3101 "Multicast Packets Transmitted"},
3102 {&eth_stats->tx_broadcast, "bcast_pkts_txd",
3103 "Broadcast Packets Transmitted"},
3108 struct ixl_sysctl_info *entry = ctls;
3109 while (entry->stat != 0)
3111 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3112 CTLFLAG_RD, entry->stat,
3113 entry->description);
3119 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3120 struct sysctl_oid_list *child,
3121 struct i40e_hw_port_stats *stats)
3123 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3124 CTLFLAG_RD, NULL, "Mac Statistics");
3125 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3127 struct i40e_eth_stats *eth_stats = &stats->eth;
3128 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3130 struct ixl_sysctl_info ctls[] =
3132 {&stats->crc_errors, "crc_errors", "CRC Errors"},
3133 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3134 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3135 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3136 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3137 /* Packet Reception Stats */
3138 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3139 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3140 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3141 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3142 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3143 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3144 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3145 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3146 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3147 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3148 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3149 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3150 /* Packet Transmission Stats */
3151 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3152 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3153 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3154 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3155 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3156 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3157 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3159 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3160 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3161 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3162 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3167 struct ixl_sysctl_info *entry = ctls;
3168 while (entry->stat != 0)
3170 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3171 CTLFLAG_RD, entry->stat,
3172 entry->description);
3178 ** ixl_config_rss - setup RSS
3179 ** - note this is done for the single vsi
3181 static void ixl_config_rss(struct ixl_vsi *vsi)
3183 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3184 struct i40e_hw *hw = vsi->hw;
3186 u64 set_hena = 0, hena;
3189 u32 rss_hash_config;
3190 u32 rss_seed[IXL_KEYSZ];
3192 u32 rss_seed[IXL_KEYSZ] = {0x41b01687,
3193 0x183cfd8c, 0xce880440, 0x580cbc3c,
3194 0x35897377, 0x328b25e1, 0x4fa98922,
3195 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
3199 /* Fetch the configured RSS key */
3200 rss_getkey((uint8_t *) &rss_seed);
3203 /* Fill out hash function seed */
3204 for (i = 0; i < IXL_KEYSZ; i++)
3205 wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
3207 /* Enable PCTYPES for RSS: */
3209 rss_hash_config = rss_gethashconfig();
3210 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3211 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3212 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3213 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
3214 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3215 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
3216 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3217 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
3218 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3219 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
3220 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3221 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
3222 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3223 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
3226 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3227 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3228 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3229 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3230 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3231 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3232 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3233 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3234 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3235 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3236 ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3238 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3239 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3241 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3242 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3244 /* Populate the LUT with max no. of queues in round robin fashion */
3245 for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3246 if (j == vsi->num_queues)
3250 * Fetch the RSS bucket id for the given indirection entry.
3251 * Cap it at the number of configured buckets (which is
3254 que_id = rss_get_indirection_to_bucket(i);
3255 que_id = que_id % vsi->num_queues;
3259 /* lut = 4-byte sliding window of 4 lut entries */
3260 lut = (lut << 8) | (que_id &
3261 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3262 /* On i = 3, we have 4 entries in lut; write to the register */
3264 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
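	/*
	** For illustration, assuming 4 queues and RSS bucket i mapping to
	** queue i % 4: the loop shifts each 1-byte entry into "lut", so
	** after four iterations HLUT(0) is written as 0x00010203 (queue 0
	** in the highest byte, queue 3 in the lowest), and the pattern
	** repeats across the rss_table_size indirection entries.
	*/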
3271 ** This routine is run via a vlan config EVENT;
3272 ** it enables us to use the HW Filter table since
3273 ** we can get the vlan id. This just creates the
3274 ** entry in the soft version of the VFTA; init will
3275 ** repopulate the real table.
3278 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3280 struct ixl_vsi *vsi = ifp->if_softc;
3281 struct i40e_hw *hw = vsi->hw;
3282 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3284 if (ifp->if_softc != arg) /* Not our event */
3287 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3292 ixl_add_filter(vsi, hw->mac.addr, vtag);
3297 ** This routine is run via a vlan
3298 ** unconfig EVENT, removing our entry
3299 ** from the soft vfta.
3302 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3304 struct ixl_vsi *vsi = ifp->if_softc;
3305 struct i40e_hw *hw = vsi->hw;
3306 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3308 if (ifp->if_softc != arg)
3311 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3316 ixl_del_filter(vsi, hw->mac.addr, vtag);
3321 ** This routine updates vlan filters; called by init,
3322 ** it scans the filter table and then updates the hw
3323 ** after a soft reset.
3326 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3328 struct ixl_mac_filter *f;
3331 if (vsi->num_vlans == 0)
3334 ** Scan the filter list for vlan entries,
3335 ** mark them for addition and then call
3336 ** for the AQ update.
3338 SLIST_FOREACH(f, &vsi->ftl, next) {
3339 if (f->flags & IXL_FILTER_VLAN) {
3347 printf("setup vlan: no filters found!\n");
3350 flags = IXL_FILTER_VLAN;
3351 flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3352 ixl_add_hw_filters(vsi, flags, cnt);
3357 ** Initialize filter list and add filters that the hardware
3358 ** needs to know about.
3361 ixl_init_filters(struct ixl_vsi *vsi)
3363 /* Add broadcast address */
3364 u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
3365 ixl_add_filter(vsi, bc, IXL_VLAN_ANY);
3369 ** This routine adds multicast filters
3372 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3374 struct ixl_mac_filter *f;
3376 /* Does one already exist? */
3377 f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3381 f = ixl_get_filter(vsi);
3383 printf("WARNING: no filter available!!\n");
3386 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3387 f->vlan = IXL_VLAN_ANY;
3388 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3395 ** This routine adds macvlan filters
3398 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3400 struct ixl_mac_filter *f, *tmp;
3401 device_t dev = vsi->dev;
3403 DEBUGOUT("ixl_add_filter: begin");
3405 /* Does one already exist */
3406 f = ixl_find_filter(vsi, macaddr, vlan);
3410 ** If this is the first vlan being registered, we
3411 ** need to remove the ANY filter that indicates we are
3412 ** not in a vlan, and replace that with a 0 filter.
3414 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3415 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3417 ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3418 ixl_add_filter(vsi, macaddr, 0);
3422 f = ixl_get_filter(vsi);
3424 device_printf(dev, "WARNING: no filter available!!\n");
3427 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3429 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3430 if (f->vlan != IXL_VLAN_ANY)
3431 f->flags |= IXL_FILTER_VLAN;
3433 ixl_add_hw_filters(vsi, f->flags, 1);
3438 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3440 struct ixl_mac_filter *f;
3442 f = ixl_find_filter(vsi, macaddr, vlan);
3446 f->flags |= IXL_FILTER_DEL;
3447 ixl_del_hw_filters(vsi, 1);
3449 /* Check if this is the last vlan removal */
3450 if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3451 /* Switch back to a non-vlan filter */
3452 ixl_del_filter(vsi, macaddr, 0);
3453 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3459 ** Find the filter with both matching mac addr and vlan id
3461 static struct ixl_mac_filter *
3462 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3464 struct ixl_mac_filter *f;
3467 SLIST_FOREACH(f, &vsi->ftl, next) {
3468 if (!cmp_etheraddr(f->macaddr, macaddr))
3470 if (f->vlan == vlan) {
3482 ** This routine takes additions to the vsi filter
3483 ** table and creates an Admin Queue call to create
3484 ** the filters in the hardware.
3487 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3489 struct i40e_aqc_add_macvlan_element_data *a, *b;
3490 struct ixl_mac_filter *f;
3491 struct i40e_hw *hw = vsi->hw;
3492 device_t dev = vsi->dev;
3495 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3496 M_DEVBUF, M_NOWAIT | M_ZERO);
3498 device_printf(dev, "add_hw_filters failed to get memory\n");
3503 ** Scan the filter list; each time we find one
3504 ** we add it to the admin queue array and clear its ADD flag.
3507 SLIST_FOREACH(f, &vsi->ftl, next) {
3508 if (f->flags == flags) {
3509 b = &a[j]; // a pox on fvl long names :)
3510 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3512 (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3513 b->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3514 f->flags &= ~IXL_FILTER_ADD;
3521 err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3523 device_printf(dev, "aq_add_macvlan err %d, aq_error %d\n",
3524 err, hw->aq.asq_last_status);
3526 vsi->hw_filters_add += j;
3533 ** This routine takes removals in the vsi filter
3534 ** table and creates an Admin Queue call to delete
3535 ** the filters in the hardware.
3538 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3540 struct i40e_aqc_remove_macvlan_element_data *d, *e;
3541 struct i40e_hw *hw = vsi->hw;
3542 device_t dev = vsi->dev;
3543 struct ixl_mac_filter *f, *f_temp;
3546 DEBUGOUT("ixl_del_hw_filters: begin\n");
3548 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3549 M_DEVBUF, M_NOWAIT | M_ZERO);
3551 printf("del hw filter failed to get memory\n");
3555 SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3556 if (f->flags & IXL_FILTER_DEL) {
3557 e = &d[j]; // a pox on fvl long names :)
3558 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3559 e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3560 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3561 /* delete entry from vsi list */
3562 SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3570 err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3571 /* NOTE: returns ENOENT every time but seems to work fine,
3572 so we'll ignore that specific error. */
3573 // TODO: Does this still occur on current firmwares?
3574 if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3576 for (int i = 0; i < j; i++)
3577 sc += (!d[i].error_code);
3578 vsi->hw_filters_del += sc;
3580 "Failed to remove %d/%d filters, aq error %d\n",
3581 j - sc, j, hw->aq.asq_last_status);
3583 vsi->hw_filters_del += j;
3587 DEBUGOUT("ixl_del_hw_filters: end\n");
3593 ixl_enable_rings(struct ixl_vsi *vsi)
3595 struct i40e_hw *hw = vsi->hw;
3598 for (int i = 0; i < vsi->num_queues; i++) {
3599 i40e_pre_tx_queue_cfg(hw, i, TRUE);
3601 reg = rd32(hw, I40E_QTX_ENA(i));
3602 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3603 I40E_QTX_ENA_QENA_STAT_MASK;
3604 wr32(hw, I40E_QTX_ENA(i), reg);
3605 /* Verify the enable took */
3606 for (int j = 0; j < 10; j++) {
3607 reg = rd32(hw, I40E_QTX_ENA(i));
3608 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3610 i40e_msec_delay(10);
3612 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
3613 printf("TX queue %d disabled!\n", i);
3615 reg = rd32(hw, I40E_QRX_ENA(i));
3616 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3617 I40E_QRX_ENA_QENA_STAT_MASK;
3618 wr32(hw, I40E_QRX_ENA(i), reg);
3619 /* Verify the enable took */
3620 for (int j = 0; j < 10; j++) {
3621 reg = rd32(hw, I40E_QRX_ENA(i));
3622 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3624 i40e_msec_delay(10);
3626 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3627 printf("RX queue %d disabled!\n", i);
3632 ixl_disable_rings(struct ixl_vsi *vsi)
3634 struct i40e_hw *hw = vsi->hw;
3637 for (int i = 0; i < vsi->num_queues; i++) {
3638 i40e_pre_tx_queue_cfg(hw, i, FALSE);
3639 i40e_usec_delay(500);
3641 reg = rd32(hw, I40E_QTX_ENA(i));
3642 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3643 wr32(hw, I40E_QTX_ENA(i), reg);
3644 /* Verify the disable took */
3645 for (int j = 0; j < 10; j++) {
3646 reg = rd32(hw, I40E_QTX_ENA(i));
3647 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3649 i40e_msec_delay(10);
3651 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3652 printf("TX queue %d still enabled!\n", i);
3654 reg = rd32(hw, I40E_QRX_ENA(i));
3655 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3656 wr32(hw, I40E_QRX_ENA(i), reg);
3657 /* Verify the disable took */
3658 for (int j = 0; j < 10; j++) {
3659 reg = rd32(hw, I40E_QRX_ENA(i));
3660 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3662 i40e_msec_delay(10);
3664 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3665 printf("RX queue %d still enabled!\n", i);
3670 * ixl_handle_mdd_event
3672 * Called from the interrupt handler to identify possibly malicious VFs
3673 * (it also detects events from the PF)
3675 static void ixl_handle_mdd_event(struct ixl_pf *pf)
3677 struct i40e_hw *hw = &pf->hw;
3678 device_t dev = pf->dev;
3679 bool mdd_detected = false;
3680 bool pf_mdd_detected = false;
3683 /* find what triggered the MDD event */
3684 reg = rd32(hw, I40E_GL_MDET_TX);
3685 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3686 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3687 I40E_GL_MDET_TX_PF_NUM_SHIFT;
3688 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3689 I40E_GL_MDET_TX_EVENT_SHIFT;
3690 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3691 I40E_GL_MDET_TX_QUEUE_SHIFT;
3693 "Malicious Driver Detection event 0x%02x"
3694 " on TX queue %d pf number 0x%02x\n",
3695 event, queue, pf_num);
3696 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3697 mdd_detected = true;
3699 reg = rd32(hw, I40E_GL_MDET_RX);
3700 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3701 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3702 I40E_GL_MDET_RX_FUNCTION_SHIFT;
3703 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3704 I40E_GL_MDET_RX_EVENT_SHIFT;
3705 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3706 I40E_GL_MDET_RX_QUEUE_SHIFT;
3708 "Malicious Driver Detection event 0x%02x"
3709 " on RX queue %d of function 0x%02x\n",
3710 event, queue, func);
3711 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3712 mdd_detected = true;
3716 reg = rd32(hw, I40E_PF_MDET_TX);
3717 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3718 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3720 "MDD TX event is for this function 0x%08x",
3722 pf_mdd_detected = true;
3724 reg = rd32(hw, I40E_PF_MDET_RX);
3725 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3726 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3728 "MDD RX event is for this function 0x%08x",
3730 pf_mdd_detected = true;
3734 /* re-enable mdd interrupt cause */
3735 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3736 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3737 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3742 ixl_enable_intr(struct ixl_vsi *vsi)
3744 struct i40e_hw *hw = vsi->hw;
3745 struct ixl_queue *que = vsi->queues;
3747 if (ixl_enable_msix) {
3748 ixl_enable_adminq(hw);
3749 for (int i = 0; i < vsi->num_queues; i++, que++)
3750 ixl_enable_queue(hw, que->me);
3752 ixl_enable_legacy(hw);
3756 ixl_disable_intr(struct ixl_vsi *vsi)
3758 struct i40e_hw *hw = vsi->hw;
3759 struct ixl_queue *que = vsi->queues;
3761 if (ixl_enable_msix) {
3762 ixl_disable_adminq(hw);
3763 for (int i = 0; i < vsi->num_queues; i++, que++)
3764 ixl_disable_queue(hw, que->me);
3766 ixl_disable_legacy(hw);
3770 ixl_enable_adminq(struct i40e_hw *hw)
3774 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3775 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3776 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3777 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3783 ixl_disable_adminq(struct i40e_hw *hw)
3787 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3788 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3794 ixl_enable_queue(struct i40e_hw *hw, int id)
3798 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3799 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3800 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3801 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3805 ixl_disable_queue(struct i40e_hw *hw, int id)
3809 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3810 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3816 ixl_enable_legacy(struct i40e_hw *hw)
3819 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3820 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3821 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3822 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3826 ixl_disable_legacy(struct i40e_hw *hw)
3830 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3831 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3837 ixl_update_stats_counters(struct ixl_pf *pf)
3839 struct i40e_hw *hw = &pf->hw;
3840 struct ixl_vsi *vsi = &pf->vsi;
3842 struct i40e_hw_port_stats *nsd = &pf->stats;
3843 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3845 /* Update hw stats */
3846 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3847 pf->stat_offsets_loaded,
3848 &osd->crc_errors, &nsd->crc_errors);
3849 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3850 pf->stat_offsets_loaded,
3851 &osd->illegal_bytes, &nsd->illegal_bytes);
3852 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3853 I40E_GLPRT_GORCL(hw->port),
3854 pf->stat_offsets_loaded,
3855 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3856 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3857 I40E_GLPRT_GOTCL(hw->port),
3858 pf->stat_offsets_loaded,
3859 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3860 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3861 pf->stat_offsets_loaded,
3862 &osd->eth.rx_discards,
3863 &nsd->eth.rx_discards);
3864 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3865 I40E_GLPRT_UPRCL(hw->port),
3866 pf->stat_offsets_loaded,
3867 &osd->eth.rx_unicast,
3868 &nsd->eth.rx_unicast);
3869 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3870 I40E_GLPRT_UPTCL(hw->port),
3871 pf->stat_offsets_loaded,
3872 &osd->eth.tx_unicast,
3873 &nsd->eth.tx_unicast);
3874 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3875 I40E_GLPRT_MPRCL(hw->port),
3876 pf->stat_offsets_loaded,
3877 &osd->eth.rx_multicast,
3878 &nsd->eth.rx_multicast);
3879 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3880 I40E_GLPRT_MPTCL(hw->port),
3881 pf->stat_offsets_loaded,
3882 &osd->eth.tx_multicast,
3883 &nsd->eth.tx_multicast);
3884 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3885 I40E_GLPRT_BPRCL(hw->port),
3886 pf->stat_offsets_loaded,
3887 &osd->eth.rx_broadcast,
3888 &nsd->eth.rx_broadcast);
3889 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3890 I40E_GLPRT_BPTCL(hw->port),
3891 pf->stat_offsets_loaded,
3892 &osd->eth.tx_broadcast,
3893 &nsd->eth.tx_broadcast);
3895 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3896 pf->stat_offsets_loaded,
3897 &osd->tx_dropped_link_down,
3898 &nsd->tx_dropped_link_down);
3899 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3900 pf->stat_offsets_loaded,
3901 &osd->mac_local_faults,
3902 &nsd->mac_local_faults);
3903 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3904 pf->stat_offsets_loaded,
3905 &osd->mac_remote_faults,
3906 &nsd->mac_remote_faults);
3907 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3908 pf->stat_offsets_loaded,
3909 &osd->rx_length_errors,
3910 &nsd->rx_length_errors);
3912 /* Flow control (LFC) stats */
3913 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3914 pf->stat_offsets_loaded,
3915 &osd->link_xon_rx, &nsd->link_xon_rx);
3916 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3917 pf->stat_offsets_loaded,
3918 &osd->link_xon_tx, &nsd->link_xon_tx);
3919 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3920 pf->stat_offsets_loaded,
3921 &osd->link_xoff_rx, &nsd->link_xoff_rx);
3922 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3923 pf->stat_offsets_loaded,
3924 &osd->link_xoff_tx, &nsd->link_xoff_tx);
3926 /* Packet size stats rx */
3927 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3928 I40E_GLPRT_PRC64L(hw->port),
3929 pf->stat_offsets_loaded,
3930 &osd->rx_size_64, &nsd->rx_size_64);
3931 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3932 I40E_GLPRT_PRC127L(hw->port),
3933 pf->stat_offsets_loaded,
3934 &osd->rx_size_127, &nsd->rx_size_127);
3935 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3936 I40E_GLPRT_PRC255L(hw->port),
3937 pf->stat_offsets_loaded,
3938 &osd->rx_size_255, &nsd->rx_size_255);
3939 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3940 I40E_GLPRT_PRC511L(hw->port),
3941 pf->stat_offsets_loaded,
3942 &osd->rx_size_511, &nsd->rx_size_511);
3943 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3944 I40E_GLPRT_PRC1023L(hw->port),
3945 pf->stat_offsets_loaded,
3946 &osd->rx_size_1023, &nsd->rx_size_1023);
3947 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3948 I40E_GLPRT_PRC1522L(hw->port),
3949 pf->stat_offsets_loaded,
3950 &osd->rx_size_1522, &nsd->rx_size_1522);
3951 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3952 I40E_GLPRT_PRC9522L(hw->port),
3953 pf->stat_offsets_loaded,
3954 &osd->rx_size_big, &nsd->rx_size_big);
3956 /* Packet size stats tx */
3957 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3958 I40E_GLPRT_PTC64L(hw->port),
3959 pf->stat_offsets_loaded,
3960 &osd->tx_size_64, &nsd->tx_size_64);
3961 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3962 I40E_GLPRT_PTC127L(hw->port),
3963 pf->stat_offsets_loaded,
3964 &osd->tx_size_127, &nsd->tx_size_127);
3965 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3966 I40E_GLPRT_PTC255L(hw->port),
3967 pf->stat_offsets_loaded,
3968 &osd->tx_size_255, &nsd->tx_size_255);
3969 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3970 I40E_GLPRT_PTC511L(hw->port),
3971 pf->stat_offsets_loaded,
3972 &osd->tx_size_511, &nsd->tx_size_511);
3973 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3974 I40E_GLPRT_PTC1023L(hw->port),
3975 pf->stat_offsets_loaded,
3976 &osd->tx_size_1023, &nsd->tx_size_1023);
3977 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3978 I40E_GLPRT_PTC1522L(hw->port),
3979 pf->stat_offsets_loaded,
3980 &osd->tx_size_1522, &nsd->tx_size_1522);
3981 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3982 I40E_GLPRT_PTC9522L(hw->port),
3983 pf->stat_offsets_loaded,
3984 &osd->tx_size_big, &nsd->tx_size_big);
3986 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
3987 pf->stat_offsets_loaded,
3988 &osd->rx_undersize, &nsd->rx_undersize);
3989 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
3990 pf->stat_offsets_loaded,
3991 &osd->rx_fragments, &nsd->rx_fragments);
3992 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
3993 pf->stat_offsets_loaded,
3994 &osd->rx_oversize, &nsd->rx_oversize);
3995 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
3996 pf->stat_offsets_loaded,
3997 &osd->rx_jabber, &nsd->rx_jabber);
3998 pf->stat_offsets_loaded = true;
4001 /* Update vsi stats */
4002 ixl_update_eth_stats(vsi);
4005 // ERJ - these are per-port, update all vsis?
4006 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes);
4010 ** Tasklet handler for MSIX Adminq interrupts
4011 ** - done outside the interrupt context since it might sleep
4014 ixl_do_adminq(void *context, int pending)
4016 struct ixl_pf *pf = context;
4017 struct i40e_hw *hw = &pf->hw;
4018 struct ixl_vsi *vsi = &pf->vsi;
4019 struct i40e_arq_event_info event;
4024 event.buf_len = IXL_AQ_BUF_SZ;
4025 event.msg_buf = malloc(event.buf_len,
4026 M_DEVBUF, M_NOWAIT | M_ZERO);
4027 if (!event.msg_buf) {
4028 printf("Unable to allocate adminq memory\n");
4032 /* clean and process any events */
4034 ret = i40e_clean_arq_element(hw, &event, &result);
4037 opcode = LE16_TO_CPU(event.desc.opcode);
4039 case i40e_aqc_opc_get_link_status:
4040 vsi->link_up = ixl_config_link(hw);
4041 ixl_update_link_status(pf);
4043 case i40e_aqc_opc_send_msg_to_pf:
4044 /* process pf/vf communication here */
4046 case i40e_aqc_opc_event_lan_overflow:
4050 printf("AdminQ unknown event %x\n", opcode);
4055 } while (result && (loop++ < IXL_ADM_LIMIT));
4057 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4058 reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4059 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4060 free(event.msg_buf, M_DEVBUF);
4063 ixl_enable_adminq(&pf->hw);
4065 ixl_enable_intr(vsi);
4069 ixl_debug_info(SYSCTL_HANDLER_ARGS)
4072 int error, input = 0;
4074 error = sysctl_handle_int(oidp, &input, 0, req);
4076 if (error || !req->newptr)
4080 pf = (struct ixl_pf *)arg1;
4081 ixl_print_debug_info(pf);
4088 ixl_print_debug_info(struct ixl_pf *pf)
4090 struct i40e_hw *hw = &pf->hw;
4091 struct ixl_vsi *vsi = &pf->vsi;
4092 struct ixl_queue *que = vsi->queues;
4093 struct rx_ring *rxr = &que->rxr;
4094 struct tx_ring *txr = &que->txr;
4098 printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4099 printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4100 printf("RX next check = %x\n", rxr->next_check);
4101 printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4102 printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4103 printf("TX desc avail = %x\n", txr->avail);
4105 reg = rd32(hw, I40E_GLV_GORCL(0xc));
4106 printf("RX Bytes = %x\n", reg);
4107 reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4108 printf("Port RX Bytes = %x\n", reg);
4109 reg = rd32(hw, I40E_GLV_RDPC(0xc));
4110 printf("RX discard = %x\n", reg);
4111 reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4112 printf("Port RX discard = %x\n", reg);
4114 reg = rd32(hw, I40E_GLV_TEPC(0xc));
4115 printf("TX errors = %x\n", reg);
4116 reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4117 printf("TX Bytes = %x\n", reg);
4119 reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4120 printf("RX undersize = %x\n", reg);
4121 reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4122 printf("RX fragments = %x\n", reg);
4123 reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4124 printf("RX oversize = %x\n", reg);
4125 reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4126 printf("RX length error = %x\n", reg);
4127 reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4128 printf("mac remote fault = %x\n", reg);
4129 reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4130 printf("mac local fault = %x\n", reg);
4134 * Update VSI-specific ethernet statistics counters.
4136 void ixl_update_eth_stats(struct ixl_vsi *vsi)
4138 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4139 struct i40e_hw *hw = &pf->hw;
4140 struct i40e_eth_stats *es;
4141 struct i40e_eth_stats *oes;
4143 uint64_t tx_discards;
4144 struct i40e_hw_port_stats *nsd;
4145 u16 stat_idx = vsi->info.stat_counter_idx;
4147 es = &vsi->eth_stats;
4148 oes = &vsi->eth_stats_offsets;
4151 /* Gather up the stats that the hw collects */
4152 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4153 vsi->stat_offsets_loaded,
4154 &oes->tx_errors, &es->tx_errors);
4155 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4156 vsi->stat_offsets_loaded,
4157 &oes->rx_discards, &es->rx_discards);
4159 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4160 I40E_GLV_GORCL(stat_idx),
4161 vsi->stat_offsets_loaded,
4162 &oes->rx_bytes, &es->rx_bytes);
4163 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4164 I40E_GLV_UPRCL(stat_idx),
4165 vsi->stat_offsets_loaded,
4166 &oes->rx_unicast, &es->rx_unicast);
4167 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4168 I40E_GLV_MPRCL(stat_idx),
4169 vsi->stat_offsets_loaded,
4170 &oes->rx_multicast, &es->rx_multicast);
4171 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4172 I40E_GLV_BPRCL(stat_idx),
4173 vsi->stat_offsets_loaded,
4174 &oes->rx_broadcast, &es->rx_broadcast);
4176 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4177 I40E_GLV_GOTCL(stat_idx),
4178 vsi->stat_offsets_loaded,
4179 &oes->tx_bytes, &es->tx_bytes);
4180 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4181 I40E_GLV_UPTCL(stat_idx),
4182 vsi->stat_offsets_loaded,
4183 &oes->tx_unicast, &es->tx_unicast);
4184 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4185 I40E_GLV_MPTCL(stat_idx),
4186 vsi->stat_offsets_loaded,
4187 &oes->tx_multicast, &es->tx_multicast);
4188 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4189 I40E_GLV_BPTCL(stat_idx),
4190 vsi->stat_offsets_loaded,
4191 &oes->tx_broadcast, &es->tx_broadcast);
4192 vsi->stat_offsets_loaded = true;
4194 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4195 for (i = 0; i < vsi->num_queues; i++)
4196 tx_discards += vsi->queues[i].txr.br->br_drops;
4198 /* Update ifnet stats */
4199 IXL_SET_IPACKETS(vsi, es->rx_unicast +
4202 IXL_SET_OPACKETS(vsi, es->tx_unicast +
4205 IXL_SET_IBYTES(vsi, es->rx_bytes);
4206 IXL_SET_OBYTES(vsi, es->tx_bytes);
4207 IXL_SET_IMCASTS(vsi, es->rx_multicast);
4208 IXL_SET_OMCASTS(vsi, es->tx_multicast);
4210 IXL_SET_OERRORS(vsi, es->tx_errors);
4211 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4212 IXL_SET_OQDROPS(vsi, tx_discards);
4213 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4214 IXL_SET_COLLISIONS(vsi, 0);
4218 * Reset all of the stats for the given pf
4220 void ixl_pf_reset_stats(struct ixl_pf *pf)
4222 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4223 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4224 pf->stat_offsets_loaded = false;
4228 * Resets all stats of the given vsi
4230 void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4232 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4233 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4234 vsi->stat_offsets_loaded = false;

/**
 * Read and update a 48 bit stat from the hw
 *
 * Since the device stats are not reset at PFReset, they will likely not
 * be zeroed when the driver starts.  We save the first values read and
 * use them as offsets to be subtracted from the raw values in order to
 * report stats that count from zero.
 **/
static void
ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
	bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
	new_data = rd64(hw, loreg);
#else
	/*
	 * Use two rd32's instead of one rd64; FreeBSD versions before
	 * 10 don't support 8-byte bus reads/writes.
	 */
	new_data = rd32(hw, loreg);
	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
#endif

	if (!offset_loaded)
		*offset = new_data;
	if (new_data >= *offset)
		*stat = new_data - *offset;
	else
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
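
/*
 * Worked example of the wrap handling above (illustrative only, values
 * chosen for the example): if the saved offset is 0xFFFFFFFFFFF0 and the
 * 48-bit hardware counter has since rolled over to new_data = 0x10, then
 *
 *	(0x10 + ((u64)1 << 48)) - 0xFFFFFFFFFFF0 = 0x20
 *
 * so 32 new events are reported, and the final mask with 0xFFFFFFFFFFFF
 * keeps the accumulated value within 48 bits.
 */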

/**
 * Read and update a 32 bit stat from the hw
 **/
static void
ixl_stat_update32(struct i40e_hw *hw, u32 reg,
	bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (new_data >= *offset)
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}

/*
** Set flow control using sysctl:
**	0 - off
**	1 - rx pause
**	2 - tx pause
**	3 - full
*/
static int
ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
	/*
	 * TODO: ensure flow control is disabled if
	 * priority flow control is enabled
	 *
	 * TODO: ensure tx CRC by hardware should be enabled
	 * if tx flow control is enabled.
	 */
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int requested_fc = 0, error = 0;
	enum i40e_status_code aq_error = 0;
	u8 fc_aq_err = 0;

	aq_error = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error retrieving link info from aq, %d\n",
		    __func__, aq_error);
		return (EAGAIN);
	}

	/* Read in new mode */
	requested_fc = hw->fc.current_mode;
	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (requested_fc < 0 || requested_fc > 3) {
		device_printf(dev,
		    "Invalid fc mode; valid modes are 0 through 3\n");
		return (EINVAL);
	}

	/*
	** Changing flow control mode currently does not work on
	** 40GBASE-CR4 PHYs
	*/
	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
	    || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
		device_printf(dev, "Changing flow control mode unsupported"
		    " on 40GBase-CR4 media.\n");
		return (ENODEV);
	}

	/* Set fc ability for port */
	hw->fc.requested_mode = requested_fc;
	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error setting new fc mode %d; fc_err %#x\n",
		    __func__, aq_error, fc_aq_err);
		return (EAGAIN);
	}

	if (hw->fc.current_mode != hw->fc.requested_mode) {
		device_printf(dev, "%s: FC set failure:\n", __func__);
		device_printf(dev, "%s: Current: %s / Requested: %s\n",
		    __func__,
		    ixl_fc_string[hw->fc.current_mode],
		    ixl_fc_string[hw->fc.requested_mode]);
	}

	return (0);
}
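
/*
 * Usage sketch (not part of the driver itself): this handler is exposed
 * as a read/write integer sysctl under the device's dev.ixl.<unit> tree;
 * the exact OID name (assumed here to be "fc") is whatever the handler is
 * registered with during attach.  From userland it would be driven
 * roughly like:
 *
 *	sysctl dev.ixl.0.fc	# show current flow control mode (0-3)
 *	sysctl dev.ixl.0.fc=3	# request full (rx + tx) flow control
 */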

static int
ixl_current_speed(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	int error = 0, index = 0;

	char *speeds[] = {"Unknown", "100M", "1G", "10G", "40G", "20G"};

	ixl_update_link_status(pf);

	switch (hw->phy.link_info.link_speed) {
	case I40E_LINK_SPEED_100MB:
		index = 1;
		break;
	case I40E_LINK_SPEED_1GB:
		index = 2;
		break;
	case I40E_LINK_SPEED_10GB:
		index = 3;
		break;
	case I40E_LINK_SPEED_40GB:
		index = 4;
		break;
	case I40E_LINK_SPEED_20GB:
		index = 5;
		break;
	case I40E_LINK_SPEED_UNKNOWN:
	default:
		index = 0;
		break;
	}

	error = sysctl_handle_string(oidp, speeds[index],
	    strlen(speeds[index]), req);
	return (error);
}

static int
ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_aq_set_phy_config config;
	enum i40e_status_code aq_error = 0;

	/* Get current capability information */
	aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL);
	if (aq_error) {
		device_printf(dev, "%s: Error getting phy capabilities %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return (EAGAIN);
	}

	/* Prepare new config */
	bzero(&config, sizeof(config));
	config.phy_type = abilities.phy_type;
	config.abilities = abilities.abilities
	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	config.eee_capability = abilities.eee_capability;
	config.eeer = abilities.eeer_val;
	config.low_power_ctrl = abilities.d3_lpan;
	/* Translate requested bits into aq cmd link_speed */
	if (speeds & 0x4)
		config.link_speed |= I40E_LINK_SPEED_10GB;
	if (speeds & 0x2)
		config.link_speed |= I40E_LINK_SPEED_1GB;
	if (speeds & 0x1)
		config.link_speed |= I40E_LINK_SPEED_100MB;

	/* Do aq command & restart link */
	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
	if (aq_error) {
		device_printf(dev, "%s: Error setting new phy config %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return (EAGAIN);
	}

	/*
	** This seems a bit heavy handed, but we
	** need to get a reinit on some devices
	*/
	IXL_PF_LOCK(pf);
	ixl_stop(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);

	return (0);
}

/*
** Control link advertise speed:
**	Flags:
**	0x1 - advertise 100 Mb
**	0x2 - advertise 1G
**	0x4 - advertise 10G
**
** Does not work on 40G devices.
*/
static int
ixl_set_advertise(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int requested_ls = 0;
	int error = 0;

	/*
	** FW doesn't support changing advertised speed
	** for 40G devices; speed is always 40G.
	*/
	if (i40e_is_40G_device(hw->device_id))
		return (ENODEV);

	/* Read in new mode */
	requested_ls = pf->advertised_speed;
	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (requested_ls < 1 || requested_ls > 7) {
		device_printf(dev,
		    "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
		return (EINVAL);
	}

	/* Exit if no change */
	if (pf->advertised_speed == requested_ls)
		return (0);

	error = ixl_set_advertised_speeds(pf, requested_ls);
	if (error)
		return (error);

	pf->advertised_speed = requested_ls;
	ixl_update_link_status(pf);
	return (0);
}
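
/*
 * Usage sketch (not part of the driver itself): the value written to this
 * sysctl is the OR of the flag bits documented above, so 0x7 advertises
 * 100 Mb, 1G and 10G, while 0x6 advertises only 1G and 10G.  Assuming the
 * handler is registered under an OID named "advertise_speed":
 *
 *	sysctl dev.ixl.0.advertise_speed=0x6
 */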

/*
** Get the width and transaction speed of
** the bus this adapter is plugged into.
*/
static u16
ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
{
	u16 link;
	int offset;

	/* Get the PCI Express Capabilities offset */
	pci_find_cap(dev, PCIY_EXPRESS, &offset);

	/* ...and read the Link Status Register */
	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);

	switch (link & I40E_PCI_LINK_WIDTH) {
	case I40E_PCI_LINK_WIDTH_1:
		hw->bus.width = i40e_bus_width_pcie_x1;
		break;
	case I40E_PCI_LINK_WIDTH_2:
		hw->bus.width = i40e_bus_width_pcie_x2;
		break;
	case I40E_PCI_LINK_WIDTH_4:
		hw->bus.width = i40e_bus_width_pcie_x4;
		break;
	case I40E_PCI_LINK_WIDTH_8:
		hw->bus.width = i40e_bus_width_pcie_x8;
		break;
	default:
		hw->bus.width = i40e_bus_width_unknown;
		break;
	}

	switch (link & I40E_PCI_LINK_SPEED) {
	case I40E_PCI_LINK_SPEED_2500:
		hw->bus.speed = i40e_bus_speed_2500;
		break;
	case I40E_PCI_LINK_SPEED_5000:
		hw->bus.speed = i40e_bus_speed_5000;
		break;
	case I40E_PCI_LINK_SPEED_8000:
		hw->bus.speed = i40e_bus_speed_8000;
		break;
	default:
		hw->bus.speed = i40e_bus_speed_unknown;
		break;
	}

	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
	    ("Unknown"));

	if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
	    (hw->bus.speed < i40e_bus_speed_8000)) {
		device_printf(dev, "PCI-Express bandwidth available"
		    " for this device is not sufficient for"
		    " normal operation.\n");
		device_printf(dev, "For expected performance a x8 "
		    "PCIE Gen3 slot is required.\n");
	}

	return (link);
}

static int
ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	char buf[32];

	snprintf(buf, sizeof(buf),
	    "f%d.%d a%d.%d n%02x.%02x e%08x",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
	    IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
	    IXL_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack);
	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
}
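
/*
 * Example of the resulting string (values are made up for illustration):
 *
 *	f4.22 a1.1 n04.24 e800013fd
 *
 * i.e. firmware 4.22, admin queue API 1.1, NVM image 04.24, and the
 * EETRACK id printed as eight hex digits.
 */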

#ifdef IXL_DEBUG_SYSCTL
static int
ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status link_status;
	char buf[512];

	enum i40e_status_code aq_error = 0;

	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
	if (aq_error) {
		printf("i40e_aq_get_link_info() error %d\n", aq_error);
		return (EPERM);
	}

	sprintf(buf, "\n"
	    "PHY Type : %#04x\n"
	    "Speed    : %#04x\n"
	    "Link info: %#04x\n"
	    "AN info  : %#04x\n"
	    "Ext info : %#04x",
	    link_status.phy_type, link_status.link_speed,
	    link_status.link_info, link_status.an_info,
	    link_status.ext_info);

	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
}

static int
ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_aq_get_phy_abilities_resp abilities_resp;
	char buf[512];

	enum i40e_status_code aq_error = 0;

	// TODO: Print out list of qualified modules as well?
	aq_error = i40e_aq_get_phy_capabilities(hw, TRUE, FALSE, &abilities_resp, NULL);
	if (aq_error) {
		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
		return (EPERM);
	}

	sprintf(buf, "\n"
	    "PHY Type : %#010x\n"
	    "Speed    : %#04x\n"
	    "Abilities: %#04x\n"
	    "EEE cap  : %#06x\n"
	    "EEER reg : %#010x\n"
	    "D3 Lpan  : %#04x",
	    abilities_resp.phy_type, abilities_resp.link_speed,
	    abilities_resp.abilities, abilities_resp.eee_capability,
	    abilities_resp.eeer_val, abilities_resp.d3_lpan);

	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
}

static int
ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_mac_filter *f;
	char *buf, *buf_i;

	int error = 0;
	int ftl_len = 0;
	int ftl_counter = 0;
	int buf_len = 0;
	int entry_len = 42;

	SLIST_FOREACH(f, &vsi->ftl, next) {
		ftl_len++;
	}

	if (ftl_len < 1) {
		sysctl_handle_string(oidp, "(none)", 6, req);
		return (0);
	}

	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
	if (buf == NULL) {
		/* M_NOWAIT allocations can fail */
		return (ENOMEM);
	}

	sprintf(buf_i++, "\n");
	SLIST_FOREACH(f, &vsi->ftl, next) {
		sprintf(buf_i,
		    MAC_FORMAT ", vlan %4d, flags %#06x",
		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
		buf_i += entry_len;
		/* don't print '\n' for last entry */
		if (++ftl_counter != ftl_len) {
			sprintf(buf_i, "\n");
			buf_i++;
		}
	}

	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
	if (error)
		printf("sysctl error: %d\n", error);
	free(buf, M_DEVBUF);
	return (error);
}

#define IXL_SW_RES_SIZE 0x14
static int
ixl_res_alloc_cmp(const void *a, const void *b)
{
	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
	one = (struct i40e_aqc_switch_resource_alloc_element_resp *)a;
	two = (struct i40e_aqc_switch_resource_alloc_element_resp *)b;

	return ((int)one->resource_type - (int)two->resource_type);
}

static int
ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;

	u8 num_entries;
	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];

	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	bzero(resp, sizeof(resp));
	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
	    resp, IXL_SW_RES_SIZE, NULL);
	if (error) {
		device_printf(dev, "%s: get_switch_resource_alloc() error %d, aq error %d\n",
		    __func__, error, hw->aq.asq_last_status);
		sbuf_delete(buf);
		return (error);
	}

	/* Sort entries by type for display */
	qsort(resp, num_entries,
	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
	    &ixl_res_alloc_cmp);

	sbuf_cat(buf, "\n");
	sbuf_printf(buf, "# of entries: %d\n", num_entries);
	sbuf_printf(buf,
	    "Type | Guaranteed | Total | Used   | Un-allocated\n"
	    "     | (this)     | (all) | (this) | (all)       \n");
	for (int i = 0; i < num_entries; i++) {
		sbuf_printf(buf,
		    "%#4x | %10d   %5d   %6d   %12d",
		    resp[i].resource_type,
		    resp[i].guaranteed,
		    resp[i].total,
		    resp[i].used,
		    resp[i].total_unalloced);
		if (i < num_entries - 1)
			sbuf_cat(buf, "\n");
	}

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);
	return (error);
}

/*
** Caller must init and delete sbuf; this function will clear and
** finish it for caller.
*/
static char *
ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
{
	sbuf_clear(s);

	if (seid == 0 && uplink)
		sbuf_cat(s, "Network");
	else if (seid == 0)
		sbuf_cat(s, "Host");
	else if (seid == 1)
		sbuf_cat(s, "EMP");
	else if (seid <= 5)
		sbuf_printf(s, "MAC %d", seid - 2);
	else if (seid <= 15)
		sbuf_cat(s, "Reserved");
	else if (seid <= 31)
		sbuf_printf(s, "PF %d", seid - 16);
	else if (seid <= 159)
		sbuf_printf(s, "VF %d", seid - 32);
	else if (seid <= 287)
		sbuf_cat(s, "Reserved");
	else if (seid <= 511)
		sbuf_cat(s, "Other"); // for other structures
	else if (seid <= 895)
		sbuf_printf(s, "VSI %d", seid - 512);
	else if (seid <= 1023)
		sbuf_printf(s, "Reserved");
	else
		sbuf_cat(s, "Invalid");

	sbuf_finish(s);
	return sbuf_data(s);
}
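
/*
 * For example, with the SEID ranges handled above: SEID 16 decodes to
 * "PF 0", SEID 32 to "VF 0", and SEID 512 to "VSI 0", which is how the
 * switch-config sysctl below labels each element.
 */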

static int
ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	struct sbuf *nmbuf;
	int error = 0;
	u8 aq_buf[I40E_AQ_LARGE_BUF];

	u16 next = 0;
	struct i40e_aqc_get_switch_config_resp *sw_config;
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;

	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
		return (ENOMEM);
	}

	error = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (error) {
		device_printf(dev, "%s: aq_get_switch_config() error %d, aq error %d\n",
		    __func__, error, hw->aq.asq_last_status);
		sbuf_delete(buf);
		return (error);
	}

	nmbuf = sbuf_new_auto();
	if (!nmbuf) {
		device_printf(dev, "Could not allocate sbuf for name output.\n");
		sbuf_delete(buf);
		return (ENOMEM);
	}

	sbuf_cat(buf, "\n");
	// Assuming <= 255 elements in switch
	sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
	/*
	** Revision -- all elements are revision 1 for now
	*/
	sbuf_printf(buf,
	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
	    "                |          |          | (uplink)\n");
	for (int i = 0; i < sw_config->header.num_reported; i++) {
		// "%4d (%8s) | %8s   %8s   %#8x",
		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
		sbuf_cat(buf, " ");
		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, sw_config->element[i].seid, false));
		sbuf_cat(buf, " | ");
		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, sw_config->element[i].uplink_seid, true));
		sbuf_cat(buf, "   ");
		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, sw_config->element[i].downlink_seid, false));
		sbuf_cat(buf, "   ");
		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
		if (i < sw_config->header.num_reported - 1)
			sbuf_cat(buf, "\n");
	}
	sbuf_delete(nmbuf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (error);
}

/*
** Dump TX descriptor given index.
** Doesn't work; don't use.
** TODO: Also needs a queue index input!
**/
static int
ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;

	int desc_idx = 0;

	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	/* Read in index */
	error = sysctl_handle_int(oidp, &desc_idx, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (EIO); // fix
	if (desc_idx > 1024) { // fix
		device_printf(dev,
		    "Invalid descriptor index, needs to be < 1024\n"); // fix
		return (EINVAL);
	}

	// Don't use this sysctl yet
	if (TRUE)
		return (ENODEV);

	sbuf_cat(buf, "\n");

	// set to queue 1?
	struct ixl_queue *que = pf->vsi.queues;
	struct tx_ring *txr = &(que[1].txr);
	struct i40e_tx_desc *txd = &txr->base[desc_idx];

	sbuf_printf(buf, "Que: %d, Desc: %d\n", que->me, desc_idx);
	sbuf_printf(buf, "Addr: %#18lx\n", txd->buffer_addr);
	sbuf_printf(buf, "Opts: %#18lx\n", txd->cmd_type_offset_bsz);

	error = sbuf_finish(buf);
	if (error) {
		device_printf(dev, "Error finishing sbuf: %d\n", error);
		sbuf_delete(buf);
		return (error);
	}

	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
	if (error)
		device_printf(dev, "sysctl error: %d\n", error);
	sbuf_delete(buf);
	return (error);
}
#endif /* IXL_DEBUG_SYSCTL */